1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19 ***/
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <unistd.h>
29 #include <signal.h>
30 #include <errno.h>
31
32 #ifdef HAVE_VALGRIND_MEMCHECK_H
33 #include <valgrind/memcheck.h>
34 #endif
35
36 #include <pulse/xmalloc.h>
37 #include <pulse/def.h>
38
39 #include <pulsecore/shm.h>
40 #include <pulsecore/log.h>
41 #include <pulsecore/hashmap.h>
42 #include <pulsecore/semaphore.h>
43 #include <pulsecore/mutex.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/refcnt.h>
46 #include <pulsecore/llist.h>
47 #include <pulsecore/flist.h>
48 #include <pulsecore/core-util.h>
49 #include <pulsecore/memtrap.h>
50
51 #include "memblock.h"
52
53 /* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
54 * note that the footprint is usually much smaller, since the data is
55 * stored in SHM and our OS does not commit the memory before we use
56 * it for the first time. */
57 #define PA_MEMPOOL_SLOTS_MAX 1024
58 #define PA_MEMPOOL_SLOT_SIZE (64*1024)
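/* Worked out: PA_MEMPOOL_SLOTS_MAX * PA_MEMPOOL_SLOT_SIZE = 1024 * 64 KiB
 * = 64 MiB, i.e. the 64*1024*1024 byte ceiling mentioned above. */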
59
60 #define PA_MEMEXPORT_SLOTS_MAX 128
61
62 #define PA_MEMIMPORT_SLOTS_MAX 160
63 #define PA_MEMIMPORT_SEGMENTS_MAX 16
64
65 struct pa_memblock {
66 PA_REFCNT_DECLARE; /* the reference counter */
67 pa_mempool *pool;
68
69 pa_memblock_type_t type;
70
71 bool read_only:1;
72 bool is_silence:1;
73
74 pa_atomic_ptr_t data;
75 size_t length;
76
77 pa_atomic_t n_acquired;
78 pa_atomic_t please_signal;
79
80 union {
81 struct {
82 /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
83 pa_free_cb_t free_cb;
84 /* If type == PA_MEMBLOCK_USER this is passed as free_cb argument */
85 void *free_cb_data;
86 } user;
87
88 struct {
89 uint32_t id;
90 pa_memimport_segment *segment;
91 } imported;
92 } per_type;
93 };
94
95 struct pa_memimport_segment {
96 pa_memimport *import;
97 pa_shm memory;
98 pa_memtrap *trap;
99 unsigned n_blocks;
100 bool writable;
101 };
102
103 /*
104 * If true, this segment's lifetime will not be limited by the
105 * number of active blocks (seg->n_blocks) using its shared memory.
106 * Rather, it will exist for the full lifetime of the memimport it
107 * is attached to.
108 *
109 * This is done to support memfd blocks transport.
110 *
111 * To transfer memfd-backed blocks without passing their fd every
112 * time, thus minimizing overhead and avoiding fd leaks, a command
113 * is sent with the memfd fd as ancil data very early on.
114 *
115 * This command has an ID that identifies the memfd region. Further
116 * block references are then exclusively done using this ID. On the
117 * receiving end, such logic is enabled by the memimport's segment
118 * hash and 'permanent' segments below.
119 */
static bool segment_is_permanent(pa_memimport_segment *seg) {
121 pa_assert(seg);
122 return seg->memory.type == PA_MEM_TYPE_SHARED_MEMFD;
123 }
124
125 /* A collection of multiple segments */
126 struct pa_memimport {
127 pa_mutex *mutex;
128
129 pa_mempool *pool;
130 pa_hashmap *segments;
131 pa_hashmap *blocks;
132
133 /* Called whenever an imported memory block is no longer
134 * needed. */
135 pa_memimport_release_cb_t release_cb;
136 void *userdata;
137
138 PA_LLIST_FIELDS(pa_memimport);
139 };
140
141 struct memexport_slot {
142 PA_LLIST_FIELDS(struct memexport_slot);
143 pa_memblock *block;
144 };
145
146 struct pa_memexport {
147 pa_mutex *mutex;
148 pa_mempool *pool;
149
150 struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
151
152 PA_LLIST_HEAD(struct memexport_slot, free_slots);
153 PA_LLIST_HEAD(struct memexport_slot, used_slots);
154 unsigned n_init;
155 unsigned baseidx;
156
    /* Called whenever a client from which we imported a memory block
       that we in turn exported to another client dies, so that we can
       revoke the memory block accordingly */
160 pa_memexport_revoke_cb_t revoke_cb;
161 void *userdata;
162
163 PA_LLIST_FIELDS(pa_memexport);
164 };
165
166 struct pa_mempool {
167 /* Reference count the mempool
168 *
169 * Any block allocation from the pool itself, or even just imported from
170 * another process through SHM and attached to it (PA_MEMBLOCK_IMPORTED),
171 * shall increase the refcount.
172 *
173 * This is done for per-client mempools: global references to blocks in
174 * the pool, or just to attached ones, can still be lingering around when
175 * the client connection dies and all per-client objects are to be freed.
176 * That is, current PulseAudio design does not guarantee that the client
177 * mempool blocks are referenced only by client-specific objects.
178 *
179 * For further details, please check:
180 * https://lists.freedesktop.org/archives/pulseaudio-discuss/2016-February/025587.html
181 */
182 PA_REFCNT_DECLARE;
183
184 pa_semaphore *semaphore;
185 pa_mutex *mutex;
186
187 pa_shm memory;
188
189 bool global;
190
191 size_t block_size;
192 unsigned n_blocks;
193 bool is_remote_writable;
194
195 pa_atomic_t n_init;
196
197 PA_LLIST_HEAD(pa_memimport, imports);
198 PA_LLIST_HEAD(pa_memexport, exports);
199
200 /* A list of free slots that may be reused */
201 pa_flist *free_slots;
202
203 pa_mempool_stat stat;
204 };
205
206 static void segment_detach(pa_memimport_segment *seg);
207
208 PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
209
210 /* No lock necessary */
static void stat_add(pa_memblock*b) {
212 pa_assert(b);
213 pa_assert(b->pool);
214
215 pa_atomic_inc(&b->pool->stat.n_allocated);
216 pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
217
218 pa_atomic_inc(&b->pool->stat.n_accumulated);
219 pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
220
221 if (b->type == PA_MEMBLOCK_IMPORTED) {
222 pa_atomic_inc(&b->pool->stat.n_imported);
223 pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
224 }
225
226 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
227 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
228 }
229
230 /* No lock necessary */
static void stat_remove(pa_memblock *b) {
232 pa_assert(b);
233 pa_assert(b->pool);
234
235 pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
236 pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
237
238 pa_atomic_dec(&b->pool->stat.n_allocated);
239 pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
240
241 if (b->type == PA_MEMBLOCK_IMPORTED) {
242 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
243 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
244
245 pa_atomic_dec(&b->pool->stat.n_imported);
246 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
247 }
248
249 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
250 }
251
252 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
253
254 /* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
256 pa_memblock *b;
257
258 pa_assert(p);
259 pa_assert(length);
260
261 if (!(b = pa_memblock_new_pool(p, length)))
262 b = memblock_new_appended(p, length);
263
264 return b;
265 }
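
/* Illustrative sketch (not part of this file's API, caller code only): the
 * typical allocate/fill/release cycle for a block, assuming 'pool' is a valid
 * pa_mempool as returned by pa_mempool_new():
 *
 *     pa_memblock *b = pa_memblock_new(pool, 4096);
 *     void *d = pa_memblock_acquire(b);
 *     memset(d, 0, pa_memblock_get_length(b));
 *     pa_memblock_release(b);
 *     pa_memblock_unref(b);
 */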
266
267 /* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
269 pa_memblock *b;
270
271 pa_assert(p);
272 pa_assert(length);
273
274 /* If -1 is passed as length we choose the size for the caller. */
275
276 if (length == (size_t) -1)
277 length = pa_mempool_block_size_max(p);
278
279 b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
280 PA_REFCNT_INIT(b);
281 b->pool = p;
282 pa_mempool_ref(b->pool);
283 b->type = PA_MEMBLOCK_APPENDED;
284 b->read_only = b->is_silence = false;
285 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
286 b->length = length;
287 pa_atomic_store(&b->n_acquired, 0);
288 pa_atomic_store(&b->please_signal, 0);
289
290 stat_add(b);
291 return b;
292 }
293
294 /* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
296 struct mempool_slot *slot;
297 pa_assert(p);
298
299 if (!(slot = pa_flist_pop(p->free_slots))) {
300 int idx;
301
302 /* The free list was empty, we have to allocate a new entry */
303
304 if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
305 pa_atomic_dec(&p->n_init);
306 else
307 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
308
309 if (!slot) {
310 if (pa_log_ratelimit(PA_LOG_DEBUG))
311 pa_log_debug("Pool full");
312 pa_atomic_inc(&p->stat.n_pool_full);
313 return NULL;
314 }
315 }
316
317 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
318 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
319 /* VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
320 /* } */
321 /* #endif */
322
323 return slot;
324 }
325
326 /* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
328 return slot;
329 }
330
331 /* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
333 pa_assert(p);
334
335 pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
336 pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
337
338 return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
339 }
340
341 /* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
343 unsigned idx;
344
345 if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
346 return NULL;
347
348 return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
349 }
350
351 /* No lock necessary */
bool pa_mempool_is_remote_writable(pa_mempool *p) {
353 pa_assert(p);
354 return p->is_remote_writable;
355 }
356
357 /* No lock necessary */
void pa_mempool_set_is_remote_writable(pa_mempool *p, bool writable) {
359 pa_assert(p);
360 pa_assert(!writable || pa_mempool_is_shared(p));
361 p->is_remote_writable = writable;
362 }
363
364 /* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
366 pa_memblock *b = NULL;
367 struct mempool_slot *slot;
368 static int mempool_disable = 0;
369
370 pa_assert(p);
371 pa_assert(length);
372
373 if (mempool_disable == 0)
374 mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;
375
376 if (mempool_disable > 0)
377 return NULL;
378
379 /* If -1 is passed as length we choose the size for the caller: we
380 * take the largest size that fits in one of our slots. */
381
382 if (length == (size_t) -1)
383 length = pa_mempool_block_size_max(p);
384
385 if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {
386
387 if (!(slot = mempool_allocate_slot(p)))
388 return NULL;
389
390 b = mempool_slot_data(slot);
391 b->type = PA_MEMBLOCK_POOL;
392 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
393
394 } else if (p->block_size >= length) {
395
396 if (!(slot = mempool_allocate_slot(p)))
397 return NULL;
398
399 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
400 b = pa_xnew(pa_memblock, 1);
401
402 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
403 pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));
404
405 } else {
406 pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
407 pa_atomic_inc(&p->stat.n_too_large_for_pool);
408 return NULL;
409 }
410
411 PA_REFCNT_INIT(b);
412 b->pool = p;
413 pa_mempool_ref(b->pool);
414 b->read_only = b->is_silence = false;
415 b->length = length;
416 pa_atomic_store(&b->n_acquired, 0);
417 pa_atomic_store(&b->please_signal, 0);
418
419 stat_add(b);
420 return b;
421 }
422
423 /* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, bool read_only) {
425 pa_memblock *b;
426
427 pa_assert(p);
428 pa_assert(d);
429 pa_assert(length != (size_t) -1);
430 pa_assert(length);
431
432 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
433 b = pa_xnew(pa_memblock, 1);
434
435 PA_REFCNT_INIT(b);
436 b->pool = p;
437 pa_mempool_ref(b->pool);
438 b->type = PA_MEMBLOCK_FIXED;
439 b->read_only = read_only;
440 b->is_silence = false;
441 pa_atomic_ptr_store(&b->data, d);
442 b->length = length;
443 pa_atomic_store(&b->n_acquired, 0);
444 pa_atomic_store(&b->please_signal, 0);
445
446 stat_add(b);
447 return b;
448 }
449
450 /* No lock necessary */
pa_memblock *pa_memblock_new_user(
452 pa_mempool *p,
453 void *d,
454 size_t length,
455 pa_free_cb_t free_cb,
456 void *free_cb_data,
457 bool read_only) {
458 pa_memblock *b;
459
460 pa_assert(p);
461 pa_assert(d);
462 pa_assert(length);
463 pa_assert(length != (size_t) -1);
464 pa_assert(free_cb);
465
466 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
467 b = pa_xnew(pa_memblock, 1);
468
469 PA_REFCNT_INIT(b);
470 b->pool = p;
471 pa_mempool_ref(b->pool);
472 b->type = PA_MEMBLOCK_USER;
473 b->read_only = read_only;
474 b->is_silence = false;
475 pa_atomic_ptr_store(&b->data, d);
476 b->length = length;
477 pa_atomic_store(&b->n_acquired, 0);
478 pa_atomic_store(&b->please_signal, 0);
479
480 b->per_type.user.free_cb = free_cb;
481 b->per_type.user.free_cb_data = free_cb_data;
482
483 stat_add(b);
484 return b;
485 }
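
/* Illustrative sketch (hypothetical caller code): wrapping caller-owned
 * memory in a memblock and letting the last unref release it through the
 * supplied callback:
 *
 *     uint8_t *buf = pa_xmalloc(1024);
 *     pa_memblock *b = pa_memblock_new_user(pool, buf, 1024, pa_xfree, buf, false);
 *     ...
 *     pa_memblock_unref(b);    (eventually calls pa_xfree(buf))
 */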
486
487 /* No lock necessary */
bool pa_memblock_is_ours(pa_memblock *b) {
489 pa_assert(b);
490 pa_assert(PA_REFCNT_VALUE(b) > 0);
491
492 return b->type != PA_MEMBLOCK_IMPORTED;
493 }
494
495 /* No lock necessary */
bool pa_memblock_is_read_only(pa_memblock *b) {
497 pa_assert(b);
498 pa_assert(PA_REFCNT_VALUE(b) > 0);
499
500 return b->read_only || PA_REFCNT_VALUE(b) > 1;
501 }
502
503 /* No lock necessary */
bool pa_memblock_is_silence(pa_memblock *b) {
505 pa_assert(b);
506 pa_assert(PA_REFCNT_VALUE(b) > 0);
507
508 return b->is_silence;
509 }
510
511 /* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, bool v) {
513 pa_assert(b);
514 pa_assert(PA_REFCNT_VALUE(b) > 0);
515
516 b->is_silence = v;
517 }
518
519 /* No lock necessary */
bool pa_memblock_ref_is_one(pa_memblock *b) {
521 int r;
522 pa_assert(b);
523
524 pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);
525
526 return r == 1;
527 }
528
529 /* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
531 pa_assert(b);
532 pa_assert(PA_REFCNT_VALUE(b) > 0);
533
534 pa_atomic_inc(&b->n_acquired);
535
536 return pa_atomic_ptr_load(&b->data);
537 }
538
539 /* No lock necessary */
void *pa_memblock_acquire_chunk(const pa_memchunk *c) {
541 pa_assert(c);
542
543 return (uint8_t *) pa_memblock_acquire(c->memblock) + c->index;
544 }
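
/* Illustrative sketch (hypothetical caller code): a pa_memchunk describes a
 * sub-range of a block through its 'index' and 'length' fields, so the
 * pointer returned above already points at the chunk's first byte:
 *
 *     pa_memchunk c;
 *     c.memblock = pa_memblock_new(pool, 4096);
 *     c.index = 128;
 *     c.length = 256;
 *     void *d = pa_memblock_acquire_chunk(&c);
 *     ...
 *     pa_memblock_release(c.memblock);
 *     pa_memblock_unref(c.memblock);
 */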
545
/* No lock necessary, in corner cases locks on its own */
void pa_memblock_release(pa_memblock *b) {
548 int r;
549 pa_assert(b);
550 pa_assert(PA_REFCNT_VALUE(b) > 0);
551
552 r = pa_atomic_dec(&b->n_acquired);
553 pa_assert(r >= 1);
554
555 /* Signal a waiting thread that this memblock is no longer used */
556 if (r == 1 && pa_atomic_load(&b->please_signal))
557 pa_semaphore_post(b->pool->semaphore);
558 }
559
size_t pa_memblock_get_length(pa_memblock *b) {
561 pa_assert(b);
562 pa_assert(PA_REFCNT_VALUE(b) > 0);
563
564 return b->length;
565 }
566
567 /* Note! Always unref the returned pool after use */
pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
569 pa_assert(b);
570 pa_assert(PA_REFCNT_VALUE(b) > 0);
571 pa_assert(b->pool);
572
573 pa_mempool_ref(b->pool);
574 return b->pool;
575 }
576
577 /* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock*b) {
579 pa_assert(b);
580 pa_assert(PA_REFCNT_VALUE(b) > 0);
581
582 PA_REFCNT_INC(b);
583 return b;
584 }
585
static void memblock_free(pa_memblock *b) {
587 pa_mempool *pool;
588
589 pa_assert(b);
590 pa_assert(b->pool);
591 pa_assert(pa_atomic_load(&b->n_acquired) == 0);
592
593 pool = b->pool;
594 stat_remove(b);
595
596 switch (b->type) {
597 case PA_MEMBLOCK_USER :
598 pa_assert(b->per_type.user.free_cb);
599 b->per_type.user.free_cb(b->per_type.user.free_cb_data);
600
601 /* Fall through */
602
603 case PA_MEMBLOCK_FIXED:
604 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
605 pa_xfree(b);
606
607 break;
608
609 case PA_MEMBLOCK_APPENDED:
610
            /* We could attach it to unused_memblocks, but that would
             * probably waste a considerable amount of memory */
613 pa_xfree(b);
614 break;
615
616 case PA_MEMBLOCK_IMPORTED: {
617 pa_memimport_segment *segment;
618 pa_memimport *import;
619
620 /* FIXME! This should be implemented lock-free */
621
622 pa_assert_se(segment = b->per_type.imported.segment);
623 pa_assert_se(import = segment->import);
624
625 pa_mutex_lock(import->mutex);
626
627 pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
628
629 pa_assert(segment->n_blocks >= 1);
630 if (-- segment->n_blocks <= 0)
631 segment_detach(segment);
632
633 pa_mutex_unlock(import->mutex);
634
635 import->release_cb(import, b->per_type.imported.id, import->userdata);
636
637 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
638 pa_xfree(b);
639
640 break;
641 }
642
643 case PA_MEMBLOCK_POOL_EXTERNAL:
644 case PA_MEMBLOCK_POOL: {
645 struct mempool_slot *slot;
646 bool call_free;
647
648 pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));
649
650 call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
651
652 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
653 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
654 /* VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
655 /* } */
656 /* #endif */
657
658 /* The free list dimensions should easily allow all slots
659 * to fit in, hence try harder if pushing this slot into
660 * the free list fails */
661 while (pa_flist_push(b->pool->free_slots, slot) < 0)
662 ;
663
664 if (call_free)
665 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
666 pa_xfree(b);
667
668 break;
669 }
670
671 case PA_MEMBLOCK_TYPE_MAX:
672 default:
673 pa_assert_not_reached();
674 }
675
676 pa_mempool_unref(pool);
677 }
678
679 /* No lock necessary */
void pa_memblock_unref(pa_memblock*b) {
681 pa_assert(b);
682 pa_assert(PA_REFCNT_VALUE(b) > 0);
683
684 if (PA_REFCNT_DEC(b) > 0)
685 return;
686
687 memblock_free(b);
688 }
689
690 /* Self locked */
static void memblock_wait(pa_memblock *b) {
692 pa_assert(b);
693
694 if (pa_atomic_load(&b->n_acquired) > 0) {
695 /* We need to wait until all threads gave up access to the
696 * memory block before we can go on. Unfortunately this means
697 * that we have to lock and wait here. Sniff! */
698
699 pa_atomic_inc(&b->please_signal);
700
701 while (pa_atomic_load(&b->n_acquired) > 0)
702 pa_semaphore_wait(b->pool->semaphore);
703
704 pa_atomic_dec(&b->please_signal);
705 }
706 }
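
/* To summarize the access protocol implemented above: pa_memblock_acquire()
 * increments n_acquired, pa_memblock_release() decrements it and posts the
 * pool semaphore if please_signal is set, and memblock_wait() sets
 * please_signal and sleeps on that semaphore until n_acquired drops to zero.
 * This is what allows memblock_make_local() below to swap out the data
 * pointer safely. */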
707
708 /* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
710 pa_assert(b);
711
712 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
713
714 if (b->length <= b->pool->block_size) {
715 struct mempool_slot *slot;
716
717 if ((slot = mempool_allocate_slot(b->pool))) {
718 void *new_data;
719 /* We can move it into a local pool, perfect! */
720
721 new_data = mempool_slot_data(slot);
722 memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
723 pa_atomic_ptr_store(&b->data, new_data);
724
725 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
726 b->read_only = false;
727
728 goto finish;
729 }
730 }
731
    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
733 b->per_type.user.free_cb = pa_xfree;
734 pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
735 b->per_type.user.free_cb_data = pa_atomic_ptr_load(&b->data);
736
737 b->type = PA_MEMBLOCK_USER;
738 b->read_only = false;
739
740 finish:
741 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
742 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
743 memblock_wait(b);
744 }
745
746 /* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
748 pa_assert(b);
749 pa_assert(PA_REFCNT_VALUE(b) > 0);
750 pa_assert(b->type == PA_MEMBLOCK_FIXED);
751
752 if (PA_REFCNT_VALUE(b) > 1)
753 memblock_make_local(b);
754
755 pa_memblock_unref(b);
756 }
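
/* Illustrative sketch (hypothetical caller code): the FIXED variant is meant
 * for memory whose lifetime the caller controls, e.g. a stack buffer. Calling
 * pa_memblock_unref_fixed() before that memory disappears copies the data out
 * if other references still exist:
 *
 *     uint8_t buf[256];
 *     pa_memblock *b = pa_memblock_new_fixed(pool, buf, sizeof(buf), true);
 *     ...hand b to code that may keep its own reference...
 *     pa_memblock_unref_fixed(b);    (buf may now be reused or freed)
 */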
757
758 /* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
760 void *p;
761
762 pa_assert(b);
763 pa_assert(PA_REFCNT_VALUE(b) > 0);
764
765 p = pa_memblock_acquire(b);
766 pa_will_need(p, b->length);
767 pa_memblock_release(b);
768
769 return b;
770 }
771
772 /* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
774 pa_memimport_segment *segment;
775 pa_memimport *import;
776
777 pa_assert(b);
778 pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
779
780 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
781 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
782 pa_atomic_dec(&b->pool->stat.n_imported);
783 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
784
785 pa_assert_se(segment = b->per_type.imported.segment);
786 pa_assert_se(import = segment->import);
787
788 pa_mutex_lock(import->mutex);
789
790 pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
791
792 memblock_make_local(b);
793
794 pa_assert(segment->n_blocks >= 1);
795 if (-- segment->n_blocks <= 0)
796 segment_detach(segment);
797
798 pa_mutex_unlock(import->mutex);
799 }
800
801 /*@per_client: This is a security measure. By default this should
802 * be set to true where the created mempool is never shared with more
803 * than one client in the system. Set this to false if a global
804 * mempool, shared with all existing and future clients, is required.
805 *
806 * NOTE-1: Do not create any further global mempools! They allow data
807 * leaks between clients and thus conflict with the xdg-app containers
808 * model. They also complicate the handling of memfd-based pools.
809 *
810 * NOTE-2: Almost all mempools are now created on a per client basis.
811 * The only exception is the pa_core's mempool which is still shared
812 * between all clients of the system.
813 *
814 * Beside security issues, special marking for global mempools is
815 * required for memfd communication. To avoid fd leaks, memfd pools
816 * are registered with the connection pstream to create an ID<->memfd
817 * mapping on both PA endpoints. Such memory regions are then always
818 * referenced by their IDs and never by their fds and thus their fds
819 * can be quickly closed later.
820 *
821 * Unfortunately this scheme cannot work with global pools since the
822 * ID registration mechanism needs to happen for each newly connected
 * client, hence the need for special handling. That is, the pool's
 * fd must always be kept open :-(
825 *
826 * TODO-1: Transform the global core mempool to a per-client one
827 * TODO-2: Remove global mempools support */
pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size, bool per_client) {
829 pa_mempool *p;
830 char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
831 const size_t page_size = pa_page_size();
832
833 p = pa_xnew0(pa_mempool, 1);
834 PA_REFCNT_INIT(p);
835
836 p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
837 if (p->block_size < page_size)
838 p->block_size = page_size;
839
840 if (size <= 0)
841 p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
842 else {
843 p->n_blocks = (unsigned) (size / p->block_size);
844
845 if (p->n_blocks < 2)
846 p->n_blocks = 2;
847 }
848
849 if (pa_shm_create_rw(&p->memory, type, p->n_blocks * p->block_size, 0700) < 0) {
850 pa_xfree(p);
851 return NULL;
852 }
853
854 pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
855 pa_mem_type_to_string(type),
856 p->n_blocks,
857 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
858 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
859 (unsigned long) pa_mempool_block_size_max(p));
860
861 p->global = !per_client;
862
863 pa_atomic_store(&p->n_init, 0);
864
865 PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
866 PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
867
868 p->mutex = pa_mutex_new(true, true);
869 p->semaphore = pa_semaphore_new(0);
870
871 p->free_slots = pa_flist_new(p->n_blocks);
872
873 return p;
874 }
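
/* Illustrative sketch (hypothetical caller code): creating a per-client,
 * memfd-backed pool with the default slot layout; passing 0 as size selects
 * PA_MEMPOOL_SLOTS_MAX slots:
 *
 *     pa_mempool *pool = pa_mempool_new(PA_MEM_TYPE_SHARED_MEMFD, 0, true);
 *     if (!pool)
 *         ...fall back to PA_MEM_TYPE_SHARED_POSIX or PA_MEM_TYPE_PRIVATE...
 *     ...
 *     pa_mempool_unref(pool);
 */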
875
static void mempool_free(pa_mempool *p) {
877 pa_assert(p);
878
879 pa_mutex_lock(p->mutex);
880
881 while (p->imports)
882 pa_memimport_free(p->imports);
883
884 while (p->exports)
885 pa_memexport_free(p->exports);
886
887 pa_mutex_unlock(p->mutex);
888
889 pa_flist_free(p->free_slots, NULL);
890
891 if (pa_atomic_load(&p->stat.n_allocated) > 0) {
892
893 /* Ouch, somebody is retaining a memory block reference! */
894
895 #ifdef DEBUG_REF
896 unsigned i;
897 pa_flist *list;
898
899 /* Let's try to find at least one of those leaked memory blocks */
900
901 list = pa_flist_new(p->n_blocks);
902
903 for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
904 struct mempool_slot *slot;
905 pa_memblock *b, *k;
906
907 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
908 b = mempool_slot_data(slot);
909
910 while ((k = pa_flist_pop(p->free_slots))) {
911 while (pa_flist_push(list, k) < 0)
912 ;
913
914 if (b == k)
915 break;
916 }
917
918 if (!k)
919 pa_log("REF: Leaked memory block %p", b);
920
921 while ((k = pa_flist_pop(list)))
922 while (pa_flist_push(p->free_slots, k) < 0)
923 ;
924 }
925
926 pa_flist_free(list, NULL);
927
928 #endif
929
930 pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
931
932 /* PA_DEBUG_TRAP; */
933 }
934
935 pa_shm_free(&p->memory);
936
937 pa_mutex_free(p->mutex);
938 pa_semaphore_free(p->semaphore);
939
940 pa_xfree(p);
941 }
942
943 /* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
945 pa_assert(p);
946
947 return &p->stat;
948 }
949
950 /* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
952 pa_assert(p);
953
954 return p->block_size - PA_ALIGN(sizeof(pa_memblock));
955 }
956
957 /* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
959 struct mempool_slot *slot;
960 pa_flist *list;
961
962 pa_assert(p);
963
964 list = pa_flist_new(p->n_blocks);
965
966 while ((slot = pa_flist_pop(p->free_slots)))
967 while (pa_flist_push(list, slot) < 0)
968 ;
969
970 while ((slot = pa_flist_pop(list))) {
971 pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
972
973 while (pa_flist_push(p->free_slots, slot))
974 ;
975 }
976
977 pa_flist_free(list, NULL);
978 }
979
980 /* No lock necessary */
bool pa_mempool_is_shared(pa_mempool *p) {
982 pa_assert(p);
983
984 return pa_mem_type_is_shared(p->memory.type);
985 }
986
987 /* No lock necessary */
bool pa_mempool_is_memfd_backed(const pa_mempool *p) {
989 pa_assert(p);
990
991 return (p->memory.type == PA_MEM_TYPE_SHARED_MEMFD);
992 }
993
994 /* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
996 pa_assert(p);
997
998 if (!pa_mempool_is_shared(p))
999 return -1;
1000
1001 *id = p->memory.id;
1002
1003 return 0;
1004 }
1005
pa_mempool* pa_mempool_ref(pa_mempool *p) {
1007 pa_assert(p);
1008 pa_assert(PA_REFCNT_VALUE(p) > 0);
1009
1010 PA_REFCNT_INC(p);
1011 return p;
1012 }
1013
void pa_mempool_unref(pa_mempool *p) {
1015 pa_assert(p);
1016 pa_assert(PA_REFCNT_VALUE(p) > 0);
1017
1018 if (PA_REFCNT_DEC(p) <= 0)
1019 mempool_free(p);
1020 }
1021
1022 /* No lock necessary
1023 * Check pa_mempool_new() for per-client vs. global mempools */
bool pa_mempool_is_global(pa_mempool *p) {
1025 pa_assert(p);
1026
1027 return p->global;
1028 }
1029
1030 /* No lock necessary
1031 * Check pa_mempool_new() for per-client vs. global mempools */
bool pa_mempool_is_per_client(pa_mempool *p) {
1033 return !pa_mempool_is_global(p);
1034 }
1035
1036 /* Self-locked
1037 *
1038 * This is only for per-client mempools!
1039 *
1040 * After this method's return, the caller owns the file descriptor
 * and is responsible for closing it at the appropriate time. This
 * should only be called once during a mempool's lifetime.
1043 *
1044 * Check pa_shm->fd and pa_mempool_new() for further context. */
int pa_mempool_take_memfd_fd(pa_mempool *p) {
1046 int memfd_fd;
1047
1048 pa_assert(p);
1049 pa_assert(pa_mempool_is_shared(p));
1050 pa_assert(pa_mempool_is_memfd_backed(p));
1051 pa_assert(pa_mempool_is_per_client(p));
1052
1053 pa_mutex_lock(p->mutex);
1054
1055 memfd_fd = p->memory.fd;
1056 p->memory.fd = -1;
1057
1058 pa_mutex_unlock(p->mutex);
1059
1060 pa_assert(memfd_fd != -1);
1061 return memfd_fd;
1062 }
1063
1064 /* No lock necessary
1065 *
1066 * This is only for global mempools!
1067 *
1068 * Global mempools have their memfd descriptor always open. DO NOT
 * close the returned descriptor on your own.
1070 *
1071 * Check pa_mempool_new() for further context. */
int pa_mempool_get_memfd_fd(pa_mempool *p) {
1073 int memfd_fd;
1074
1075 pa_assert(p);
1076 pa_assert(pa_mempool_is_shared(p));
1077 pa_assert(pa_mempool_is_memfd_backed(p));
1078 pa_assert(pa_mempool_is_global(p));
1079
1080 memfd_fd = p->memory.fd;
1081 pa_assert(memfd_fd != -1);
1082
1083 return memfd_fd;
1084 }
1085
1086 /* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
1088 pa_memimport *i;
1089
1090 pa_assert(p);
1091 pa_assert(cb);
1092
1093 i = pa_xnew(pa_memimport, 1);
1094 i->mutex = pa_mutex_new(true, true);
1095 i->pool = p;
1096 pa_mempool_ref(i->pool);
1097 i->segments = pa_hashmap_new(NULL, NULL);
1098 i->blocks = pa_hashmap_new(NULL, NULL);
1099 i->release_cb = cb;
1100 i->userdata = userdata;
1101
1102 pa_mutex_lock(p->mutex);
1103 PA_LLIST_PREPEND(pa_memimport, p->imports, i);
1104 pa_mutex_unlock(p->mutex);
1105
1106 return i;
1107 }
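
/* Illustrative sketch (hypothetical receiving-end code): a memfd pool is
 * registered once via pa_memimport_attach_memfd(), after which individual
 * blocks are resolved purely by ID through pa_memimport_get():
 *
 *     pa_memimport *imp = pa_memimport_new(pool, release_cb, userdata);
 *     pa_memimport_attach_memfd(imp, shm_id, received_memfd_fd, false);
 *     close(received_memfd_fd);          (we keep our own mapping)
 *     pa_memblock *b = pa_memimport_get(imp, PA_MEM_TYPE_SHARED_MEMFD,
 *                                       block_id, shm_id, offset, size, false);
 *
 * Here release_cb, shm_id, block_id, offset and size are placeholders for
 * values delivered by the connection protocol. */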
1108
1109 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
1110
1111 /* Should be called locked
1112 * Caller owns passed @memfd_fd and must close it down when appropriate. */
static pa_memimport_segment* segment_attach(pa_memimport *i, pa_mem_type_t type, uint32_t shm_id,
1114 int memfd_fd, bool writable) {
1115 pa_memimport_segment* seg;
1116 pa_assert(pa_mem_type_is_shared(type));
1117
1118 if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
1119 return NULL;
1120
1121 seg = pa_xnew0(pa_memimport_segment, 1);
1122
1123 if (pa_shm_attach(&seg->memory, type, shm_id, memfd_fd, writable) < 0) {
1124 pa_xfree(seg);
1125 return NULL;
1126 }
1127
1128 seg->writable = writable;
1129 seg->import = i;
1130 seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);
1131
1132 pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
1133 return seg;
1134 }
1135
1136 /* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
1138 pa_assert(seg);
1139 pa_assert(seg->n_blocks == (segment_is_permanent(seg) ? 1u : 0u));
1140
1141 pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
1142 pa_shm_free(&seg->memory);
1143
1144 if (seg->trap)
1145 pa_memtrap_remove(seg->trap);
1146
1147 pa_xfree(seg);
1148 }
1149
1150 /* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
1152 pa_memexport *e;
1153 pa_memblock *b;
1154 pa_memimport_segment *seg;
1155 void *state = NULL;
1156
1157 pa_assert(i);
1158
1159 pa_mutex_lock(i->mutex);
1160
1161 while ((b = pa_hashmap_first(i->blocks)))
1162 memblock_replace_import(b);
1163
1164 /* Permanent segments exist for the lifetime of the memimport. Now
1165 * that we're freeing the memimport itself, clear them all up.
1166 *
1167 * Careful! segment_detach() internally removes itself from the
1168 * memimport's hash; the same hash we're now using for iteration. */
1169 PA_HASHMAP_FOREACH(seg, i->segments, state) {
1170 if (segment_is_permanent(seg))
1171 segment_detach(seg);
1172 }
1173 pa_assert(pa_hashmap_size(i->segments) == 0);
1174
1175 pa_mutex_unlock(i->mutex);
1176
1177 pa_mutex_lock(i->pool->mutex);
1178
1179 /* If we've exported this block further we need to revoke that export */
1180 for (e = i->pool->exports; e; e = e->next)
1181 memexport_revoke_blocks(e, i);
1182
1183 PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
1184
1185 pa_mutex_unlock(i->pool->mutex);
1186
1187 pa_mempool_unref(i->pool);
1188 pa_hashmap_free(i->blocks);
1189 pa_hashmap_free(i->segments);
1190
1191 pa_mutex_free(i->mutex);
1192
1193 pa_xfree(i);
1194 }
1195
1196 /* Create a new memimport's memfd segment entry, with passed SHM ID
1197 * as key and the newly-created segment (with its mmap()-ed memfd
1198 * memory region) as its value.
1199 *
1200 * Note! check comments at 'pa_shm->fd', 'segment_is_permanent()',
1201 * and 'pa_pstream_register_memfd_mempool()' for further details.
1202 *
1203 * Caller owns passed @memfd_fd and must close it down when appropriate. */
int pa_memimport_attach_memfd(pa_memimport *i, uint32_t shm_id, int memfd_fd, bool writable) {
1205 pa_memimport_segment *seg;
1206 int ret = -1;
1207
1208 pa_assert(i);
1209 pa_assert(memfd_fd != -1);
1210
1211 pa_mutex_lock(i->mutex);
1212
1213 if (!(seg = segment_attach(i, PA_MEM_TYPE_SHARED_MEMFD, shm_id, memfd_fd, writable)))
1214 goto finish;
1215
1216 /* n_blocks acts as a segment reference count. To avoid the segment
1217 * being deleted when receiving silent memchunks, etc., mark our
1218 * permanent presence by incrementing that refcount. */
1219 seg->n_blocks++;
1220
1221 pa_assert(segment_is_permanent(seg));
1222 ret = 0;
1223
1224 finish:
1225 pa_mutex_unlock(i->mutex);
1226 return ret;
1227 }
1228
1229 /* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, pa_mem_type_t type, uint32_t block_id, uint32_t shm_id,
1231 size_t offset, size_t size, bool writable) {
1232 pa_memblock *b = NULL;
1233 pa_memimport_segment *seg;
1234
1235 pa_assert(i);
1236 pa_assert(pa_mem_type_is_shared(type));
1237
1238 pa_mutex_lock(i->mutex);
1239
1240 if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
1241 pa_memblock_ref(b);
1242 goto finish;
1243 }
1244
1245 if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
1246 goto finish;
1247
1248 if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id)))) {
1249 if (type == PA_MEM_TYPE_SHARED_MEMFD) {
1250 pa_log("Bailing out! No cached memimport segment for memfd ID %u", shm_id);
1251 pa_log("Did the other PA endpoint forget registering its memfd pool?");
1252 goto finish;
1253 }
1254
1255 pa_assert(type == PA_MEM_TYPE_SHARED_POSIX);
1256 if (!(seg = segment_attach(i, type, shm_id, -1, writable)))
1257 goto finish;
1258 }
1259
1260 if (writable && !seg->writable) {
1261 pa_log("Cannot import cached segment in write mode - previously mapped as read-only");
1262 goto finish;
1263 }
1264
1265 if (offset+size > seg->memory.size)
1266 goto finish;
1267
1268 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
1269 b = pa_xnew(pa_memblock, 1);
1270
1271 PA_REFCNT_INIT(b);
1272 b->pool = i->pool;
1273 pa_mempool_ref(b->pool);
1274 b->type = PA_MEMBLOCK_IMPORTED;
1275 b->read_only = !writable;
1276 b->is_silence = false;
1277 pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
1278 b->length = size;
1279 pa_atomic_store(&b->n_acquired, 0);
1280 pa_atomic_store(&b->please_signal, 0);
1281 b->per_type.imported.id = block_id;
1282 b->per_type.imported.segment = seg;
1283
1284 pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
1285
1286 seg->n_blocks++;
1287
1288 stat_add(b);
1289
1290 finish:
1291 pa_mutex_unlock(i->mutex);
1292
1293 return b;
1294 }
1295
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
1297 pa_memblock *b;
1298 int ret = 0;
1299 pa_assert(i);
1300
1301 pa_mutex_lock(i->mutex);
1302
1303 if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
1304 ret = -1;
1305 goto finish;
1306 }
1307
1308 memblock_replace_import(b);
1309
1310 finish:
1311 pa_mutex_unlock(i->mutex);
1312
1313 return ret;
1314 }
1315
1316 /* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
1318 pa_memexport *e;
1319
1320 static pa_atomic_t export_baseidx = PA_ATOMIC_INIT(0);
1321
1322 pa_assert(p);
1323 pa_assert(cb);
1324
1325 if (!pa_mempool_is_shared(p))
1326 return NULL;
1327
1328 e = pa_xnew(pa_memexport, 1);
1329 e->mutex = pa_mutex_new(true, true);
1330 e->pool = p;
1331 pa_mempool_ref(e->pool);
1332 PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
1333 PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
1334 e->n_init = 0;
1335 e->revoke_cb = cb;
1336 e->userdata = userdata;
1337
1338 pa_mutex_lock(p->mutex);
1339
1340 PA_LLIST_PREPEND(pa_memexport, p->exports, e);
1341 e->baseidx = (uint32_t) pa_atomic_add(&export_baseidx, PA_MEMEXPORT_SLOTS_MAX);
1342
1343 pa_mutex_unlock(p->mutex);
1344 return e;
1345 }
1346
void pa_memexport_free(pa_memexport *e) {
1348 pa_assert(e);
1349
1350 pa_mutex_lock(e->mutex);
1351 while (e->used_slots)
1352 pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots + e->baseidx));
1353 pa_mutex_unlock(e->mutex);
1354
1355 pa_mutex_lock(e->pool->mutex);
1356 PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
1357 pa_mutex_unlock(e->pool->mutex);
1358
1359 pa_mempool_unref(e->pool);
1360 pa_mutex_free(e->mutex);
1361 pa_xfree(e);
1362 }
1363
1364 /* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
1366 pa_memblock *b;
1367
1368 pa_assert(e);
1369
1370 pa_mutex_lock(e->mutex);
1371
1372 if (id < e->baseidx)
1373 goto fail;
1374 id -= e->baseidx;
1375
1376 if (id >= e->n_init)
1377 goto fail;
1378
1379 if (!e->slots[id].block)
1380 goto fail;
1381
1382 b = e->slots[id].block;
1383 e->slots[id].block = NULL;
1384
1385 PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
1386 PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
1387
1388 pa_mutex_unlock(e->mutex);
1389
1390 /* pa_log("Processing release for %u", id); */
1391
1392 pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
1393 pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
1394
1395 pa_atomic_dec(&e->pool->stat.n_exported);
1396 pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
1397
1398 pa_memblock_unref(b);
1399
1400 return 0;
1401
1402 fail:
1403 pa_mutex_unlock(e->mutex);
1404
1405 return -1;
1406 }
1407
1408 /* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
1410 struct memexport_slot *slot, *next;
1411 pa_assert(e);
1412 pa_assert(i);
1413
1414 pa_mutex_lock(e->mutex);
1415
1416 for (slot = e->used_slots; slot; slot = next) {
1417 uint32_t idx;
1418 next = slot->next;
1419
1420 if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
1421 slot->block->per_type.imported.segment->import != i)
1422 continue;
1423
1424 idx = (uint32_t) (slot - e->slots + e->baseidx);
1425 e->revoke_cb(e, idx, e->userdata);
1426 pa_memexport_process_release(e, idx);
1427 }
1428
1429 pa_mutex_unlock(e->mutex);
1430 }
1431
1432 /* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
1434 pa_memblock *n;
1435
1436 pa_assert(p);
1437 pa_assert(b);
1438
1439 if (b->type == PA_MEMBLOCK_IMPORTED ||
1440 b->type == PA_MEMBLOCK_POOL ||
1441 b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
1442 pa_assert(b->pool == p);
1443 return pa_memblock_ref(b);
1444 }
1445
1446 if (!(n = pa_memblock_new_pool(p, b->length)))
1447 return NULL;
1448
1449 memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
1450 return n;
1451 }
1452
1453 /* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, pa_mem_type_t *type, uint32_t *block_id,
1455 uint32_t *shm_id, size_t *offset, size_t * size) {
1456 pa_shm *memory;
1457 struct memexport_slot *slot;
1458 void *data;
1459
1460 pa_assert(e);
1461 pa_assert(b);
1462 pa_assert(type);
1463 pa_assert(block_id);
1464 pa_assert(shm_id);
1465 pa_assert(offset);
1466 pa_assert(size);
1467 pa_assert(b->pool == e->pool);
1468
1469 if (!(b = memblock_shared_copy(e->pool, b)))
1470 return -1;
1471
1472 pa_mutex_lock(e->mutex);
1473
1474 if (e->free_slots) {
1475 slot = e->free_slots;
1476 PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
1477 } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
1478 slot = &e->slots[e->n_init++];
1479 else {
1480 pa_mutex_unlock(e->mutex);
1481 pa_memblock_unref(b);
1482 return -1;
1483 }
1484
1485 PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
1486 slot->block = b;
1487 *block_id = (uint32_t) (slot - e->slots + e->baseidx);
1488
1489 pa_mutex_unlock(e->mutex);
1490 /* pa_log("Got block id %u", *block_id); */
1491
1492 data = pa_memblock_acquire(b);
1493
1494 if (b->type == PA_MEMBLOCK_IMPORTED) {
1495 pa_assert(b->per_type.imported.segment);
1496 memory = &b->per_type.imported.segment->memory;
1497 } else {
1498 pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
1499 pa_assert(b->pool);
1500 pa_assert(pa_mempool_is_shared(b->pool));
1501 memory = &b->pool->memory;
1502 }
1503
1504 pa_assert(data >= memory->ptr);
1505 pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
1506
1507 *type = memory->type;
1508 *shm_id = memory->id;
1509 *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
1510 *size = b->length;
1511
1512 pa_memblock_release(b);
1513
1514 pa_atomic_inc(&e->pool->stat.n_exported);
1515 pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);
1516
1517 return 0;
1518 }
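
/* Illustrative sketch (hypothetical sending-end code): exporting a block
 * yields everything the peer needs to locate the data in shared memory; once
 * the peer signals that it is done, the slot is recycled with
 * pa_memexport_process_release():
 *
 *     pa_mem_type_t type; uint32_t block_id, shm_id; size_t offset, size;
 *     pa_memexport *exp = pa_memexport_new(pool, revoke_cb, userdata);
 *     if (pa_memexport_put(exp, b, &type, &block_id, &shm_id, &offset, &size) >= 0)
 *         ...transmit (type, block_id, shm_id, offset, size) to the peer...
 *     ...later, when the peer signals release...
 *     pa_memexport_process_release(exp, block_id);
 *
 * Here revoke_cb and the transmit step stand in for pstream machinery that is
 * not shown. */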
1519