1 /***
2   This file is part of PulseAudio.
3 
4   Copyright 2004-2006 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6 
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as
9   published by the Free Software Foundation; either version 2.1 of the
10   License, or (at your option) any later version.
11 
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   Lesser General Public License for more details.
16 
17   You should have received a copy of the GNU Lesser General Public
18   License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19 ***/
20 
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24 
25 #undef LOG_TAG
26 #define LOG_TAG "Memblock"
27 
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <unistd.h>
32 #include <signal.h>
33 #include <errno.h>
34 
35 #ifdef HAVE_VALGRIND_MEMCHECK_H
36 #include <valgrind/memcheck.h>
37 #endif
38 
39 #include <pulse/xmalloc.h>
40 #include <pulse/def.h>
41 
42 #include <pulsecore/shm.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/hashmap.h>
45 #include <pulsecore/semaphore.h>
46 #include <pulsecore/mutex.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/refcnt.h>
49 #include <pulsecore/llist.h>
50 #include <pulsecore/flist.h>
51 #include <pulsecore/core-util.h>
52 #include <pulsecore/memtrap.h>
53 
54 #include "log/audio_log.h"
55 
56 #include "memblock.h"
57 
58 /* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
59  * note that the footprint is usually much smaller, since the data is
60  * stored in SHM and our OS does not commit the memory before we use
61  * it for the first time. */
62 #define PA_MEMPOOL_SLOTS_MAX 1024
63 #define PA_MEMPOOL_SLOT_SIZE (64*1024)
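/* Worked out: PA_MEMPOOL_SLOTS_MAX (1024) * PA_MEMPOOL_SLOT_SIZE (64 KiB) = 64 MiB, the maximum noted above. */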
64 
65 #define PA_MEMEXPORT_SLOTS_MAX 128
66 
67 #define PA_MEMIMPORT_SLOTS_MAX 160
68 #define PA_MEMIMPORT_SEGMENTS_MAX 16
69 
70 struct pa_memblock {
71     PA_REFCNT_DECLARE; /* the reference counter */
72     pa_mempool *pool;
73 
74     pa_memblock_type_t type;
75 
76     bool read_only:1;
77     bool is_silence:1;
78 
79     pa_atomic_ptr_t data;
80     size_t length;
81 
82     pa_atomic_t n_acquired;
83     pa_atomic_t please_signal;
84 
85     union {
86         struct {
87             /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
88             pa_free_cb_t free_cb;
89             /* If type == PA_MEMBLOCK_USER this is passed as free_cb argument */
90             void *free_cb_data;
91         } user;
92 
93         struct {
94             uint32_t id;
95             pa_memimport_segment *segment;
96         } imported;
97     } per_type;
98 };
99 
100 struct pa_memimport_segment {
101     pa_memimport *import;
102     pa_shm memory;
103     pa_memtrap *trap;
104     unsigned n_blocks;
105     bool writable;
106 };
107 
108 /*
109  * If true, this segment's lifetime will not be limited by the
110  * number of active blocks (seg->n_blocks) using its shared memory.
111  * Rather, it will exist for the full lifetime of the memimport it
112  * is attached to.
113  *
114  * This is done to support memfd blocks transport.
115  *
116  * To transfer memfd-backed blocks without passing their fd every
117  * time, thus minimizing overhead and avoiding fd leaks, a command
118  * is sent with the memfd fd as ancil data very early on.
119  *
120  * This command has an ID that identifies the memfd region. Further
121  * block references are then exclusively done using this ID. On the
122  * receiving end, such logic is enabled by the memimport's segment
123  * hash and 'permanent' segments below.
124  */
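/*
 * Illustrative sketch of that flow on the receiving side (hedged example;
 * 'import', 'shm_id', 'memfd_fd', 'block_id', 'offset' and 'size' are
 * assumed to arrive via the peer's pstream messages):
 *
 *     // Sent once, very early on: cache the memfd region as a permanent segment.
 *     pa_memimport_attach_memfd(import, shm_id, memfd_fd, false);
 *
 *     // All later block references carry only IDs, never the fd itself:
 *     pa_memblock *b = pa_memimport_get(import, PA_MEM_TYPE_SHARED_MEMFD,
 *                                       block_id, shm_id, offset, size, false);
 */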
125 static bool segment_is_permanent(pa_memimport_segment *seg) {
126     pa_assert(seg);
127     return seg->memory.type == PA_MEM_TYPE_SHARED_MEMFD;
128 }
129 
130 /* A collection of multiple segments */
131 struct pa_memimport {
132     pa_mutex *mutex;
133 
134     pa_mempool *pool;
135     pa_hashmap *segments;
136     pa_hashmap *blocks;
137 
138     /* Called whenever an imported memory block is no longer
139      * needed. */
140     pa_memimport_release_cb_t release_cb;
141     void *userdata;
142 
143     PA_LLIST_FIELDS(pa_memimport);
144 };
145 
146 struct memexport_slot {
147     PA_LLIST_FIELDS(struct memexport_slot);
148     pa_memblock *block;
149 };
150 
151 struct pa_memexport {
152     pa_mutex *mutex;
153     pa_mempool *pool;
154 
155     struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
156 
157     PA_LLIST_HEAD(struct memexport_slot, free_slots);
158     PA_LLIST_HEAD(struct memexport_slot, used_slots);
159     unsigned n_init;
160     unsigned baseidx;
161 
162     /* Called whenever a client from which we imported a memory block
163        which we in turn exported to another client dies and we need to
164        revoke the memory block accordingly */
165     pa_memexport_revoke_cb_t revoke_cb;
166     void *userdata;
167 
168     PA_LLIST_FIELDS(pa_memexport);
169 };
170 
171 struct pa_mempool {
172     /* Reference count the mempool
173      *
174      * Any block allocation from the pool itself, or even just imported from
175      * another process through SHM and attached to it (PA_MEMBLOCK_IMPORTED),
176      * shall increase the refcount.
177      *
178      * This is done for per-client mempools: global references to blocks in
179      * the pool, or just to attached ones, can still be lingering around when
180      * the client connection dies and all per-client objects are to be freed.
181      * That is, current PulseAudio design does not guarantee that the client
182      * mempool blocks are referenced only by client-specific objects.
183      *
184      * For further details, please check:
185      * https://lists.freedesktop.org/archives/pulseaudio-discuss/2016-February/025587.html
186      */
187     PA_REFCNT_DECLARE;
188 
189     pa_semaphore *semaphore;
190     pa_mutex *mutex;
191 
192     pa_shm memory;
193 
194     bool global;
195 
196     size_t block_size;
197     unsigned n_blocks;
198     bool is_remote_writable;
199 
200     pa_atomic_t n_init;
201 
202     PA_LLIST_HEAD(pa_memimport, imports);
203     PA_LLIST_HEAD(pa_memexport, exports);
204 
205     /* A list of free slots that may be reused */
206     pa_flist *free_slots;
207 
208     pa_mempool_stat stat;
209 };
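/*
 * The refcounting rules documented inside struct pa_mempool above, as a
 * hedged usage sketch (error handling omitted):
 *
 *     pa_mempool *pool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0, true);
 *     pa_memblock *b = pa_memblock_new(pool, 1024);  // the block takes a pool reference
 *     pa_mempool_unref(pool);                        // pool stays alive while b exists
 *     pa_memblock_unref(b);                          // last block gone -> pool is freed
 */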
210 
211 static void segment_detach(pa_memimport_segment *seg);
212 
213 PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
214 
215 /* No lock necessary */
216 static void stat_add(pa_memblock*b) {
217     pa_assert(b);
218     pa_assert(b->pool);
219 
220     pa_atomic_inc(&b->pool->stat.n_allocated);
221     pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
222 
223     pa_atomic_inc(&b->pool->stat.n_accumulated);
224     pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
225 
226     if (b->type == PA_MEMBLOCK_IMPORTED) {
227         pa_atomic_inc(&b->pool->stat.n_imported);
228         pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
229     }
230 
231     pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
232     pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
233 }
234 
235 /* No lock necessary */
236 static void stat_remove(pa_memblock *b) {
237     pa_assert(b);
238     pa_assert(b->pool);
239 
240     pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
241     pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
242 
243     pa_atomic_dec(&b->pool->stat.n_allocated);
244     pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
245 
246     if (b->type == PA_MEMBLOCK_IMPORTED) {
247         pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
248         pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
249 
250         pa_atomic_dec(&b->pool->stat.n_imported);
251         pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
252     }
253 
254     pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
255 }
256 
257 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
258 
259 /* No lock necessary */
260 pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
261     pa_memblock *b;
262 
263     pa_assert(p);
264     pa_assert(length);
265 
266     if (!(b = pa_memblock_new_pool(p, length)))
267         b = memblock_new_appended(p, length);
268 
269     return b;
270 }
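/*
 * Hedged usage sketch ('pool' is assumed to be an existing pa_mempool): a
 * block requested here transparently falls back to an APPENDED allocation
 * when no pool slot is available, see memblock_new_appended() below.
 *
 *     pa_memblock *b = pa_memblock_new(pool, (size_t) -1);  // largest slot-sized block
 *     ... fill it via pa_memblock_acquire()/pa_memblock_release() ...
 *     pa_memblock_unref(b);
 */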
271 
272 /* No lock necessary */
273 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
274     pa_memblock *b;
275 
276     pa_assert(p);
277     pa_assert(length);
278 
279     /* If -1 is passed as length we choose the size for the caller. */
280 
281     if (length == (size_t) -1)
282         length = pa_mempool_block_size_max(p);
283 
284     b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
285     PA_REFCNT_INIT(b);
286     b->pool = p;
287     pa_mempool_ref(b->pool);
288     b->type = PA_MEMBLOCK_APPENDED;
289     b->read_only = b->is_silence = false;
290     pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
291     b->length = length;
292     pa_atomic_store(&b->n_acquired, 0);
293     pa_atomic_store(&b->please_signal, 0);
294 
295     stat_add(b);
296     return b;
297 }
298 
299 /* No lock necessary */
300 static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
301     struct mempool_slot *slot;
302     pa_assert(p);
303 
304     if (!(slot = pa_flist_pop(p->free_slots))) {
305         int idx;
306 
307         /* The free list was empty, we have to allocate a new entry */
308 
309         if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
310             pa_atomic_dec(&p->n_init);
311         else
312             slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
313 
314         if (!slot) {
315             if (pa_log_ratelimit(PA_LOG_DEBUG))
316                 AUDIO_DEBUG_LOG("Pool full");
317             pa_atomic_inc(&p->stat.n_pool_full);
318             return NULL;
319         }
320     }
321 
322 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
323 /*     if (PA_UNLIKELY(pa_in_valgrind())) { */
324 /*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
325 /*     } */
326 /* #endif */
327 
328     return slot;
329 }
330 
331 /* No lock necessary, totally redundant anyway */
332 static inline void* mempool_slot_data(struct mempool_slot *slot) {
333     return slot;
334 }
335 
336 /* No lock necessary */
337 static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
338     pa_assert(p);
339 
340     pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
341     pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
342 
343     return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
344 }
345 
346 /* No lock necessary */
347 static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
348     unsigned idx;
349 
350     if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
351         return NULL;
352 
353     return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
354 }
355 
356 /* No lock necessary */
357 bool pa_mempool_is_remote_writable(pa_mempool *p) {
358     pa_assert(p);
359     return p->is_remote_writable;
360 }
361 
362 /* No lock necessary */
363 void pa_mempool_set_is_remote_writable(pa_mempool *p, bool writable) {
364     pa_assert(p);
365     pa_assert(!writable || pa_mempool_is_shared(p));
366     p->is_remote_writable = writable;
367 }
368 
369 /* No lock necessary */
370 pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
371     pa_memblock *b = NULL;
372     struct mempool_slot *slot;
373     static int mempool_disable = 0;
374 
375     pa_assert(p);
376     pa_assert(length);
377 
378     if (mempool_disable == 0)
379         mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;
380 
381     if (mempool_disable > 0)
382         return NULL;
383 
384     /* If -1 is passed as length we choose the size for the caller: we
385      * take the largest size that fits in one of our slots. */
386 
387     if (length == (size_t) -1)
388         length = pa_mempool_block_size_max(p);
389 
390     if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {
391 
392         if (!(slot = mempool_allocate_slot(p)))
393             return NULL;
394 
395         b = mempool_slot_data(slot);
396         b->type = PA_MEMBLOCK_POOL;
397         pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
398 
399     } else if (p->block_size >= length) {
400 
401         if (!(slot = mempool_allocate_slot(p)))
402             return NULL;
403 
404         if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
405             b = pa_xnew(pa_memblock, 1);
406 
407         b->type = PA_MEMBLOCK_POOL_EXTERNAL;
408         pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));
409 
410     } else {
411         AUDIO_DEBUG_LOG("Memory block too large for pool: %{public}lu > %{public}lu",
412             (unsigned long) length, (unsigned long) p->block_size);
413         pa_atomic_inc(&p->stat.n_too_large_for_pool);
414         return NULL;
415     }
416 
417     PA_REFCNT_INIT(b);
418     b->pool = p;
419     pa_mempool_ref(b->pool);
420     b->read_only = b->is_silence = false;
421     b->length = length;
422     pa_atomic_store(&b->n_acquired, 0);
423     pa_atomic_store(&b->please_signal, 0);
424 
425     stat_add(b);
426     return b;
427 }
428 
429 /* No lock necessary */
430 pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, bool read_only) {
431     pa_memblock *b;
432 
433     pa_assert(p);
434     pa_assert(d);
435     pa_assert(length != (size_t) -1);
436     pa_assert(length);
437 
438     if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
439         b = pa_xnew(pa_memblock, 1);
440 
441     PA_REFCNT_INIT(b);
442     b->pool = p;
443     pa_mempool_ref(b->pool);
444     b->type = PA_MEMBLOCK_FIXED;
445     b->read_only = read_only;
446     b->is_silence = false;
447     pa_atomic_ptr_store(&b->data, d);
448     b->length = length;
449     pa_atomic_store(&b->n_acquired, 0);
450     pa_atomic_store(&b->please_signal, 0);
451 
452     stat_add(b);
453     return b;
454 }
455 
456 /* No lock necessary */
457 pa_memblock *pa_memblock_new_user(
458         pa_mempool *p,
459         void *d,
460         size_t length,
461         pa_free_cb_t free_cb,
462         void *free_cb_data,
463         bool read_only) {
464     pa_memblock *b;
465 
466     pa_assert(p);
467     pa_assert(d);
468     pa_assert(length);
469     pa_assert(length != (size_t) -1);
470     pa_assert(free_cb);
471 
472     if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
473         b = pa_xnew(pa_memblock, 1);
474 
475     PA_REFCNT_INIT(b);
476     b->pool = p;
477     pa_mempool_ref(b->pool);
478     b->type = PA_MEMBLOCK_USER;
479     b->read_only = read_only;
480     b->is_silence = false;
481     pa_atomic_ptr_store(&b->data, d);
482     b->length = length;
483     pa_atomic_store(&b->n_acquired, 0);
484     pa_atomic_store(&b->please_signal, 0);
485 
486     b->per_type.user.free_cb = free_cb;
487     b->per_type.user.free_cb_data = free_cb_data;
488 
489     stat_add(b);
490     return b;
491 }
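/*
 * Hedged sketch of wrapping caller-allocated memory with a free callback
 * ('pool' is assumed to exist; pa_xmalloc()/pa_xfree() come from pulse/xmalloc.h):
 *
 *     void *buf = pa_xmalloc(4096);
 *     pa_memblock *b = pa_memblock_new_user(pool, buf, 4096, pa_xfree, buf, false);
 *     ...
 *     pa_memblock_unref(b);   // runs free_cb (pa_xfree) once the last reference drops
 */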
492 
493 /* No lock necessary */
494 bool pa_memblock_is_ours(pa_memblock *b) {
495     pa_assert(b);
496     pa_assert(PA_REFCNT_VALUE(b) > 0);
497 
498     return b->type != PA_MEMBLOCK_IMPORTED;
499 }
500 
501 /* No lock necessary */
502 bool pa_memblock_is_read_only(pa_memblock *b) {
503     pa_assert(b);
504     pa_assert(PA_REFCNT_VALUE(b) > 0);
505 
506     return b->read_only || PA_REFCNT_VALUE(b) > 1;
507 }
508 
509 /* No lock necessary */
510 bool pa_memblock_is_silence(pa_memblock *b) {
511     pa_assert(b);
512     pa_assert(PA_REFCNT_VALUE(b) > 0);
513 
514     return b->is_silence;
515 }
516 
517 /* No lock necessary */
518 void pa_memblock_set_is_silence(pa_memblock *b, bool v) {
519     pa_assert(b);
520     pa_assert(PA_REFCNT_VALUE(b) > 0);
521 
522     b->is_silence = v;
523 }
524 
525 /* No lock necessary */
526 bool pa_memblock_ref_is_one(pa_memblock *b) {
527     int r;
528     pa_assert(b);
529 
530     pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);
531 
532     return r == 1;
533 }
534 
535 /* No lock necessary */
536 void* pa_memblock_acquire(pa_memblock *b) {
537     pa_assert(b);
538     pa_assert(PA_REFCNT_VALUE(b) > 0);
539 
540     pa_atomic_inc(&b->n_acquired);
541 
542     return pa_atomic_ptr_load(&b->data);
543 }
544 
545 /* No lock necessary */
546 void *pa_memblock_acquire_chunk(const pa_memchunk *c) {
547     pa_assert(c);
548 
549     return (uint8_t *) pa_memblock_acquire(c->memblock) + c->index;
550 }
551 
552 /* No lock necessary, in corner cases locks by its own */
553 void pa_memblock_release(pa_memblock *b) {
554     int r;
555     pa_assert(b);
556     pa_assert(PA_REFCNT_VALUE(b) > 0);
557 
558     r = pa_atomic_dec(&b->n_acquired);
559     pa_assert(r >= 1);
560 
561     /* Signal a waiting thread that this memblock is no longer used */
562     if (r == 1 && pa_atomic_load(&b->please_signal))
563         pa_semaphore_post(b->pool->semaphore);
564 }
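/*
 * Hedged sketch of the acquire/release discipline ('samples' and 'n' are
 * hypothetical; n must not exceed pa_memblock_get_length(b)):
 *
 *     void *p = pa_memblock_acquire(b);
 *     memcpy(p, samples, n);
 *     pa_memblock_release(b);   // may wake a thread blocked in memblock_wait()
 */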
565 
566 size_t pa_memblock_get_length(pa_memblock *b) {
567     pa_assert(b);
568     pa_assert(PA_REFCNT_VALUE(b) > 0);
569 
570     return b->length;
571 }
572 
573 /* Note! Always unref the returned pool after use */
574 pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
575     pa_assert(b);
576     pa_assert(PA_REFCNT_VALUE(b) > 0);
577     pa_assert(b->pool);
578 
579     pa_mempool_ref(b->pool);
580     return b->pool;
581 }
582 
583 /* No lock necessary */
584 pa_memblock* pa_memblock_ref(pa_memblock*b) {
585     pa_assert(b);
586     pa_assert(PA_REFCNT_VALUE(b) > 0);
587 
588     PA_REFCNT_INC(b);
589     return b;
590 }
591 
592 static void memblock_free(pa_memblock *b) {
593     pa_mempool *pool;
594 
595     pa_assert(b);
596     pa_assert(b->pool);
597     pa_assert(pa_atomic_load(&b->n_acquired) == 0);
598 
599     pool = b->pool;
600     stat_remove(b);
601 
602     switch (b->type) {
603         case PA_MEMBLOCK_USER :
604             pa_assert(b->per_type.user.free_cb);
605             b->per_type.user.free_cb(b->per_type.user.free_cb_data);
606 
607             /* Fall through */
608 
609         case PA_MEMBLOCK_FIXED:
610             if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
611                 pa_xfree(b);
612 
613             break;
614 
615         case PA_MEMBLOCK_APPENDED:
616 
617             /* We could attach it to unused_memblocks, but that would
618              * probably waste some considerable amount of memory */
619             pa_xfree(b);
620             break;
621 
622         case PA_MEMBLOCK_IMPORTED: {
623             pa_memimport_segment *segment;
624             pa_memimport *import;
625 
626             /* FIXME! This should be implemented lock-free */
627 
628             pa_assert_se(segment = b->per_type.imported.segment);
629             pa_assert_se(import = segment->import);
630 
631             pa_mutex_lock(import->mutex);
632 
633             pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
634 
635             pa_assert(segment->n_blocks >= 1);
636             if (-- segment->n_blocks <= 0)
637                 segment_detach(segment);
638 
639             pa_mutex_unlock(import->mutex);
640 
641             import->release_cb(import, b->per_type.imported.id, import->userdata);
642 
643             if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
644                 pa_xfree(b);
645 
646             break;
647         }
648 
649         case PA_MEMBLOCK_POOL_EXTERNAL:
650         case PA_MEMBLOCK_POOL: {
651             struct mempool_slot *slot;
652             bool call_free;
653 
654             pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));
655 
656             call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
657 
658 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
659 /*             if (PA_UNLIKELY(pa_in_valgrind())) { */
660 /*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
661 /*             } */
662 /* #endif */
663 
664             /* The free list dimensions should easily allow all slots
665              * to fit in, hence try harder if pushing this slot into
666              * the free list fails */
667             while (pa_flist_push(b->pool->free_slots, slot) < 0)
668                 ;
669 
670             if (call_free)
671                 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
672                     pa_xfree(b);
673 
674             break;
675         }
676 
677         case PA_MEMBLOCK_TYPE_MAX:
678         default:
679             pa_assert_not_reached();
680     }
681 
682     pa_mempool_unref(pool);
683 }
684 
685 /* No lock necessary */
686 void pa_memblock_unref(pa_memblock*b) {
687     pa_assert(b);
688     pa_assert(PA_REFCNT_VALUE(b) > 0);
689 
690     if (PA_REFCNT_DEC(b) > 0)
691         return;
692 
693     memblock_free(b);
694 }
695 
696 /* Self locked */
697 static void memblock_wait(pa_memblock *b) {
698     pa_assert(b);
699 
700     if (pa_atomic_load(&b->n_acquired) > 0) {
701         /* We need to wait until all threads gave up access to the
702          * memory block before we can go on. Unfortunately this means
703          * that we have to lock and wait here. Sniff! */
704 
705         pa_atomic_inc(&b->please_signal);
706 
707         while (pa_atomic_load(&b->n_acquired) > 0)
708             pa_semaphore_wait(b->pool->semaphore);
709 
710         pa_atomic_dec(&b->please_signal);
711     }
712 }
713 
714 /* No lock necessary. This function is not multiple caller safe! */
715 static void memblock_make_local(pa_memblock *b) {
716     pa_assert(b);
717 
718     pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
719 
720     if (b->length <= b->pool->block_size) {
721         struct mempool_slot *slot;
722 
723         if ((slot = mempool_allocate_slot(b->pool))) {
724             void *new_data;
725             /* We can move it into a local pool, perfect! */
726 
727             new_data = mempool_slot_data(slot);
728             memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
729             pa_atomic_ptr_store(&b->data, new_data);
730 
731             b->type = PA_MEMBLOCK_POOL_EXTERNAL;
732             b->read_only = false;
733 
734             goto finish;
735         }
736     }
737 
738     /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
739     b->per_type.user.free_cb = pa_xfree;
740     pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
741     b->per_type.user.free_cb_data = pa_atomic_ptr_load(&b->data);
742 
743     b->type = PA_MEMBLOCK_USER;
744     b->read_only = false;
745 
746 finish:
747     pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
748     pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
749     memblock_wait(b);
750 }
751 
752 /* No lock necessary. This function is not multiple caller safe */
753 void pa_memblock_unref_fixed(pa_memblock *b) {
754     pa_assert(b);
755     pa_assert(PA_REFCNT_VALUE(b) > 0);
756     pa_assert(b->type == PA_MEMBLOCK_FIXED);
757 
758     if (PA_REFCNT_VALUE(b) > 1)
759         memblock_make_local(b);
760 
761     pa_memblock_unref(b);
762 }
763 
764 /* No lock necessary. */
765 pa_memblock *pa_memblock_will_need(pa_memblock *b) {
766     void *p;
767 
768     pa_assert(b);
769     pa_assert(PA_REFCNT_VALUE(b) > 0);
770 
771     p = pa_memblock_acquire(b);
772     pa_will_need(p, b->length);
773     pa_memblock_release(b);
774 
775     return b;
776 }
777 
778 /* Self-locked. This function is not multiple-caller safe */
779 static void memblock_replace_import(pa_memblock *b) {
780     pa_memimport_segment *segment;
781     pa_memimport *import;
782 
783     pa_assert(b);
784     pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
785 
786     pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
787     pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
788     pa_atomic_dec(&b->pool->stat.n_imported);
789     pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
790 
791     pa_assert_se(segment = b->per_type.imported.segment);
792     pa_assert_se(import = segment->import);
793 
794     pa_mutex_lock(import->mutex);
795 
796     pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
797 
798     memblock_make_local(b);
799 
800     pa_assert(segment->n_blocks >= 1);
801     if (-- segment->n_blocks <= 0)
802         segment_detach(segment);
803 
804     pa_mutex_unlock(import->mutex);
805 }
806 
807 /*@per_client: This is a security measure. By default this should
808  * be set to true where the created mempool is never shared with more
809  * than one client in the system. Set this to false if a global
810  * mempool, shared with all existing and future clients, is required.
811  *
812  * NOTE-1: Do not create any further global mempools! They allow data
813  * leaks between clients and thus conflict with the xdg-app containers
814  * model. They also complicate the handling of memfd-based pools.
815  *
816  * NOTE-2: Almost all mempools are now created on a per client basis.
817  * The only exception is the pa_core's mempool which is still shared
818  * between all clients of the system.
819  *
820  * Beside security issues, special marking for global mempools is
821  * required for memfd communication. To avoid fd leaks, memfd pools
822  * are registered with the connection pstream to create an ID<->memfd
823  * mapping on both PA endpoints. Such memory regions are then always
824  * referenced by their IDs and never by their fds and thus their fds
825  * can be quickly closed later.
826  *
827  * Unfortunately this scheme cannot work with global pools since the
828  * ID registration mechanism needs to happen for each newly connected
829  * client, which calls for special handling: the pool's fd has to be
830  * kept open at all times :-(
831  *
832  * TODO-1: Transform the global core mempool to a per-client one
833  * TODO-2: Remove global mempools support */
834 pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size, bool per_client) {
835     AUDIO_DEBUG_LOG("pa_mempool_new: type %{public}d, size %{public}zu, per_client %{public}d", type, size, per_client);
836     pa_mempool *p;
837     char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
838     const size_t page_size = pa_page_size();
839 
840     p = pa_xnew0(pa_mempool, 1);
841     PA_REFCNT_INIT(p);
842 
843     p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
844     if (p->block_size < page_size)
845         p->block_size = page_size;
846 
847     if (size <= 0)
848         p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
849     else {
850         p->n_blocks = (unsigned) (size / p->block_size);
851 
852         if (p->n_blocks < 2)
853             p->n_blocks = 2;
854     }
855 
856     if (pa_shm_create_rw(&p->memory, type, p->n_blocks * p->block_size, 0700) < 0) {
857         pa_xfree(p);
858         return NULL;
859     }
860 
861     AUDIO_DEBUG_LOG("Using %{public}s memory pool with %{public}u slots of size %{public}s each, total size is "
862                  "%{public}s, maximum usable slot size is %{public}lu",
863                  pa_mem_type_to_string(type),
864                  p->n_blocks,
865                  pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
866                  pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
867                  (unsigned long) pa_mempool_block_size_max(p));
868 
869     p->global = !per_client;
870 
871     pa_atomic_store(&p->n_init, 0);
872 
873     PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
874     PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
875 
876     p->mutex = pa_mutex_new(true, true);
877     p->semaphore = pa_semaphore_new(0);
878 
879     p->free_slots = pa_flist_new(p->n_blocks);
880 
881     return p;
882 }
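/*
 * Hedged sketch of creating a per-client, memfd-backed pool and handing its
 * fd over to the connection (registration with the pstream is referenced in
 * the comment above and happens elsewhere):
 *
 *     pa_mempool *pool = pa_mempool_new(PA_MEM_TYPE_SHARED_MEMFD, 0, true);
 *     int fd = pa_mempool_take_memfd_fd(pool);  // caller now owns the fd
 *     ... register fd with the pstream, then close it when appropriate ...
 */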
883 
884 static void mempool_free(pa_mempool *p) {
885     pa_assert(p);
886 
887     pa_mutex_lock(p->mutex);
888 
889     while (p->imports)
890         pa_memimport_free(p->imports);
891 
892     while (p->exports)
893         pa_memexport_free(p->exports);
894 
895     pa_mutex_unlock(p->mutex);
896 
897     pa_flist_free(p->free_slots, NULL);
898 
899     if (pa_atomic_load(&p->stat.n_allocated) > 0) {
900 
901         /* Ouch, somebody is retaining a memory block reference! */
902 
903 #ifdef DEBUG_REF
904         unsigned i;
905         pa_flist *list;
906 
907         /* Let's try to find at least one of those leaked memory blocks */
908 
909         list = pa_flist_new(p->n_blocks);
910 
911         for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
912             struct mempool_slot *slot;
913             pa_memblock *b, *k;
914 
915             slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
916             b = mempool_slot_data(slot);
917 
918             while ((k = pa_flist_pop(p->free_slots))) {
919                 while (pa_flist_push(list, k) < 0)
920                     ;
921 
922                 if (b == k)
923                     break;
924             }
925 
926             if (!k)
927                 AUDIO_ERR_LOG("REF: Leaked memory block %{public}p", b);
928 
929             while ((k = pa_flist_pop(list)))
930                 while (pa_flist_push(p->free_slots, k) < 0)
931                     ;
932         }
933 
934         pa_flist_free(list, NULL);
935 
936 #endif
937 
938         AUDIO_ERR_LOG("Memory pool destroyed but not all memory blocks freed! %{public}u remain.",
939             pa_atomic_load(&p->stat.n_allocated));
940 
941 /*         PA_DEBUG_TRAP; */
942     }
943 
944     pa_shm_free(&p->memory);
945 
946     pa_mutex_free(p->mutex);
947     pa_semaphore_free(p->semaphore);
948 
949     pa_xfree(p);
950 }
951 
952 /* No lock necessary */
953 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
954     pa_assert(p);
955 
956     return &p->stat;
957 }
958 
959 /* No lock necessary */
960 size_t pa_mempool_block_size_max(pa_mempool *p) {
961     pa_assert(p);
962 
963     return p->block_size - PA_ALIGN(sizeof(pa_memblock));
964 }
965 
966 /* No lock necessary */
967 void pa_mempool_vacuum(pa_mempool *p) {
968     struct mempool_slot *slot;
969     pa_flist *list;
970 
971     pa_assert(p);
972 
973     list = pa_flist_new(p->n_blocks);
974 
975     while ((slot = pa_flist_pop(p->free_slots)))
976         while (pa_flist_push(list, slot) < 0)
977             ;
978 
979     while ((slot = pa_flist_pop(list))) {
980         pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
981 
982         while (pa_flist_push(p->free_slots, slot))
983             ;
984     }
985 
986     pa_flist_free(list, NULL);
987 }
988 
989 /* No lock necessary */
990 bool pa_mempool_is_shared(pa_mempool *p) {
991     pa_assert(p);
992 
993     return pa_mem_type_is_shared(p->memory.type);
994 }
995 
996 /* No lock necessary */
997 bool pa_mempool_is_memfd_backed(const pa_mempool *p) {
998     pa_assert(p);
999 
1000     return (p->memory.type == PA_MEM_TYPE_SHARED_MEMFD);
1001 }
1002 
1003 /* No lock necessary */
1004 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
1005     pa_assert(p);
1006 
1007     if (!pa_mempool_is_shared(p))
1008         return -1;
1009 
1010     *id = p->memory.id;
1011 
1012     return 0;
1013 }
1014 
1015 pa_mempool* pa_mempool_ref(pa_mempool *p) {
1016     pa_assert(p);
1017     pa_assert(PA_REFCNT_VALUE(p) > 0);
1018 
1019     PA_REFCNT_INC(p);
1020     return p;
1021 }
1022 
1023 void pa_mempool_unref(pa_mempool *p) {
1024     pa_assert(p);
1025     pa_assert(PA_REFCNT_VALUE(p) > 0);
1026 
1027     if (PA_REFCNT_DEC(p) <= 0)
1028         mempool_free(p);
1029 }
1030 
1031 /* No lock necessary
1032  * Check pa_mempool_new() for per-client vs. global mempools */
1033 bool pa_mempool_is_global(pa_mempool *p) {
1034     pa_assert(p);
1035 
1036     return p->global;
1037 }
1038 
1039 /* No lock necessary
1040  * Check pa_mempool_new() for per-client vs. global mempools */
1041 bool pa_mempool_is_per_client(pa_mempool *p) {
1042     return !pa_mempool_is_global(p);
1043 }
1044 
1045 /* Self-locked
1046  *
1047  * This is only for per-client mempools!
1048  *
1049  * After this method's return, the caller owns the file descriptor
1050  * and is responsible for closing it at the appropriate time. This
1051  * should only be called once during a mempool's lifetime.
1052  *
1053  * Check pa_shm->fd and pa_mempool_new() for further context. */
1054 int pa_mempool_take_memfd_fd(pa_mempool *p) {
1055     int memfd_fd;
1056 
1057     pa_assert(p);
1058     pa_assert(pa_mempool_is_shared(p));
1059     pa_assert(pa_mempool_is_memfd_backed(p));
1060     pa_assert(pa_mempool_is_per_client(p));
1061 
1062     pa_mutex_lock(p->mutex);
1063 
1064     memfd_fd = p->memory.fd;
1065     p->memory.fd = -1;
1066 
1067     pa_mutex_unlock(p->mutex);
1068 
1069     pa_assert(memfd_fd != -1);
1070     return memfd_fd;
1071 }
1072 
1073 /* No lock necessary
1074  *
1075  * This is only for global mempools!
1076  *
1077  * Global mempools have their memfd descriptor always open. DO NOT
1078  * close the returned descriptor on your own.
1079  *
1080  * Check pa_mempool_new() for further context. */
1081 int pa_mempool_get_memfd_fd(pa_mempool *p) {
1082     int memfd_fd;
1083 
1084     pa_assert(p);
1085     pa_assert(pa_mempool_is_shared(p));
1086     pa_assert(pa_mempool_is_memfd_backed(p));
1087     pa_assert(pa_mempool_is_global(p));
1088 
1089     memfd_fd = p->memory.fd;
1090     pa_assert(memfd_fd != -1);
1091 
1092     return memfd_fd;
1093 }
1094 
1095 /* For receiving blocks from other nodes */
1096 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
1097     pa_memimport *i;
1098 
1099     pa_assert(p);
1100     pa_assert(cb);
1101 
1102     i = pa_xnew(pa_memimport, 1);
1103     i->mutex = pa_mutex_new(true, true);
1104     i->pool = p;
1105     pa_mempool_ref(i->pool);
1106     i->segments = pa_hashmap_new(NULL, NULL);
1107     i->blocks = pa_hashmap_new(NULL, NULL);
1108     i->release_cb = cb;
1109     i->userdata = userdata;
1110 
1111     pa_mutex_lock(p->mutex);
1112     PA_LLIST_PREPEND(pa_memimport, p->imports, i);
1113     pa_mutex_unlock(p->mutex);
1114 
1115     return i;
1116 }
1117 
1118 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
1119 
1120 /* Should be called locked
1121  * Caller owns passed @memfd_fd and must close it down when appropriate. */
1122 static pa_memimport_segment* segment_attach(pa_memimport *i, pa_mem_type_t type, uint32_t shm_id,
1123                                             int memfd_fd, bool writable) {
1124     pa_memimport_segment* seg;
1125     pa_assert(pa_mem_type_is_shared(type));
1126 
1127     if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
1128         return NULL;
1129 
1130     seg = pa_xnew0(pa_memimport_segment, 1);
1131 
1132     if (pa_shm_attach(&seg->memory, type, shm_id, memfd_fd, writable) < 0) {
1133         pa_xfree(seg);
1134         return NULL;
1135     }
1136 
1137     seg->writable = writable;
1138     seg->import = i;
1139     seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);
1140 
1141     pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
1142     return seg;
1143 }
1144 
1145 /* Should be called locked */
1146 static void segment_detach(pa_memimport_segment *seg) {
1147     pa_assert(seg);
1148     pa_assert(seg->n_blocks == (segment_is_permanent(seg) ? 1u : 0u));
1149 
1150     pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
1151     pa_shm_free(&seg->memory);
1152 
1153     if (seg->trap)
1154         pa_memtrap_remove(seg->trap);
1155 
1156     pa_xfree(seg);
1157 }
1158 
1159 /* Self-locked. Not multiple-caller safe */
1160 void pa_memimport_free(pa_memimport *i) {
1161     pa_memexport *e;
1162     pa_memblock *b;
1163     pa_memimport_segment *seg;
1164     void *state = NULL;
1165 
1166     pa_assert(i);
1167 
1168     pa_mutex_lock(i->mutex);
1169 
1170     while ((b = pa_hashmap_first(i->blocks)))
1171         memblock_replace_import(b);
1172 
1173     /* Permanent segments exist for the lifetime of the memimport. Now
1174      * that we're freeing the memimport itself, clear them all up.
1175      *
1176      * Careful! segment_detach() internally removes itself from the
1177      * memimport's hash; the same hash we're now using for iteration. */
1178     PA_HASHMAP_FOREACH(seg, i->segments, state) {
1179         if (segment_is_permanent(seg))
1180             segment_detach(seg);
1181     }
1182     pa_assert(pa_hashmap_size(i->segments) == 0);
1183 
1184     pa_mutex_unlock(i->mutex);
1185 
1186     pa_mutex_lock(i->pool->mutex);
1187 
1188     /* If we've exported this block further we need to revoke that export */
1189     for (e = i->pool->exports; e; e = e->next)
1190         memexport_revoke_blocks(e, i);
1191 
1192     PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
1193 
1194     pa_mutex_unlock(i->pool->mutex);
1195 
1196     pa_mempool_unref(i->pool);
1197     pa_hashmap_free(i->blocks);
1198     pa_hashmap_free(i->segments);
1199 
1200     pa_mutex_free(i->mutex);
1201 
1202     pa_xfree(i);
1203 }
1204 
1205 /* Create a new memimport's memfd segment entry, with passed SHM ID
1206  * as key and the newly-created segment (with its mmap()-ed memfd
1207  * memory region) as its value.
1208  *
1209  * Note! check comments at 'pa_shm->fd', 'segment_is_permanent()',
1210  * and 'pa_pstream_register_memfd_mempool()' for further details.
1211  *
1212  * Caller owns passed @memfd_fd and must close it down when appropriate. */
1213 int pa_memimport_attach_memfd(pa_memimport *i, uint32_t shm_id, int memfd_fd, bool writable) {
1214     pa_memimport_segment *seg;
1215     int ret = -1;
1216 
1217     pa_assert(i);
1218     pa_assert(memfd_fd != -1);
1219 
1220     pa_mutex_lock(i->mutex);
1221 
1222     if (!(seg = segment_attach(i, PA_MEM_TYPE_SHARED_MEMFD, shm_id, memfd_fd, writable)))
1223         goto finish;
1224 
1225     /* n_blocks acts as a segment reference count. To avoid the segment
1226      * being deleted when receiving silent memchunks, etc., mark our
1227      * permanent presence by incrementing that refcount. */
1228     seg->n_blocks++;
1229 
1230     pa_assert(segment_is_permanent(seg));
1231     ret = 0;
1232 
1233 finish:
1234     pa_mutex_unlock(i->mutex);
1235     return ret;
1236 }
1237 
1238 /* Self-locked */
1239 pa_memblock* pa_memimport_get(pa_memimport *i, pa_mem_type_t type, uint32_t block_id, uint32_t shm_id,
1240                               size_t offset, size_t size, bool writable) {
1241     pa_memblock *b = NULL;
1242     pa_memimport_segment *seg;
1243 
1244     pa_assert(i);
1245     pa_assert(pa_mem_type_is_shared(type));
1246 
1247     pa_mutex_lock(i->mutex);
1248 
1249     if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
1250         pa_memblock_ref(b);
1251         goto finish;
1252     }
1253 
1254     if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
1255         goto finish;
1256 
1257     if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id)))) {
1258         if (type == PA_MEM_TYPE_SHARED_MEMFD) {
1259             AUDIO_ERR_LOG("Bailing out! No cached memimport segment for memfd ID %{public}u", shm_id);
1260             AUDIO_ERR_LOG("Did the other PA endpoint forget registering its memfd pool?");
1261             goto finish;
1262         }
1263 
1264         pa_assert(type == PA_MEM_TYPE_SHARED_POSIX);
1265         if (!(seg = segment_attach(i, type, shm_id, -1, writable)))
1266             goto finish;
1267     }
1268 
1269     if (writable && !seg->writable) {
1270         AUDIO_ERR_LOG("Cannot import cached segment in write mode - previously mapped as read-only");
1271         goto finish;
1272     }
1273 
1274     if (offset+size > seg->memory.size)
1275         goto finish;
1276 
1277     if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
1278         b = pa_xnew(pa_memblock, 1);
1279 
1280     PA_REFCNT_INIT(b);
1281     b->pool = i->pool;
1282     pa_mempool_ref(b->pool);
1283     b->type = PA_MEMBLOCK_IMPORTED;
1284     b->read_only = !writable;
1285     b->is_silence = false;
1286     pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
1287     b->length = size;
1288     pa_atomic_store(&b->n_acquired, 0);
1289     pa_atomic_store(&b->please_signal, 0);
1290     b->per_type.imported.id = block_id;
1291     b->per_type.imported.segment = seg;
1292 
1293     pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
1294 
1295     seg->n_blocks++;
1296 
1297     stat_add(b);
1298 
1299 finish:
1300     pa_mutex_unlock(i->mutex);
1301 
1302     return b;
1303 }
1304 
1305 int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
1306     pa_memblock *b;
1307     int ret = 0;
1308     pa_assert(i);
1309 
1310     pa_mutex_lock(i->mutex);
1311 
1312     if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
1313         ret = -1;
1314         goto finish;
1315     }
1316 
1317     memblock_replace_import(b);
1318 
1319 finish:
1320     pa_mutex_unlock(i->mutex);
1321 
1322     return ret;
1323 }
1324 
1325 /* For sending blocks to other nodes */
1326 pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
1327     pa_memexport *e;
1328 
1329     static pa_atomic_t export_baseidx = PA_ATOMIC_INIT(0);
1330 
1331     pa_assert(p);
1332     pa_assert(cb);
1333 
1334     if (!pa_mempool_is_shared(p))
1335         return NULL;
1336 
1337     e = pa_xnew(pa_memexport, 1);
1338     e->mutex = pa_mutex_new(true, true);
1339     e->pool = p;
1340     pa_mempool_ref(e->pool);
1341     PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
1342     PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
1343     e->n_init = 0;
1344     e->revoke_cb = cb;
1345     e->userdata = userdata;
1346 
1347     pa_mutex_lock(p->mutex);
1348 
1349     PA_LLIST_PREPEND(pa_memexport, p->exports, e);
1350     e->baseidx = (uint32_t) pa_atomic_add(&export_baseidx, PA_MEMEXPORT_SLOTS_MAX);
1351 
1352     pa_mutex_unlock(p->mutex);
1353     return e;
1354 }
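/*
 * Hedged sketch of the export path ('exp' and 'b' are assumed to exist; error
 * handling omitted). The tuple filled in by pa_memexport_put() is what gets
 * transmitted to the peer, which later echoes the block ID back for release:
 *
 *     pa_mem_type_t type; uint32_t block_id, shm_id; size_t offset, size;
 *     if (pa_memexport_put(exp, b, &type, &block_id, &shm_id, &offset, &size) == 0) {
 *         ... send (type, block_id, shm_id, offset, size) to the peer ...
 *     }
 *     // Once the peer reports it is done with the block:
 *     pa_memexport_process_release(exp, block_id);
 */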
1355 
1356 void pa_memexport_free(pa_memexport *e) {
1357     pa_assert(e);
1358 
1359     pa_mutex_lock(e->mutex);
1360     while (e->used_slots)
1361         pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots + e->baseidx));
1362     pa_mutex_unlock(e->mutex);
1363 
1364     pa_mutex_lock(e->pool->mutex);
1365     PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
1366     pa_mutex_unlock(e->pool->mutex);
1367 
1368     pa_mempool_unref(e->pool);
1369     pa_mutex_free(e->mutex);
1370     pa_xfree(e);
1371 }
1372 
1373 /* Self-locked */
1374 int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
1375     pa_memblock *b;
1376 
1377     pa_assert(e);
1378 
1379     pa_mutex_lock(e->mutex);
1380 
1381     if (id < e->baseidx)
1382         goto fail;
1383     id -= e->baseidx;
1384 
1385     if (id >= e->n_init)
1386         goto fail;
1387 
1388     if (!e->slots[id].block)
1389         goto fail;
1390 
1391     b = e->slots[id].block;
1392     e->slots[id].block = NULL;
1393 
1394     PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
1395     PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
1396 
1397     pa_mutex_unlock(e->mutex);
1398 
1399 /*     pa_log("Processing release for %u", id); */
1400 
1401     pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
1402     pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
1403 
1404     pa_atomic_dec(&e->pool->stat.n_exported);
1405     pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
1406 
1407     pa_memblock_unref(b);
1408 
1409     return 0;
1410 
1411 fail:
1412     pa_mutex_unlock(e->mutex);
1413 
1414     return -1;
1415 }
1416 
1417 /* Self-locked */
1418 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
1419     struct memexport_slot *slot, *next;
1420     pa_assert(e);
1421     pa_assert(i);
1422 
1423     pa_mutex_lock(e->mutex);
1424 
1425     for (slot = e->used_slots; slot; slot = next) {
1426         uint32_t idx;
1427         next = slot->next;
1428 
1429         if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
1430             slot->block->per_type.imported.segment->import != i)
1431             continue;
1432 
1433         idx = (uint32_t) (slot - e->slots + e->baseidx);
1434         e->revoke_cb(e, idx, e->userdata);
1435         pa_memexport_process_release(e, idx);
1436     }
1437 
1438     pa_mutex_unlock(e->mutex);
1439 }
1440 
1441 /* No lock necessary */
1442 static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
1443     pa_memblock *n;
1444 
1445     pa_assert(p);
1446     pa_assert(b);
1447 
1448     if (b->type == PA_MEMBLOCK_IMPORTED ||
1449         b->type == PA_MEMBLOCK_POOL ||
1450         b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
1451         pa_assert(b->pool == p);
1452         return pa_memblock_ref(b);
1453     }
1454 
1455     if (!(n = pa_memblock_new_pool(p, b->length)))
1456         return NULL;
1457 
1458     memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
1459     return n;
1460 }
1461 
1462 /* Self-locked */
1463 int pa_memexport_put(pa_memexport *e, pa_memblock *b, pa_mem_type_t *type, uint32_t *block_id,
1464                      uint32_t *shm_id, size_t *offset, size_t * size) {
1465     pa_shm  *memory;
1466     struct memexport_slot *slot;
1467     void *data;
1468 
1469     pa_assert(e);
1470     pa_assert(b);
1471     pa_assert(type);
1472     pa_assert(block_id);
1473     pa_assert(shm_id);
1474     pa_assert(offset);
1475     pa_assert(size);
1476     pa_assert(b->pool == e->pool);
1477 
1478     if (!(b = memblock_shared_copy(e->pool, b)))
1479         return -1;
1480 
1481     pa_mutex_lock(e->mutex);
1482 
1483     if (e->free_slots) {
1484         slot = e->free_slots;
1485         PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
1486     } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
1487         slot = &e->slots[e->n_init++];
1488     else {
1489         pa_mutex_unlock(e->mutex);
1490         pa_memblock_unref(b);
1491         return -1;
1492     }
1493 
1494     PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
1495     slot->block = b;
1496     *block_id = (uint32_t) (slot - e->slots + e->baseidx);
1497 
1498     pa_mutex_unlock(e->mutex);
1499 /*     pa_log("Got block id %u", *block_id); */
1500 
1501     data = pa_memblock_acquire(b);
1502 
1503     if (b->type == PA_MEMBLOCK_IMPORTED) {
1504         pa_assert(b->per_type.imported.segment);
1505         memory = &b->per_type.imported.segment->memory;
1506     } else {
1507         pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
1508         pa_assert(b->pool);
1509         pa_assert(pa_mempool_is_shared(b->pool));
1510         memory = &b->pool->memory;
1511     }
1512 
1513     pa_assert(data >= memory->ptr);
1514     pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
1515 
1516     *type = memory->type;
1517     *shm_id = memory->id;
1518     *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
1519     *size = b->length;
1520 
1521     pa_memblock_release(b);
1522 
1523     pa_atomic_inc(&e->pool->stat.n_exported);
1524     pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);
1525 
1526     return 0;
1527 }
1528