1 /***
2   This file is part of PulseAudio.
3 
4   Copyright 2004-2006 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6 
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as
9   published by the Free Software Foundation; either version 2.1 of the
10   License, or (at your option) any later version.
11 
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   Lesser General Public License for more details.
16 
17   You should have received a copy of the GNU Lesser General Public
18   License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19 ***/
20 
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24 
25 #ifndef LOG_TAG
26 #define LOG_TAG "Memblock"
27 #endif
28 
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <unistd.h>
33 #include <signal.h>
34 #include <errno.h>
35 
36 #ifdef HAVE_VALGRIND_MEMCHECK_H
37 #include <valgrind/memcheck.h>
38 #endif
39 
40 #include <pulse/xmalloc.h>
41 #include <pulse/def.h>
42 
43 #include <pulsecore/shm.h>
44 #include <pulsecore/log.h>
45 #include <pulsecore/hashmap.h>
46 #include <pulsecore/semaphore.h>
47 #include <pulsecore/mutex.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/refcnt.h>
50 #include <pulsecore/llist.h>
51 #include <pulsecore/flist.h>
52 #include <pulsecore/core-util.h>
53 #include <pulsecore/memtrap.h>
54 
55 #include "log/audio_log.h"
56 
57 #include "memblock.h"
58 
59 /* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
60  * note that the footprint is usually much smaller, since the data is
61  * stored in SHM and our OS does not commit the memory before we use
62  * it for the first time. */
63 #define PA_MEMPOOL_SLOTS_MAX 1024
64 #define PA_MEMPOOL_SLOT_SIZE (64*1024)
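/* Worked example: with the defaults above a full pool spans
 * PA_MEMPOOL_SLOTS_MAX * PA_MEMPOOL_SLOT_SIZE = 1024 * 64 KiB = 64 MiB
 * (assuming the slot size is already page-aligned; see pa_mempool_new()). */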
65 
66 #define PA_MEMEXPORT_SLOTS_MAX 128
67 
68 #define PA_MEMIMPORT_SLOTS_MAX 160
69 #define PA_MEMIMPORT_SEGMENTS_MAX 16
70 
71 struct pa_memblock {
72     PA_REFCNT_DECLARE; /* the reference counter */
73     pa_mempool *pool;
74 
75     pa_memblock_type_t type;
76 
77     bool read_only:1;
78     bool is_silence:1;
79 
80     pa_atomic_ptr_t data;
81     size_t length;
82 
83     pa_atomic_t n_acquired;
84     pa_atomic_t please_signal;
85 
86     union {
87         struct {
88             /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
89             pa_free_cb_t free_cb;
90             /* If type == PA_MEMBLOCK_USER this is passed as free_cb argument */
91             void *free_cb_data;
92         } user;
93 
94         struct {
95             uint32_t id;
96             pa_memimport_segment *segment;
97         } imported;
98     } per_type;
99 };
100 
101 struct pa_memimport_segment {
102     pa_memimport *import;
103     pa_shm memory;
104     pa_memtrap *trap;
105     unsigned n_blocks;
106     bool writable;
107 };
108 
109 /*
110  * If true, this segment's lifetime will not be limited by the
111  * number of active blocks (seg->n_blocks) using its shared memory.
112  * Rather, it will exist for the full lifetime of the memimport it
113  * is attached to.
114  *
115  * This is done to support memfd blocks transport.
116  *
117  * To transfer memfd-backed blocks without passing their fd every
118  * time, thus minimizing overhead and avoiding fd leaks, a command
119  * is sent with the memfd fd as ancil data very early on.
120  *
121  * This command has an ID that identifies the memfd region. Further
122  * block references are then exclusively done using this ID. On the
123  * receiving end, such logic is enabled by the memimport's segment
124  * hash and 'permanent' segments below.
125  */
126 static bool segment_is_permanent(pa_memimport_segment *seg) {
127     pa_assert(seg);
128     return seg->memory.type == PA_MEM_TYPE_SHARED_MEMFD;
129 }
130 
131 /* A collection of multiple segments */
132 struct pa_memimport {
133     pa_mutex *mutex;
134 
135     pa_mempool *pool;
136     pa_hashmap *segments;
137     pa_hashmap *blocks;
138 
139     /* Called whenever an imported memory block is no longer
140      * needed. */
141     pa_memimport_release_cb_t release_cb;
142     void *userdata;
143 
144     PA_LLIST_FIELDS(pa_memimport);
145 };
146 
147 struct memexport_slot {
148     PA_LLIST_FIELDS(struct memexport_slot);
149     pa_memblock *block;
150 };
151 
152 struct pa_memexport {
153     pa_mutex *mutex;
154     pa_mempool *pool;
155 
156     struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
157 
158     PA_LLIST_HEAD(struct memexport_slot, free_slots);
159     PA_LLIST_HEAD(struct memexport_slot, used_slots);
160     unsigned n_init;
161     unsigned baseidx;
162 
163     /* Called whenever a client from which we imported a memory block
164        which we in turn exported to another client dies and we need to
165        revoke the memory block accordingly */
166     pa_memexport_revoke_cb_t revoke_cb;
167     void *userdata;
168 
169     PA_LLIST_FIELDS(pa_memexport);
170 };
171 
172 struct pa_mempool {
173     /* Reference count the mempool
174      *
175      * Any block allocation from the pool itself, or even just imported from
176      * another process through SHM and attached to it (PA_MEMBLOCK_IMPORTED),
177      * shall increase the refcount.
178      *
179      * This is done for per-client mempools: global references to blocks in
180      * the pool, or just to attached ones, can still be lingering around when
181      * the client connection dies and all per-client objects are to be freed.
182      * That is, current PulseAudio design does not guarantee that the client
183      * mempool blocks are referenced only by client-specific objects.
184      *
185      * For further details, please check:
186      * https://lists.freedesktop.org/archives/pulseaudio-discuss/2016-February/025587.html
187      */
188     PA_REFCNT_DECLARE;
189 
190     pa_semaphore *semaphore;
191     pa_mutex *mutex;
192 
193     pa_shm memory;
194 
195     bool global;
196 
197     size_t block_size;
198     unsigned n_blocks;
199     bool is_remote_writable;
200 
201     pa_atomic_t n_init;
202 
203     PA_LLIST_HEAD(pa_memimport, imports);
204     PA_LLIST_HEAD(pa_memexport, exports);
205 
206     /* A list of free slots that may be reused */
207     pa_flist *free_slots;
208 
209     pa_mempool_stat stat;
210 };
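/* Illustrative sketch of the refcounting rule documented inside the struct
 * above: a block allocated from a pool keeps that pool alive even after the
 * creator drops its own reference.
 *
 *     pa_mempool *pool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0, true);
 *     pa_memblock *b = pa_memblock_new(pool, 4096);  // takes a pool reference
 *     pa_mempool_unref(pool);                        // pool survives: b still holds a ref
 *     pa_memblock_unref(b);                          // last block gone, pool is freed
 */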
211 
212 static void segment_detach(pa_memimport_segment *seg);
213 
214 PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
215 
216 /* No lock necessary */
217 static void stat_add(pa_memblock*b) {
218     pa_assert(b);
219     pa_assert(b->pool);
220 
221     pa_atomic_inc(&b->pool->stat.n_allocated);
222     pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
223 
224     pa_atomic_inc(&b->pool->stat.n_accumulated);
225     pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
226 
227     if (b->type == PA_MEMBLOCK_IMPORTED) {
228         pa_atomic_inc(&b->pool->stat.n_imported);
229         pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
230     }
231 
232     pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
233     pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
234 }
235 
236 /* No lock necessary */
237 static void stat_remove(pa_memblock *b) {
238     pa_assert(b);
239     pa_assert(b->pool);
240 
241     pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
242     pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
243 
244     pa_atomic_dec(&b->pool->stat.n_allocated);
245     pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
246 
247     if (b->type == PA_MEMBLOCK_IMPORTED) {
248         pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
249         pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
250 
251         pa_atomic_dec(&b->pool->stat.n_imported);
252         pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
253     }
254 
255     pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
256 }
257 
258 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
259 
260 /* No lock necessary */
261 pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
262     pa_memblock *b;
263 
264     pa_assert(p);
265     pa_assert(length);
266 
267     if (!(b = pa_memblock_new_pool(p, length)))
268         b = memblock_new_appended(p, length);
269 
270     return b;
271 }
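/* Minimal usage sketch (assuming "pool" was created with pa_mempool_new()):
 * passing (size_t) -1 asks for the largest length that still fits one slot.
 *
 *     pa_memblock *b = pa_memblock_new(pool, (size_t) -1);
 *     size_t l = pa_memblock_get_length(b);   // == pa_mempool_block_size_max(pool)
 *     ...
 *     pa_memblock_unref(b);
 */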
272 
273 /* No lock necessary */
274 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
275     pa_memblock *b;
276 
277     pa_assert(p);
278     pa_assert(length);
279 
280     /* If -1 is passed as length we choose the size for the caller. */
281 
282     if (length == (size_t) -1)
283         length = pa_mempool_block_size_max(p);
284 
285     b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
286     PA_REFCNT_INIT(b);
287     b->pool = p;
288     pa_mempool_ref(b->pool);
289     b->type = PA_MEMBLOCK_APPENDED;
290     b->read_only = b->is_silence = false;
291     pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
292     b->length = length;
293     pa_atomic_store(&b->n_acquired, 0);
294     pa_atomic_store(&b->please_signal, 0);
295 
296     stat_add(b);
297     return b;
298 }
299 
300 /* No lock necessary */
301 static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
302     struct mempool_slot *slot;
303     pa_assert(p);
304 
305     if (!(slot = pa_flist_pop(p->free_slots))) {
306         int idx;
307 
308         /* The free list was empty, we have to allocate a new entry */
309 
310         if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
311             pa_atomic_dec(&p->n_init);
312         else
313             slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
314 
315         if (!slot) {
316             if (pa_log_ratelimit(PA_LOG_DEBUG))
317                 AUDIO_DEBUG_LOG("Pool full");
318             pa_atomic_inc(&p->stat.n_pool_full);
319             return NULL;
320         }
321     }
322 
323 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
324 /*     if (PA_UNLIKELY(pa_in_valgrind())) { */
325 /*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
326 /*     } */
327 /* #endif */
328 
329     return slot;
330 }
331 
332 /* No lock necessary, totally redundant anyway */
333 static inline void* mempool_slot_data(struct mempool_slot *slot) {
334     return slot;
335 }
336 
337 /* No lock necessary */
338 static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
339     pa_assert(p);
340 
341     pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
342     pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
343 
344     return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
345 }
346 
347 /* No lock necessary */
348 static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
349     unsigned idx;
350 
351     if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
352         return NULL;
353 
354     return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
355 }
356 
357 /* No lock necessary */
358 bool pa_mempool_is_remote_writable(pa_mempool *p) {
359     pa_assert(p);
360     return p->is_remote_writable;
361 }
362 
363 /* No lock necessary */
364 void pa_mempool_set_is_remote_writable(pa_mempool *p, bool writable) {
365     pa_assert(p);
366     pa_assert(!writable || pa_mempool_is_shared(p));
367     p->is_remote_writable = writable;
368 }
369 
370 /* No lock necessary */
371 pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
372     pa_memblock *b = NULL;
373     struct mempool_slot *slot;
374     static int mempool_disable = 0;
375 
376     pa_assert(p);
377     pa_assert(length);
378 
379     if (mempool_disable == 0)
380         mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;
381 
382     if (mempool_disable > 0)
383         return NULL;
384 
385     /* If -1 is passed as length we choose the size for the caller: we
386      * take the largest size that fits in one of our slots. */
387 
388     if (length == (size_t) -1)
389         length = pa_mempool_block_size_max(p);
390 
391     if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {
392 
393         if (!(slot = mempool_allocate_slot(p)))
394             return NULL;
395 
396         b = mempool_slot_data(slot);
397         b->type = PA_MEMBLOCK_POOL;
398         pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
399 
400     } else if (p->block_size >= length) {
401 
402         if (!(slot = mempool_allocate_slot(p)))
403             return NULL;
404 
405         if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
406             b = pa_xnew(pa_memblock, 1);
407 
408         b->type = PA_MEMBLOCK_POOL_EXTERNAL;
409         pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));
410 
411     } else {
412         AUDIO_DEBUG_LOG("Memory block too large for pool: %{public}lu > %{public}lu",
413             (unsigned long) length, (unsigned long) p->block_size);
414         pa_atomic_inc(&p->stat.n_too_large_for_pool);
415         return NULL;
416     }
417 
418     PA_REFCNT_INIT(b);
419     b->pool = p;
420     pa_mempool_ref(b->pool);
421     b->read_only = b->is_silence = false;
422     b->length = length;
423     pa_atomic_store(&b->n_acquired, 0);
424     pa_atomic_store(&b->please_signal, 0);
425 
426     stat_add(b);
427     return b;
428 }
429 
430 /* No lock necessary */
431 pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, bool read_only) {
432     pa_memblock *b;
433 
434     pa_assert(p);
435     pa_assert(d);
436     pa_assert(length != (size_t) -1);
437     pa_assert(length);
438 
439     if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
440         b = pa_xnew(pa_memblock, 1);
441 
442     PA_REFCNT_INIT(b);
443     b->pool = p;
444     pa_mempool_ref(b->pool);
445     b->type = PA_MEMBLOCK_FIXED;
446     b->read_only = read_only;
447     b->is_silence = false;
448     pa_atomic_ptr_store(&b->data, d);
449     b->length = length;
450     pa_atomic_store(&b->n_acquired, 0);
451     pa_atomic_store(&b->please_signal, 0);
452 
453     stat_add(b);
454     return b;
455 }
456 
457 /* No lock necessary */
458 pa_memblock *pa_memblock_new_user(
459         pa_mempool *p,
460         void *d,
461         size_t length,
462         pa_free_cb_t free_cb,
463         void *free_cb_data,
464         bool read_only) {
465     pa_memblock *b;
466 
467     pa_assert(p);
468     pa_assert(d);
469     pa_assert(length);
470     pa_assert(length != (size_t) -1);
471     pa_assert(free_cb);
472 
473     if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
474         b = pa_xnew(pa_memblock, 1);
475 
476     PA_REFCNT_INIT(b);
477     b->pool = p;
478     pa_mempool_ref(b->pool);
479     b->type = PA_MEMBLOCK_USER;
480     b->read_only = read_only;
481     b->is_silence = false;
482     pa_atomic_ptr_store(&b->data, d);
483     b->length = length;
484     pa_atomic_store(&b->n_acquired, 0);
485     pa_atomic_store(&b->please_signal, 0);
486 
487     b->per_type.user.free_cb = free_cb;
488     b->per_type.user.free_cb_data = free_cb_data;
489 
490     stat_add(b);
491     return b;
492 }
493 
494 /* No lock necessary */
495 bool pa_memblock_is_ours(pa_memblock *b) {
496     pa_assert(b);
497     pa_assert(PA_REFCNT_VALUE(b) > 0);
498 
499     return b->type != PA_MEMBLOCK_IMPORTED;
500 }
501 
502 /* No lock necessary */
503 bool pa_memblock_is_read_only(pa_memblock *b) {
504     pa_assert(b);
505     pa_assert(PA_REFCNT_VALUE(b) > 0);
506 
507     return b->read_only || PA_REFCNT_VALUE(b) > 1;
508 }
509 
510 /* No lock necessary */
511 bool pa_memblock_is_silence(pa_memblock *b) {
512     pa_assert(b);
513     pa_assert(PA_REFCNT_VALUE(b) > 0);
514 
515     return b->is_silence;
516 }
517 
518 /* No lock necessary */
519 void pa_memblock_set_is_silence(pa_memblock *b, bool v) {
520     pa_assert(b);
521     pa_assert(PA_REFCNT_VALUE(b) > 0);
522 
523     b->is_silence = v;
524 }
525 
526 /* No lock necessary */
527 bool pa_memblock_ref_is_one(pa_memblock *b) {
528     int r;
529     pa_assert(b);
530 
531     pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);
532 
533     return r == 1;
534 }
535 
536 /* No lock necessary */
537 void* pa_memblock_acquire(pa_memblock *b) {
538     pa_assert(b);
539     pa_assert(PA_REFCNT_VALUE(b) > 0);
540 
541     pa_atomic_inc(&b->n_acquired);
542 
543     return pa_atomic_ptr_load(&b->data);
544 }
545 
546 /* No lock necessary */
547 void *pa_memblock_acquire_chunk(const pa_memchunk *c) {
548     pa_assert(c);
549 
550     return (uint8_t *) pa_memblock_acquire(c->memblock) + c->index;
551 }
552 
553 /* No lock necessary, in corner cases locks on its own */
554 void pa_memblock_release(pa_memblock *b) {
555     int r;
556     pa_assert(b);
557     pa_assert(PA_REFCNT_VALUE(b) > 0);
558 
559     r = pa_atomic_dec(&b->n_acquired);
560     pa_assert(r >= 1);
561 
562     /* Signal a waiting thread that this memblock is no longer used */
563     if (r == 1 && pa_atomic_load(&b->please_signal))
564         pa_semaphore_post(b->pool->semaphore);
565 }
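/* Sketch of the acquire/release discipline ("b", "src" and "len" are assumed
 * caller context): every access to the payload is bracketed, so memblock_wait()
 * below can tell when no thread is still touching the data.
 *
 *     void *dst = pa_memblock_acquire(b);
 *     memcpy(dst, src, len);       // len <= pa_memblock_get_length(b)
 *     pa_memblock_release(b);      // may wake a waiter via the pool semaphore
 */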
566 
567 size_t pa_memblock_get_length(pa_memblock *b) {
568     pa_assert(b);
569     pa_assert(PA_REFCNT_VALUE(b) > 0);
570 
571     return b->length;
572 }
573 
574 /* Note! Always unref the returned pool after use */
575 pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
576     pa_assert(b);
577     pa_assert(PA_REFCNT_VALUE(b) > 0);
578     pa_assert(b->pool);
579 
580     pa_mempool_ref(b->pool);
581     return b->pool;
582 }
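/* As the note above says, the returned pool carries a new reference; a
 * minimal sketch:
 *
 *     pa_mempool *pool = pa_memblock_get_pool(b);
 *     ... use pool ...
 *     pa_mempool_unref(pool);
 */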
583 
584 /* No lock necessary */
585 pa_memblock* pa_memblock_ref(pa_memblock*b) {
586     pa_assert(b);
587     pa_assert(PA_REFCNT_VALUE(b) > 0);
588 
589     PA_REFCNT_INC(b);
590     return b;
591 }
592 
593 static void memblock_free(pa_memblock *b) {
594     pa_mempool *pool;
595 
596     pa_assert(b);
597     pa_assert(b->pool);
598     pa_assert(pa_atomic_load(&b->n_acquired) == 0);
599 
600     pool = b->pool;
601     stat_remove(b);
602 
603     switch (b->type) {
604         case PA_MEMBLOCK_USER:
605             pa_assert(b->per_type.user.free_cb);
606             b->per_type.user.free_cb(b->per_type.user.free_cb_data);
607 
608             /* Fall through */
609 
610         case PA_MEMBLOCK_FIXED:
611             if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
612                 pa_xfree(b);
613 
614             break;
615 
616         case PA_MEMBLOCK_APPENDED:
617 
618             /* We could attach it to unused_memblocks, but that would
619              * probably waste some considerable amount of memory */
620             pa_xfree(b);
621             break;
622 
623         case PA_MEMBLOCK_IMPORTED: {
624             pa_memimport_segment *segment;
625             pa_memimport *import;
626 
627             /* FIXME! This should be implemented lock-free */
628 
629             pa_assert_se(segment = b->per_type.imported.segment);
630             pa_assert_se(import = segment->import);
631 
632             pa_mutex_lock(import->mutex);
633 
634             pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
635 
636             pa_assert(segment->n_blocks >= 1);
637             if (-- segment->n_blocks <= 0)
638                 segment_detach(segment);
639 
640             pa_mutex_unlock(import->mutex);
641 
642             import->release_cb(import, b->per_type.imported.id, import->userdata);
643 
644             if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
645                 pa_xfree(b);
646 
647             break;
648         }
649 
650         case PA_MEMBLOCK_POOL_EXTERNAL:
651         case PA_MEMBLOCK_POOL: {
652             struct mempool_slot *slot;
653             bool call_free;
654 
655             pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));
656 
657             call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
658 
659 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
660 /*             if (PA_UNLIKELY(pa_in_valgrind())) { */
661 /*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
662 /*             } */
663 /* #endif */
664 
665             /* The free list dimensions should easily allow all slots
666              * to fit in, hence try harder if pushing this slot into
667              * the free list fails */
668             while (pa_flist_push(b->pool->free_slots, slot) < 0)
669                 ;
670 
671             if (call_free)
672                 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
673                     pa_xfree(b);
674 
675             break;
676         }
677 
678         case PA_MEMBLOCK_TYPE_MAX:
679         default:
680             pa_assert_not_reached();
681     }
682 
683     pa_mempool_unref(pool);
684 }
685 
686 /* No lock necessary */
687 void pa_memblock_unref(pa_memblock*b) {
688     pa_assert(b);
689     pa_assert(PA_REFCNT_VALUE(b) > 0);
690 
691     if (PA_REFCNT_DEC(b) > 0)
692         return;
693 
694     memblock_free(b);
695 }
696 
697 /* Self locked */
698 static void memblock_wait(pa_memblock *b) {
699     pa_assert(b);
700 
701     if (pa_atomic_load(&b->n_acquired) > 0) {
702         /* We need to wait until all threads gave up access to the
703          * memory block before we can go on. Unfortunately this means
704          * that we have to lock and wait here. Sniff! */
705 
706         pa_atomic_inc(&b->please_signal);
707 
708         while (pa_atomic_load(&b->n_acquired) > 0)
709             pa_semaphore_wait(b->pool->semaphore);
710 
711         pa_atomic_dec(&b->please_signal);
712     }
713 }
714 
715 /* No lock necessary. This function is not multiple caller safe! */
716 static void memblock_make_local(pa_memblock *b) {
717     pa_assert(b);
718 
719     pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
720 
721     if (b->length <= b->pool->block_size) {
722         struct mempool_slot *slot;
723 
724         if ((slot = mempool_allocate_slot(b->pool))) {
725             void *new_data;
726             /* We can move it into a local pool, perfect! */
727 
728             new_data = mempool_slot_data(slot);
729             memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
730             pa_atomic_ptr_store(&b->data, new_data);
731 
732             b->type = PA_MEMBLOCK_POOL_EXTERNAL;
733             b->read_only = false;
734 
735             goto finish;
736         }
737     }
738 
739     /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
740     b->per_type.user.free_cb = pa_xfree;
741     pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
742     b->per_type.user.free_cb_data = pa_atomic_ptr_load(&b->data);
743 
744     b->type = PA_MEMBLOCK_USER;
745     b->read_only = false;
746 
747 finish:
748     pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
749     pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
750     memblock_wait(b);
751 }
752 
753 /* No lock necessary. This function is not multiple caller safe */
754 void pa_memblock_unref_fixed(pa_memblock *b) {
755     pa_assert(b);
756     pa_assert(PA_REFCNT_VALUE(b) > 0);
757     pa_assert(b->type == PA_MEMBLOCK_FIXED);
758 
759     if (PA_REFCNT_VALUE(b) > 1)
760         memblock_make_local(b);
761 
762     pa_memblock_unref(b);
763 }
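/* Sketch of the FIXED-block pattern served by pa_memblock_new_fixed() and the
 * function above (caller-side context assumed): wrap caller-owned memory
 * without copying, then call pa_memblock_unref_fixed() before that memory
 * becomes invalid so any remaining references get a private copy.
 *
 *     uint8_t buf[256];
 *     pa_memblock *b = pa_memblock_new_fixed(pool, buf, sizeof(buf), true);
 *     ... hand b to consumers that may keep additional references ...
 *     pa_memblock_unref_fixed(b);  // buf may now be reused or go out of scope
 */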
764 
765 /* No lock necessary. */
766 pa_memblock *pa_memblock_will_need(pa_memblock *b) {
767     void *p;
768 
769     pa_assert(b);
770     pa_assert(PA_REFCNT_VALUE(b) > 0);
771 
772     p = pa_memblock_acquire(b);
773     pa_will_need(p, b->length);
774     pa_memblock_release(b);
775 
776     return b;
777 }
778 
779 /* Self-locked. This function is not multiple-caller safe */
780 static void memblock_replace_import(pa_memblock *b) {
781     pa_memimport_segment *segment;
782     pa_memimport *import;
783 
784     pa_assert(b);
785     pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
786 
787     pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
788     pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
789     pa_atomic_dec(&b->pool->stat.n_imported);
790     pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
791 
792     pa_assert_se(segment = b->per_type.imported.segment);
793     pa_assert_se(import = segment->import);
794 
795     pa_mutex_lock(import->mutex);
796 
797     pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
798 
799     memblock_make_local(b);
800 
801     pa_assert(segment->n_blocks >= 1);
802     if (-- segment->n_blocks <= 0)
803         segment_detach(segment);
804 
805     pa_mutex_unlock(import->mutex);
806 }
807 
808 /*@per_client: This is a security measure. By default this should
809  * be set to true where the created mempool is never shared with more
810  * than one client in the system. Set this to false if a global
811  * mempool, shared with all existing and future clients, is required.
812  *
813  * NOTE-1: Do not create any further global mempools! They allow data
814  * leaks between clients and thus conflict with the xdg-app containers
815  * model. They also complicate the handling of memfd-based pools.
816  *
817  * NOTE-2: Almost all mempools are now created on a per client basis.
818  * The only exception is the pa_core's mempool which is still shared
819  * between all clients of the system.
820  *
821  * Beside security issues, special marking for global mempools is
822  * required for memfd communication. To avoid fd leaks, memfd pools
823  * are registered with the connection pstream to create an ID<->memfd
824  * mapping on both PA endpoints. Such memory regions are then always
825  * referenced by their IDs and never by their fds and thus their fds
826  * can be quickly closed later.
827  *
828  * Unfortunately this scheme cannot work with global pools since the
829  * ID registration mechanism needs to happen for each newly connected
830  * client, and thus needs more special handling. That is, the pool's
831  * fd has to stay open at all times :-(
832  *
833  * TODO-1: Transform the global core mempool to a per-client one
834  * TODO-2: Remove global mempools support */
835 pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size, bool per_client) {
836     AUDIO_DEBUG_LOG("pa_mempool_new:type %{public}d, size %{public}zu, per_client %{public}d,", type, size, per_client);
837     pa_mempool *p;
838     char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
839     const size_t page_size = pa_page_size();
840 
841     p = pa_xnew0(pa_mempool, 1);
842     PA_REFCNT_INIT(p);
843 
844     p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
845     if (p->block_size < page_size)
846         p->block_size = page_size;
847 
848     if (size <= 0)
849         p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
850     else {
851         p->n_blocks = (unsigned) (size / p->block_size);
852 
853         if (p->n_blocks < 2)
854             p->n_blocks = 2;
855     }
856 
857     if (pa_shm_create_rw(&p->memory, type, p->n_blocks * p->block_size, 0700) < 0) {
858         pa_xfree(p);
859         return NULL;
860     }
861 
862     AUDIO_DEBUG_LOG("Using %{public}s memory pool with %{public}u slots of size %{public}s each, total size is"
863                  "%{public}s, maximum usable slot size is %{public}lu",
864                  pa_mem_type_to_string(type),
865                  p->n_blocks,
866                  pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
867                  pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
868                  (unsigned long) pa_mempool_block_size_max(p));
869 
870     p->global = !per_client;
871 
872     pa_atomic_store(&p->n_init, 0);
873 
874     PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
875     PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
876 
877     p->mutex = pa_mutex_new(true, true);
878     p->semaphore = pa_semaphore_new(0);
879 
880     p->free_slots = pa_flist_new(p->n_blocks);
881 
882     return p;
883 }
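/* Per-client pool creation as recommended in the comment block above
 * (illustrative sketch; error handling kept minimal):
 *
 *     // memfd-backed, default size (1024 slots of 64 KiB), never shared globally
 *     pa_mempool *pool = pa_mempool_new(PA_MEM_TYPE_SHARED_MEMFD, 0, true);
 *     if (!pool)
 *         return -1;  // SHM creation failed
 */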
884 
885 static void mempool_free(pa_mempool *p) {
886     pa_assert(p);
887 
888     pa_mutex_lock(p->mutex);
889 
890     while (p->imports)
891         pa_memimport_free(p->imports);
892 
893     while (p->exports)
894         pa_memexport_free(p->exports);
895 
896     pa_mutex_unlock(p->mutex);
897 
898     pa_flist_free(p->free_slots, NULL);
899 
900     if (pa_atomic_load(&p->stat.n_allocated) > 0) {
901 
902         /* Ouch, somebody is retaining a memory block reference! */
903 
904 #ifdef DEBUG_REF
905         unsigned i;
906         pa_flist *list;
907 
908         /* Let's try to find at least one of those leaked memory blocks */
909 
910         list = pa_flist_new(p->n_blocks);
911 
912         for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
913             struct mempool_slot *slot;
914             pa_memblock *b, *k;
915 
916             slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
917             b = mempool_slot_data(slot);
918 
919             while ((k = pa_flist_pop(p->free_slots))) {
920                 while (pa_flist_push(list, k) < 0)
921                     ;
922 
923                 if (b == k)
924                     break;
925             }
926 
927             if (!k)
928                 AUDIO_ERR_LOG("REF: Leaked memory block %{public}p", b);
929 
930             while ((k = pa_flist_pop(list)))
931                 while (pa_flist_push(p->free_slots, k) < 0)
932                     ;
933         }
934 
935         pa_flist_free(list, NULL);
936 
937 #endif
938 
939         AUDIO_ERR_LOG("Memory pool destroyed but not all memory blocks freed! %{public}u remain.",
940             pa_atomic_load(&p->stat.n_allocated));
941 
942 /*         PA_DEBUG_TRAP; */
943     }
944 
945     pa_shm_free(&p->memory);
946 
947     pa_mutex_free(p->mutex);
948     pa_semaphore_free(p->semaphore);
949 
950     pa_xfree(p);
951 }
952 
953 /* No lock necessary */
954 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
955     pa_assert(p);
956 
957     return &p->stat;
958 }
959 
960 /* No lock necessary */
961 size_t pa_mempool_block_size_max(pa_mempool *p) {
962     pa_assert(p);
963 
964     return p->block_size - PA_ALIGN(sizeof(pa_memblock));
965 }
966 
967 /* No lock necessary */
968 void pa_mempool_vacuum(pa_mempool *p) {
969     struct mempool_slot *slot;
970     pa_flist *list;
971 
972     pa_assert(p);
973 
974     list = pa_flist_new(p->n_blocks);
975 
976     while ((slot = pa_flist_pop(p->free_slots)))
977         while (pa_flist_push(list, slot) < 0)
978             ;
979 
980     while ((slot = pa_flist_pop(list))) {
981         pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
982 
983         while (pa_flist_push(p->free_slots, slot))
984             ;
985     }
986 
987     pa_flist_free(list, NULL);
988 }
989 
990 /* No lock necessary */
991 bool pa_mempool_is_shared(pa_mempool *p) {
992     pa_assert(p);
993 
994     return pa_mem_type_is_shared(p->memory.type);
995 }
996 
997 /* No lock necessary */
998 bool pa_mempool_is_memfd_backed(const pa_mempool *p) {
999     pa_assert(p);
1000 
1001     return (p->memory.type == PA_MEM_TYPE_SHARED_MEMFD);
1002 }
1003 
1004 /* No lock necessary */
1005 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
1006     pa_assert(p);
1007 
1008     if (!pa_mempool_is_shared(p))
1009         return -1;
1010 
1011     *id = p->memory.id;
1012 
1013     return 0;
1014 }
1015 
1016 pa_mempool* pa_mempool_ref(pa_mempool *p) {
1017     pa_assert(p);
1018     pa_assert(PA_REFCNT_VALUE(p) > 0);
1019 
1020     PA_REFCNT_INC(p);
1021     return p;
1022 }
1023 
1024 void pa_mempool_unref(pa_mempool *p) {
1025     pa_assert(p);
1026     pa_assert(PA_REFCNT_VALUE(p) > 0);
1027 
1028     if (PA_REFCNT_DEC(p) <= 0)
1029         mempool_free(p);
1030 }
1031 
1032 /* No lock necessary
1033  * Check pa_mempool_new() for per-client vs. global mempools */
1034 bool pa_mempool_is_global(pa_mempool *p) {
1035     pa_assert(p);
1036 
1037     return p->global;
1038 }
1039 
1040 /* No lock necessary
1041  * Check pa_mempool_new() for per-client vs. global mempools */
1042 bool pa_mempool_is_per_client(pa_mempool *p) {
1043     return !pa_mempool_is_global(p);
1044 }
1045 
1046 /* Self-locked
1047  *
1048  * This is only for per-client mempools!
1049  *
1050  * After this method's return, the caller owns the file descriptor
1051  * and is responsible for closing it at the appropriate time. This
1052  * should only be called once during a mempool's lifetime.
1053  *
1054  * Check pa_shm->fd and pa_mempool_new() for further context. */
1055 int pa_mempool_take_memfd_fd(pa_mempool *p) {
1056     int memfd_fd;
1057 
1058     pa_assert(p);
1059     pa_assert(pa_mempool_is_shared(p));
1060     pa_assert(pa_mempool_is_memfd_backed(p));
1061     pa_assert(pa_mempool_is_per_client(p));
1062 
1063     pa_mutex_lock(p->mutex);
1064 
1065     memfd_fd = p->memory.fd;
1066     p->memory.fd = -1;
1067 
1068     pa_mutex_unlock(p->mutex);
1069 
1070     pa_assert(memfd_fd != -1);
1071     return memfd_fd;
1072 }
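/* Sketch of the ownership transfer described above (how the fd travels to the
 * other endpoint is assumed; normally the pstream sends it as ancillary data):
 *
 *     int fd = pa_mempool_take_memfd_fd(pool);  // per-client pools only, once
 *     ... ship fd to the peer ...
 *     close(fd);                                // caller owns it, so caller closes it
 */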
1073 
1074 /* No lock necessary
1075  *
1076  * This is only for global mempools!
1077  *
1078  * Global mempools have their memfd descriptor always open. DO NOT
1079  * close the returned descriptor by your own.
1080  *
1081  * Check pa_mempool_new() for further context. */
1082 int pa_mempool_get_memfd_fd(pa_mempool *p) {
1083     int memfd_fd;
1084 
1085     pa_assert(p);
1086     pa_assert(pa_mempool_is_shared(p));
1087     pa_assert(pa_mempool_is_memfd_backed(p));
1088     pa_assert(pa_mempool_is_global(p));
1089 
1090     memfd_fd = p->memory.fd;
1091     pa_assert(memfd_fd != -1);
1092 
1093     return memfd_fd;
1094 }
1095 
1096 /* For receiving blocks from other nodes */
1097 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
1098     pa_memimport *i;
1099 
1100     pa_assert(p);
1101     pa_assert(cb);
1102 
1103     i = pa_xnew(pa_memimport, 1);
1104     i->mutex = pa_mutex_new(true, true);
1105     i->pool = p;
1106     pa_mempool_ref(i->pool);
1107     i->segments = pa_hashmap_new(NULL, NULL);
1108     i->blocks = pa_hashmap_new(NULL, NULL);
1109     i->release_cb = cb;
1110     i->userdata = userdata;
1111 
1112     pa_mutex_lock(p->mutex);
1113     PA_LLIST_PREPEND(pa_memimport, p->imports, i);
1114     pa_mutex_unlock(p->mutex);
1115 
1116     return i;
1117 }
1118 
1119 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
1120 
1121 /* Should be called locked
1122  * Caller owns passed @memfd_fd and must close it down when appropriate. */
1123 static pa_memimport_segment* segment_attach(pa_memimport *i, pa_mem_type_t type, uint32_t shm_id,
1124                                             int memfd_fd, bool writable) {
1125     pa_memimport_segment* seg;
1126     pa_assert(pa_mem_type_is_shared(type));
1127 
1128     if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
1129         return NULL;
1130 
1131     seg = pa_xnew0(pa_memimport_segment, 1);
1132 
1133     if (pa_shm_attach(&seg->memory, type, shm_id, memfd_fd, writable) < 0) {
1134         pa_xfree(seg);
1135         return NULL;
1136     }
1137 
1138     seg->writable = writable;
1139     seg->import = i;
1140     seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);
1141 
1142     pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
1143     return seg;
1144 }
1145 
1146 /* Should be called locked */
1147 static void segment_detach(pa_memimport_segment *seg) {
1148     pa_assert(seg);
1149     pa_assert(seg->n_blocks == (segment_is_permanent(seg) ? 1u : 0u));
1150 
1151     pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
1152     pa_shm_free(&seg->memory);
1153 
1154     if (seg->trap)
1155         pa_memtrap_remove(seg->trap);
1156 
1157     pa_xfree(seg);
1158 }
1159 
1160 /* Self-locked. Not multiple-caller safe */
1161 void pa_memimport_free(pa_memimport *i) {
1162     pa_memexport *e;
1163     pa_memblock *b;
1164     pa_memimport_segment *seg;
1165     void *state = NULL;
1166 
1167     pa_assert(i);
1168 
1169     pa_mutex_lock(i->mutex);
1170 
1171     while ((b = pa_hashmap_first(i->blocks)))
1172         memblock_replace_import(b);
1173 
1174     /* Permanent segments exist for the lifetime of the memimport. Now
1175      * that we're freeing the memimport itself, clear them all up.
1176      *
1177      * Careful! segment_detach() internally removes itself from the
1178      * memimport's hash; the same hash we're now using for iteration. */
1179     PA_HASHMAP_FOREACH(seg, i->segments, state) {
1180         if (segment_is_permanent(seg))
1181             segment_detach(seg);
1182     }
1183     pa_assert(pa_hashmap_size(i->segments) == 0);
1184 
1185     pa_mutex_unlock(i->mutex);
1186 
1187     pa_mutex_lock(i->pool->mutex);
1188 
1189     /* If we've exported this block further we need to revoke that export */
1190     for (e = i->pool->exports; e; e = e->next)
1191         memexport_revoke_blocks(e, i);
1192 
1193     PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
1194 
1195     pa_mutex_unlock(i->pool->mutex);
1196 
1197     pa_mempool_unref(i->pool);
1198     pa_hashmap_free(i->blocks);
1199     pa_hashmap_free(i->segments);
1200 
1201     pa_mutex_free(i->mutex);
1202 
1203     pa_xfree(i);
1204 }
1205 
1206 /* Create a new memimport's memfd segment entry, with passed SHM ID
1207  * as key and the newly-created segment (with its mmap()-ed memfd
1208  * memory region) as its value.
1209  *
1210  * Note! check comments at 'pa_shm->fd', 'segment_is_permanent()',
1211  * and 'pa_pstream_register_memfd_mempool()' for further details.
1212  *
1213  * Caller owns passed @memfd_fd and must close it down when appropriate. */
1214 int pa_memimport_attach_memfd(pa_memimport *i, uint32_t shm_id, int memfd_fd, bool writable) {
1215     pa_memimport_segment *seg;
1216     int ret = -1;
1217 
1218     pa_assert(i);
1219     pa_assert(memfd_fd != -1);
1220 
1221     pa_mutex_lock(i->mutex);
1222 
1223     if (!(seg = segment_attach(i, PA_MEM_TYPE_SHARED_MEMFD, shm_id, memfd_fd, writable)))
1224         goto finish;
1225 
1226     /* n_blocks acts as a segment reference count. To avoid the segment
1227      * being deleted when receiving silent memchunks, etc., mark our
1228      * permanent presence by incrementing that refcount. */
1229     seg->n_blocks++;
1230 
1231     pa_assert(segment_is_permanent(seg));
1232     ret = 0;
1233 
1234 finish:
1235     pa_mutex_unlock(i->mutex);
1236     return ret;
1237 }
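/* Receive-side sketch of the memfd flow described above (how shm_id, memfd_fd,
 * block_id, offset and size arrive is assumed; normally the pstream delivers
 * them): attach the region once, then reference blocks by ID only.
 *
 *     pa_memimport_attach_memfd(imp, shm_id, memfd_fd, false);
 *     close(memfd_fd);  // caller owns the fd (see note above); the mmap()ed region stays valid
 *
 *     pa_memblock *b = pa_memimport_get(imp, PA_MEM_TYPE_SHARED_MEMFD,
 *                                       block_id, shm_id, offset, size, false);
 */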
1238 
1239 /* Self-locked */
1240 pa_memblock* pa_memimport_get(pa_memimport *i, pa_mem_type_t type, uint32_t block_id, uint32_t shm_id,
1241                               size_t offset, size_t size, bool writable) {
1242     pa_memblock *b = NULL;
1243     pa_memimport_segment *seg;
1244 
1245     pa_assert(i);
1246     pa_assert(pa_mem_type_is_shared(type));
1247 
1248     pa_mutex_lock(i->mutex);
1249 
1250     if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
1251         pa_memblock_ref(b);
1252         goto finish;
1253     }
1254 
1255     if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
1256         goto finish;
1257 
1258     if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id)))) {
1259         if (type == PA_MEM_TYPE_SHARED_MEMFD) {
1260             AUDIO_ERR_LOG("Bailing out! No cached memimport segment for memfd ID %{public}u", shm_id);
1261             AUDIO_ERR_LOG("Did the other PA endpoint forget registering its memfd pool?");
1262             goto finish;
1263         }
1264 
1265         pa_assert(type == PA_MEM_TYPE_SHARED_POSIX);
1266         if (!(seg = segment_attach(i, type, shm_id, -1, writable)))
1267             goto finish;
1268     }
1269 
1270     if (writable && !seg->writable) {
1271         AUDIO_ERR_LOG("Cannot import cached segment in write mode - previously mapped as read-only");
1272         goto finish;
1273     }
1274 
1275     if (offset+size > seg->memory.size)
1276         goto finish;
1277 
1278     if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
1279         b = pa_xnew(pa_memblock, 1);
1280 
1281     PA_REFCNT_INIT(b);
1282     b->pool = i->pool;
1283     pa_mempool_ref(b->pool);
1284     b->type = PA_MEMBLOCK_IMPORTED;
1285     b->read_only = !writable;
1286     b->is_silence = false;
1287     pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
1288     b->length = size;
1289     pa_atomic_store(&b->n_acquired, 0);
1290     pa_atomic_store(&b->please_signal, 0);
1291     b->per_type.imported.id = block_id;
1292     b->per_type.imported.segment = seg;
1293 
1294     pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
1295 
1296     seg->n_blocks++;
1297 
1298     stat_add(b);
1299 
1300 finish:
1301     pa_mutex_unlock(i->mutex);
1302 
1303     return b;
1304 }
1305 
1306 int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
1307     pa_memblock *b;
1308     int ret = 0;
1309     pa_assert(i);
1310 
1311     pa_mutex_lock(i->mutex);
1312 
1313     if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
1314         ret = -1;
1315         goto finish;
1316     }
1317 
1318     memblock_replace_import(b);
1319 
1320 finish:
1321     pa_mutex_unlock(i->mutex);
1322 
1323     return ret;
1324 }
1325 
1326 /* For sending blocks to other nodes */
1327 pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
1328     pa_memexport *e;
1329 
1330     static pa_atomic_t export_baseidx = PA_ATOMIC_INIT(0);
1331 
1332     pa_assert(p);
1333     pa_assert(cb);
1334 
1335     if (!pa_mempool_is_shared(p))
1336         return NULL;
1337 
1338     e = pa_xnew(pa_memexport, 1);
1339     e->mutex = pa_mutex_new(true, true);
1340     e->pool = p;
1341     pa_mempool_ref(e->pool);
1342     PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
1343     PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
1344     e->n_init = 0;
1345     e->revoke_cb = cb;
1346     e->userdata = userdata;
1347 
1348     pa_mutex_lock(p->mutex);
1349 
1350     PA_LLIST_PREPEND(pa_memexport, p->exports, e);
1351     e->baseidx = (uint32_t) pa_atomic_add(&export_baseidx, PA_MEMEXPORT_SLOTS_MAX);
1352 
1353     pa_mutex_unlock(p->mutex);
1354     return e;
1355 }
1356 
1357 void pa_memexport_free(pa_memexport *e) {
1358     pa_assert(e);
1359 
1360     pa_mutex_lock(e->mutex);
1361     while (e->used_slots)
1362         pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots + e->baseidx));
1363     pa_mutex_unlock(e->mutex);
1364 
1365     pa_mutex_lock(e->pool->mutex);
1366     PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
1367     pa_mutex_unlock(e->pool->mutex);
1368 
1369     pa_mempool_unref(e->pool);
1370     pa_mutex_free(e->mutex);
1371     pa_xfree(e);
1372 }
1373 
1374 /* Self-locked */
1375 int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
1376     pa_memblock *b;
1377 
1378     pa_assert(e);
1379 
1380     pa_mutex_lock(e->mutex);
1381 
1382     if (id < e->baseidx)
1383         goto fail;
1384     id -= e->baseidx;
1385 
1386     if (id >= e->n_init)
1387         goto fail;
1388 
1389     if (!e->slots[id].block)
1390         goto fail;
1391 
1392     b = e->slots[id].block;
1393     e->slots[id].block = NULL;
1394 
1395     PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
1396     PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
1397 
1398     pa_mutex_unlock(e->mutex);
1399 
1400 /*     pa_log("Processing release for %u", id); */
1401 
1402     pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
1403     pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
1404 
1405     pa_atomic_dec(&e->pool->stat.n_exported);
1406     pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
1407 
1408     pa_memblock_unref(b);
1409 
1410     return 0;
1411 
1412 fail:
1413     pa_mutex_unlock(e->mutex);
1414 
1415     return -1;
1416 }
1417 
1418 /* Self-locked */
1419 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
1420     struct memexport_slot *slot, *next;
1421     pa_assert(e);
1422     pa_assert(i);
1423 
1424     pa_mutex_lock(e->mutex);
1425 
1426     for (slot = e->used_slots; slot; slot = next) {
1427         uint32_t idx;
1428         next = slot->next;
1429 
1430         if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
1431             slot->block->per_type.imported.segment->import != i)
1432             continue;
1433 
1434         idx = (uint32_t) (slot - e->slots + e->baseidx);
1435         e->revoke_cb(e, idx, e->userdata);
1436         pa_memexport_process_release(e, idx);
1437     }
1438 
1439     pa_mutex_unlock(e->mutex);
1440 }
1441 
1442 /* No lock necessary */
1443 static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
1444     pa_memblock *n;
1445 
1446     pa_assert(p);
1447     pa_assert(b);
1448 
1449     if (b->type == PA_MEMBLOCK_IMPORTED ||
1450         b->type == PA_MEMBLOCK_POOL ||
1451         b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
1452         pa_assert(b->pool == p);
1453         return pa_memblock_ref(b);
1454     }
1455 
1456     if (!(n = pa_memblock_new_pool(p, b->length)))
1457         return NULL;
1458 
1459     memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
1460     return n;
1461 }
1462 
1463 /* Self-locked */
1464 int pa_memexport_put(pa_memexport *e, pa_memblock *b, pa_mem_type_t *type, uint32_t *block_id,
1465                      uint32_t *shm_id, size_t *offset, size_t * size) {
1466     pa_shm  *memory;
1467     struct memexport_slot *slot;
1468     void *data;
1469 
1470     pa_assert(e);
1471     pa_assert(b);
1472     pa_assert(type);
1473     pa_assert(block_id);
1474     pa_assert(shm_id);
1475     pa_assert(offset);
1476     pa_assert(size);
1477     pa_assert(b->pool == e->pool);
1478 
1479     if (!(b = memblock_shared_copy(e->pool, b)))
1480         return -1;
1481 
1482     pa_mutex_lock(e->mutex);
1483 
1484     if (e->free_slots) {
1485         slot = e->free_slots;
1486         PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
1487     } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
1488         slot = &e->slots[e->n_init++];
1489     else {
1490         pa_mutex_unlock(e->mutex);
1491         pa_memblock_unref(b);
1492         return -1;
1493     }
1494 
1495     PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
1496     slot->block = b;
1497     *block_id = (uint32_t) (slot - e->slots + e->baseidx);
1498 
1499     pa_mutex_unlock(e->mutex);
1500 /*     pa_log("Got block id %u", *block_id); */
1501 
1502     data = pa_memblock_acquire(b);
1503 
1504     if (b->type == PA_MEMBLOCK_IMPORTED) {
1505         pa_assert(b->per_type.imported.segment);
1506         memory = &b->per_type.imported.segment->memory;
1507     } else {
1508         pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
1509         pa_assert(b->pool);
1510         pa_assert(pa_mempool_is_shared(b->pool));
1511         memory = &b->pool->memory;
1512     }
1513 
1514     pa_assert(data >= memory->ptr);
1515     pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
1516 
1517     *type = memory->type;
1518     *shm_id = memory->id;
1519     *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
1520     *size = b->length;
1521 
1522     pa_memblock_release(b);
1523 
1524     pa_atomic_inc(&e->pool->stat.n_exported);
1525     pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);
1526 
1527     return 0;
1528 }
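/* Send-side sketch matching the import path above (the wire protocol that
 * carries the returned identifiers, plus revoke_cb/userdata, are assumed):
 *
 *     pa_mem_type_t type; uint32_t block_id, shm_id; size_t offset, size;
 *
 *     pa_memexport *exp = pa_memexport_new(pool, revoke_cb, userdata);
 *     if (pa_memexport_put(exp, b, &type, &block_id, &shm_id, &offset, &size) >= 0)
 *         ... transmit type/block_id/shm_id/offset/size instead of the payload ...
 *
 *     // when the peer reports it is done with the block:
 *     pa_memexport_process_release(exp, block_id);
 */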
1529