1 /***
2   This file is part of PulseAudio.
3 
4   Copyright 2004-2006 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6 
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as
9   published by the Free Software Foundation; either version 2.1 of the
10   License, or (at your option) any later version.
11 
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   Lesser General Public License for more details.
16 
17   You should have received a copy of the GNU Lesser General Public
18   License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19 ***/
20 
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24 
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <unistd.h>
29 #include <signal.h>
30 #include <errno.h>
31 
32 #ifdef HAVE_VALGRIND_MEMCHECK_H
33 #include <valgrind/memcheck.h>
34 #endif
35 
36 #include <pulse/xmalloc.h>
37 #include <pulse/def.h>
38 
39 #include <pulsecore/shm.h>
40 #include <pulsecore/log.h>
41 #include <pulsecore/hashmap.h>
42 #include <pulsecore/semaphore.h>
43 #include <pulsecore/mutex.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/refcnt.h>
46 #include <pulsecore/llist.h>
47 #include <pulsecore/flist.h>
48 #include <pulsecore/core-util.h>
49 #include <pulsecore/memtrap.h>
50 
51 #include "log/audio_log.h"
52 
53 #include "memblock.h"
54 
55 /* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
56  * note that the footprint is usually much smaller, since the data is
57  * stored in SHM and our OS does not commit the memory before we use
58  * it for the first time. */
59 #define PA_MEMPOOL_SLOTS_MAX 1024
60 #define PA_MEMPOOL_SLOT_SIZE (64*1024)
61 
62 #define PA_MEMEXPORT_SLOTS_MAX 128
63 
64 #define PA_MEMIMPORT_SLOTS_MAX 160
65 #define PA_MEMIMPORT_SEGMENTS_MAX 16
66 
67 struct pa_memblock {
68     PA_REFCNT_DECLARE; /* the reference counter */
69     pa_mempool *pool;
70 
71     pa_memblock_type_t type;
72 
73     bool read_only:1;
74     bool is_silence:1;
75 
76     pa_atomic_ptr_t data;
77     size_t length;
78 
79     pa_atomic_t n_acquired;
80     pa_atomic_t please_signal;
81 
82     union {
83         struct {
84             /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
85             pa_free_cb_t free_cb;
86             /* If type == PA_MEMBLOCK_USER this is passed as free_cb argument */
87             void *free_cb_data;
88         } user;
89 
90         struct {
91             uint32_t id;
92             pa_memimport_segment *segment;
93         } imported;
94     } per_type;
95 };
96 
97 struct pa_memimport_segment {
98     pa_memimport *import;
99     pa_shm memory;
100     pa_memtrap *trap;
101     unsigned n_blocks;
102     bool writable;
103 };
104 
105 /*
106  * If true, this segment's lifetime will not be limited by the
107  * number of active blocks (seg->n_blocks) using its shared memory.
108  * Rather, it will exist for the full lifetime of the memimport it
109  * is attached to.
110  *
111  * This is done to support memfd blocks transport.
112  *
113  * To transfer memfd-backed blocks without passing their fd every
114  * time, thus minimizing overhead and avoiding fd leaks, a command
115  * is sent with the memfd fd as ancil data very early on.
116  *
117  * This command has an ID that identifies the memfd region. Further
118  * block references are then exclusively done using this ID. On the
119  * receiving end, such logic is enabled by the memimport's segment
120  * hash and 'permanent' segments below.
121  */
122 static bool segment_is_permanent(pa_memimport_segment *seg) {
123     pa_assert(seg);
124     return seg->memory.type == PA_MEM_TYPE_SHARED_MEMFD;
125 }
126 
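/* Receiving-side flow implied by the comment above, as a rough sketch (the
 * names 'imp', 'pool_id' and 'fd' are illustrative; error handling omitted):
 *
 *     // Once, when the registration command arrives with the memfd passed as
 *     // ancillary data: create a permanent segment keyed by the pool ID.
 *     if (pa_memimport_attach_memfd(imp, pool_id, fd, false) == 0)
 *         close(fd);   // we still own the fd; the segment keeps its own mapping
 *
 *     // Afterwards every block reference carries only IDs, never an fd:
 *     pa_memblock *b = pa_memimport_get(imp, PA_MEM_TYPE_SHARED_MEMFD,
 *                                       block_id, pool_id, offset, size, false);
 */
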
127 /* A collection of multiple segments */
128 struct pa_memimport {
129     pa_mutex *mutex;
130 
131     pa_mempool *pool;
132     pa_hashmap *segments;
133     pa_hashmap *blocks;
134 
135     /* Called whenever an imported memory block is no longer
136      * needed. */
137     pa_memimport_release_cb_t release_cb;
138     void *userdata;
139 
140     PA_LLIST_FIELDS(pa_memimport);
141 };
142 
143 struct memexport_slot {
144     PA_LLIST_FIELDS(struct memexport_slot);
145     pa_memblock *block;
146 };
147 
148 struct pa_memexport {
149     pa_mutex *mutex;
150     pa_mempool *pool;
151 
152     struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
153 
154     PA_LLIST_HEAD(struct memexport_slot, free_slots);
155     PA_LLIST_HEAD(struct memexport_slot, used_slots);
156     unsigned n_init;
157     unsigned baseidx;
158 
159     /* Called whenever a client from which we imported a memory block
160        which we in turn exported to another client dies and we need to
161        revoke the memory block accordingly */
162     pa_memexport_revoke_cb_t revoke_cb;
163     void *userdata;
164 
165     PA_LLIST_FIELDS(pa_memexport);
166 };
167 
168 struct pa_mempool {
169     /* Reference count the mempool
170      *
171      * Any block allocation from the pool itself, or even just imported from
172      * another process through SHM and attached to it (PA_MEMBLOCK_IMPORTED),
173      * shall increase the refcount.
174      *
175      * This is done for per-client mempools: global references to blocks in
176      * the pool, or just to attached ones, can still be lingering around when
177      * the client connection dies and all per-client objects are to be freed.
178      * That is, current PulseAudio design does not guarantee that the client
179      * mempool blocks are referenced only by client-specific objects.
180      *
181      * For further details, please check:
182      * https://lists.freedesktop.org/archives/pulseaudio-discuss/2016-February/025587.html
183      */
184     PA_REFCNT_DECLARE;
185 
186     pa_semaphore *semaphore;
187     pa_mutex *mutex;
188 
189     pa_shm memory;
190 
191     bool global;
192 
193     size_t block_size;
194     unsigned n_blocks;
195     bool is_remote_writable;
196 
197     pa_atomic_t n_init;
198 
199     PA_LLIST_HEAD(pa_memimport, imports);
200     PA_LLIST_HEAD(pa_memexport, exports);
201 
202     /* A list of free slots that may be reused */
203     pa_flist *free_slots;
204 
205     pa_mempool_stat stat;
206 };
207 
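/* Rough sketch of the refcounting rule described above (error handling
 * omitted): any block allocated from, or attached to, a pool pins it, so the
 * pool outlives its owner's reference until the last block is gone:
 *
 *     pa_mempool *pool = pa_mempool_new(PA_MEM_TYPE_SHARED_MEMFD, 0, true);
 *     pa_memblock *b = pa_memblock_new(pool, 4096);
 *     pa_mempool_unref(pool);   // pool stays alive: b still references it
 *     ...
 *     pa_memblock_unref(b);     // last reference dropped -> pool is freed
 */
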
208 static void segment_detach(pa_memimport_segment *seg);
209 
210 PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
211 
212 /* No lock necessary */
213 static void stat_add(pa_memblock *b) {
214     pa_assert(b);
215     pa_assert(b->pool);
216 
217     pa_atomic_inc(&b->pool->stat.n_allocated);
218     pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
219 
220     pa_atomic_inc(&b->pool->stat.n_accumulated);
221     pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
222 
223     if (b->type == PA_MEMBLOCK_IMPORTED) {
224         pa_atomic_inc(&b->pool->stat.n_imported);
225         pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
226     }
227 
228     pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
229     pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
230 }
231 
232 /* No lock necessary */
233 static void stat_remove(pa_memblock *b) {
234     pa_assert(b);
235     pa_assert(b->pool);
236 
237     pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
238     pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
239 
240     pa_atomic_dec(&b->pool->stat.n_allocated);
241     pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
242 
243     if (b->type == PA_MEMBLOCK_IMPORTED) {
244         pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
245         pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
246 
247         pa_atomic_dec(&b->pool->stat.n_imported);
248         pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
249     }
250 
251     pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
252 }
253 
254 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
255 
256 /* No lock necessary */
257 pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
258     pa_memblock *b;
259 
260     pa_assert(p);
261     pa_assert(length);
262 
263     if (!(b = pa_memblock_new_pool(p, length)))
264         b = memblock_new_appended(p, length);
265 
266     return b;
267 }
268 
269 /* No lock necessary */
270 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
271     pa_memblock *b;
272 
273     pa_assert(p);
274     pa_assert(length);
275 
276     /* If -1 is passed as length we choose the size for the caller. */
277 
278     if (length == (size_t) -1)
279         length = pa_mempool_block_size_max(p);
280 
281     b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
282     PA_REFCNT_INIT(b);
283     b->pool = p;
284     pa_mempool_ref(b->pool);
285     b->type = PA_MEMBLOCK_APPENDED;
286     b->read_only = b->is_silence = false;
287     pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
288     b->length = length;
289     pa_atomic_store(&b->n_acquired, 0);
290     pa_atomic_store(&b->please_signal, 0);
291 
292     stat_add(b);
293     return b;
294 }
295 
296 /* No lock necessary */
297 static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
298     struct mempool_slot *slot;
299     pa_assert(p);
300 
301     if (!(slot = pa_flist_pop(p->free_slots))) {
302         int idx;
303 
304         /* The free list was empty, we have to allocate a new entry */
305 
306         if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
307             pa_atomic_dec(&p->n_init);
308         else
309             slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
310 
311         if (!slot) {
312             if (pa_log_ratelimit(PA_LOG_DEBUG))
313                 AUDIO_DEBUG_LOG("Pool full");
314             pa_atomic_inc(&p->stat.n_pool_full);
315             return NULL;
316         }
317     }
318 
319 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
320 /*     if (PA_UNLIKELY(pa_in_valgrind())) { */
321 /*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
322 /*     } */
323 /* #endif */
324 
325     return slot;
326 }
327 
328 /* No lock necessary, totally redundant anyway */
329 static inline void* mempool_slot_data(struct mempool_slot *slot) {
330     return slot;
331 }
332 
333 /* No lock necessary */
334 static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
335     pa_assert(p);
336 
337     pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
338     pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
339 
340     return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
341 }
342 
343 /* No lock necessary */
344 static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
345     unsigned idx;
346 
347     if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
348         return NULL;
349 
350     return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
351 }
352 
353 /* No lock necessary */
354 bool pa_mempool_is_remote_writable(pa_mempool *p) {
355     pa_assert(p);
356     return p->is_remote_writable;
357 }
358 
359 /* No lock necessary */
360 void pa_mempool_set_is_remote_writable(pa_mempool *p, bool writable) {
361     pa_assert(p);
362     pa_assert(!writable || pa_mempool_is_shared(p));
363     p->is_remote_writable = writable;
364 }
365 
366 /* No lock necessary */
367 pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
368     pa_memblock *b = NULL;
369     struct mempool_slot *slot;
370     static int mempool_disable = 0;
371 
372     pa_assert(p);
373     pa_assert(length);
374 
375     if (mempool_disable == 0)
376         mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;
377 
378     if (mempool_disable > 0)
379         return NULL;
380 
381     /* If -1 is passed as length we choose the size for the caller: we
382      * take the largest size that fits in one of our slots. */
383 
384     if (length == (size_t) -1)
385         length = pa_mempool_block_size_max(p);
386 
387     if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {
388 
389         if (!(slot = mempool_allocate_slot(p)))
390             return NULL;
391 
392         b = mempool_slot_data(slot);
393         b->type = PA_MEMBLOCK_POOL;
394         pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
395 
396     } else if (p->block_size >= length) {
397 
398         if (!(slot = mempool_allocate_slot(p)))
399             return NULL;
400 
401         if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
402             b = pa_xnew(pa_memblock, 1);
403 
404         b->type = PA_MEMBLOCK_POOL_EXTERNAL;
405         pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));
406 
407     } else {
408         AUDIO_DEBUG_LOG("Memory block too large for pool: %{public}lu > %{public}lu",
409             (unsigned long) length, (unsigned long) p->block_size);
410         pa_atomic_inc(&p->stat.n_too_large_for_pool);
411         return NULL;
412     }
413 
414     PA_REFCNT_INIT(b);
415     b->pool = p;
416     pa_mempool_ref(b->pool);
417     b->read_only = b->is_silence = false;
418     b->length = length;
419     pa_atomic_store(&b->n_acquired, 0);
420     pa_atomic_store(&b->please_signal, 0);
421 
422     stat_add(b);
423     return b;
424 }
425 
426 /* No lock necessary */
427 pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, bool read_only) {
428     pa_memblock *b;
429 
430     pa_assert(p);
431     pa_assert(d);
432     pa_assert(length != (size_t) -1);
433     pa_assert(length);
434 
435     if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
436         b = pa_xnew(pa_memblock, 1);
437 
438     PA_REFCNT_INIT(b);
439     b->pool = p;
440     pa_mempool_ref(b->pool);
441     b->type = PA_MEMBLOCK_FIXED;
442     b->read_only = read_only;
443     b->is_silence = false;
444     pa_atomic_ptr_store(&b->data, d);
445     b->length = length;
446     pa_atomic_store(&b->n_acquired, 0);
447     pa_atomic_store(&b->please_signal, 0);
448 
449     stat_add(b);
450     return b;
451 }
452 
453 /* No lock necessary */
454 pa_memblock *pa_memblock_new_user(
455         pa_mempool *p,
456         void *d,
457         size_t length,
458         pa_free_cb_t free_cb,
459         void *free_cb_data,
460         bool read_only) {
461     pa_memblock *b;
462 
463     pa_assert(p);
464     pa_assert(d);
465     pa_assert(length);
466     pa_assert(length != (size_t) -1);
467     pa_assert(free_cb);
468 
469     if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
470         b = pa_xnew(pa_memblock, 1);
471 
472     PA_REFCNT_INIT(b);
473     b->pool = p;
474     pa_mempool_ref(b->pool);
475     b->type = PA_MEMBLOCK_USER;
476     b->read_only = read_only;
477     b->is_silence = false;
478     pa_atomic_ptr_store(&b->data, d);
479     b->length = length;
480     pa_atomic_store(&b->n_acquired, 0);
481     pa_atomic_store(&b->please_signal, 0);
482 
483     b->per_type.user.free_cb = free_cb;
484     b->per_type.user.free_cb_data = free_cb_data;
485 
486     stat_add(b);
487     return b;
488 }
489 
490 /* No lock necessary */
491 bool pa_memblock_is_ours(pa_memblock *b) {
492     pa_assert(b);
493     pa_assert(PA_REFCNT_VALUE(b) > 0);
494 
495     return b->type != PA_MEMBLOCK_IMPORTED;
496 }
497 
498 /* No lock necessary */
499 bool pa_memblock_is_read_only(pa_memblock *b) {
500     pa_assert(b);
501     pa_assert(PA_REFCNT_VALUE(b) > 0);
502 
503     return b->read_only || PA_REFCNT_VALUE(b) > 1;
504 }
505 
506 /* No lock necessary */
507 bool pa_memblock_is_silence(pa_memblock *b) {
508     pa_assert(b);
509     pa_assert(PA_REFCNT_VALUE(b) > 0);
510 
511     return b->is_silence;
512 }
513 
514 /* No lock necessary */
515 void pa_memblock_set_is_silence(pa_memblock *b, bool v) {
516     pa_assert(b);
517     pa_assert(PA_REFCNT_VALUE(b) > 0);
518 
519     b->is_silence = v;
520 }
521 
522 /* No lock necessary */
523 bool pa_memblock_ref_is_one(pa_memblock *b) {
524     int r;
525     pa_assert(b);
526 
527     pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);
528 
529     return r == 1;
530 }
531 
532 /* No lock necessary */
533 void* pa_memblock_acquire(pa_memblock *b) {
534     pa_assert(b);
535     pa_assert(PA_REFCNT_VALUE(b) > 0);
536 
537     pa_atomic_inc(&b->n_acquired);
538 
539     return pa_atomic_ptr_load(&b->data);
540 }
541 
542 /* No lock necessary */
543 void *pa_memblock_acquire_chunk(const pa_memchunk *c) {
544     pa_assert(c);
545 
546     return (uint8_t *) pa_memblock_acquire(c->memblock) + c->index;
547 }
548 
549 /* No lock necessary; in corner cases it locks on its own */
550 void pa_memblock_release(pa_memblock *b) {
551     int r;
552     pa_assert(b);
553     pa_assert(PA_REFCNT_VALUE(b) > 0);
554 
555     r = pa_atomic_dec(&b->n_acquired);
556     pa_assert(r >= 1);
557 
558     /* Signal a waiting thread that this memblock is no longer used */
559     if (r == 1 && pa_atomic_load(&b->please_signal))
560         pa_semaphore_post(b->pool->semaphore);
561 }
562 
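/* Typical acquire/release pairing (sketch, assuming 'src' holds at least
 * pa_memblock_get_length(b) bytes). Keep the window short: memblock_wait()
 * further below blocks until all outstanding acquisitions are released:
 *
 *     void *p = pa_memblock_acquire(b);
 *     memcpy(p, src, pa_memblock_get_length(b));
 *     pa_memblock_release(b);
 */
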
563 size_t pa_memblock_get_length(pa_memblock *b) {
564     pa_assert(b);
565     pa_assert(PA_REFCNT_VALUE(b) > 0);
566 
567     return b->length;
568 }
569 
570 /* Note! Always unref the returned pool after use */
571 pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
572     pa_assert(b);
573     pa_assert(PA_REFCNT_VALUE(b) > 0);
574     pa_assert(b->pool);
575 
576     pa_mempool_ref(b->pool);
577     return b->pool;
578 }
579 
580 /* No lock necessary */
581 pa_memblock* pa_memblock_ref(pa_memblock *b) {
582     pa_assert(b);
583     pa_assert(PA_REFCNT_VALUE(b) > 0);
584 
585     PA_REFCNT_INC(b);
586     return b;
587 }
588 
589 static void memblock_free(pa_memblock *b) {
590     pa_mempool *pool;
591 
592     pa_assert(b);
593     pa_assert(b->pool);
594     pa_assert(pa_atomic_load(&b->n_acquired) == 0);
595 
596     pool = b->pool;
597     stat_remove(b);
598 
599     switch (b->type) {
600         case PA_MEMBLOCK_USER:
601             pa_assert(b->per_type.user.free_cb);
602             b->per_type.user.free_cb(b->per_type.user.free_cb_data);
603 
604             /* Fall through */
605 
606         case PA_MEMBLOCK_FIXED:
607             if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
608                 pa_xfree(b);
609 
610             break;
611 
612         case PA_MEMBLOCK_APPENDED:
613 
614             /* We could attach it to unused_memblocks, but that would
615              * probably waste some considerable amount of memory */
616             pa_xfree(b);
617             break;
618 
619         case PA_MEMBLOCK_IMPORTED: {
620             pa_memimport_segment *segment;
621             pa_memimport *import;
622 
623             /* FIXME! This should be implemented lock-free */
624 
625             pa_assert_se(segment = b->per_type.imported.segment);
626             pa_assert_se(import = segment->import);
627 
628             pa_mutex_lock(import->mutex);
629 
630             pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
631 
632             pa_assert(segment->n_blocks >= 1);
633             if (-- segment->n_blocks <= 0)
634                 segment_detach(segment);
635 
636             pa_mutex_unlock(import->mutex);
637 
638             import->release_cb(import, b->per_type.imported.id, import->userdata);
639 
640             if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
641                 pa_xfree(b);
642 
643             break;
644         }
645 
646         case PA_MEMBLOCK_POOL_EXTERNAL:
647         case PA_MEMBLOCK_POOL: {
648             struct mempool_slot *slot;
649             bool call_free;
650 
651             pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));
652 
653             call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
654 
655 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
656 /*             if (PA_UNLIKELY(pa_in_valgrind())) { */
657 /*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
658 /*             } */
659 /* #endif */
660 
661             /* The free list dimensions should easily allow all slots
662              * to fit in, hence try harder if pushing this slot into
663              * the free list fails */
664             while (pa_flist_push(b->pool->free_slots, slot) < 0)
665                 ;
666 
667             if (call_free)
668                 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
669                     pa_xfree(b);
670 
671             break;
672         }
673 
674         case PA_MEMBLOCK_TYPE_MAX:
675         default:
676             pa_assert_not_reached();
677     }
678 
679     pa_mempool_unref(pool);
680 }
681 
682 /* No lock necessary */
683 void pa_memblock_unref(pa_memblock *b) {
684     pa_assert(b);
685     pa_assert(PA_REFCNT_VALUE(b) > 0);
686 
687     if (PA_REFCNT_DEC(b) > 0)
688         return;
689 
690     memblock_free(b);
691 }
692 
693 /* Self locked */
694 static void memblock_wait(pa_memblock *b) {
695     pa_assert(b);
696 
697     if (pa_atomic_load(&b->n_acquired) > 0) {
698         /* We need to wait until all threads gave up access to the
699          * memory block before we can go on. Unfortunately this means
700          * that we have to lock and wait here. Sniff! */
701 
702         pa_atomic_inc(&b->please_signal);
703 
704         while (pa_atomic_load(&b->n_acquired) > 0)
705             pa_semaphore_wait(b->pool->semaphore);
706 
707         pa_atomic_dec(&b->please_signal);
708     }
709 }
710 
711 /* No lock necessary. This function is not multiple caller safe! */
712 static void memblock_make_local(pa_memblock *b) {
713     pa_assert(b);
714 
715     pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
716 
717     if (b->length <= b->pool->block_size) {
718         struct mempool_slot *slot;
719 
720         if ((slot = mempool_allocate_slot(b->pool))) {
721             void *new_data;
722             /* We can move it into a local pool, perfect! */
723 
724             new_data = mempool_slot_data(slot);
725             memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
726             pa_atomic_ptr_store(&b->data, new_data);
727 
728             b->type = PA_MEMBLOCK_POOL_EXTERNAL;
729             b->read_only = false;
730 
731             goto finish;
732         }
733     }
734 
735     /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
736     b->per_type.user.free_cb = pa_xfree;
737     pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
738     b->per_type.user.free_cb_data = pa_atomic_ptr_load(&b->data);
739 
740     b->type = PA_MEMBLOCK_USER;
741     b->read_only = false;
742 
743 finish:
744     pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
745     pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
746     memblock_wait(b);
747 }
748 
749 /* No lock necessary. This function is not multiple caller safe */
750 void pa_memblock_unref_fixed(pa_memblock *b) {
751     pa_assert(b);
752     pa_assert(PA_REFCNT_VALUE(b) > 0);
753     pa_assert(b->type == PA_MEMBLOCK_FIXED);
754 
755     if (PA_REFCNT_VALUE(b) > 1)
756         memblock_make_local(b);
757 
758     pa_memblock_unref(b);
759 }
760 
761 /* No lock necessary. */
762 pa_memblock *pa_memblock_will_need(pa_memblock *b) {
763     void *p;
764 
765     pa_assert(b);
766     pa_assert(PA_REFCNT_VALUE(b) > 0);
767 
768     p = pa_memblock_acquire(b);
769     pa_will_need(p, b->length);
770     pa_memblock_release(b);
771 
772     return b;
773 }
774 
775 /* Self-locked. This function is not multiple-caller safe */
776 static void memblock_replace_import(pa_memblock *b) {
777     pa_memimport_segment *segment;
778     pa_memimport *import;
779 
780     pa_assert(b);
781     pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
782 
783     pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
784     pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
785     pa_atomic_dec(&b->pool->stat.n_imported);
786     pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
787 
788     pa_assert_se(segment = b->per_type.imported.segment);
789     pa_assert_se(import = segment->import);
790 
791     pa_mutex_lock(import->mutex);
792 
793     pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
794 
795     memblock_make_local(b);
796 
797     pa_assert(segment->n_blocks >= 1);
798     if (-- segment->n_blocks <= 0)
799         segment_detach(segment);
800 
801     pa_mutex_unlock(import->mutex);
802 }
803 
804 /* @per_client: This is a security measure. By default this should
805  * be set to true where the created mempool is never shared with more
806  * than one client in the system. Set this to false if a global
807  * mempool, shared with all existing and future clients, is required.
808  *
809  * NOTE-1: Do not create any further global mempools! They allow data
810  * leaks between clients and thus conflict with the xdg-app containers
811  * model. They also complicate the handling of memfd-based pools.
812  *
813  * NOTE-2: Almost all mempools are now created on a per client basis.
814  * The only exception is the pa_core's mempool which is still shared
815  * between all clients of the system.
816  *
817  * Beside security issues, special marking for global mempools is
818  * required for memfd communication. To avoid fd leaks, memfd pools
819  * are registered with the connection pstream to create an ID<->memfd
820  * mapping on both PA endpoints. Such memory regions are then always
821  * referenced by their IDs and never by their fds and thus their fds
822  * can be quickly closed later.
823  *
824  * Unfortunately this scheme cannot work with global pools since the
825  * ID registration mechanism needs to happen for each newly connected
826  * client, hence the need for special handling. That is, the pool's
827  * fd must always remain open :-(
828  *
829  * TODO-1: Transform the global core mempool to a per-client one
830  * TODO-2: Remove global mempools support */
831 pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size, bool per_client) {
832     AUDIO_DEBUG_LOG("pa_mempool_new:type %{public}d, size %{public}zu, per_client %{public}d,", type, size, per_client);
833     pa_mempool *p;
834     char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
835     const size_t page_size = pa_page_size();
836 
837     p = pa_xnew0(pa_mempool, 1);
838     PA_REFCNT_INIT(p);
839 
840     p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
841     if (p->block_size < page_size)
842         p->block_size = page_size;
843 
844     if (size <= 0)
845         p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
846     else {
847         p->n_blocks = (unsigned) (size / p->block_size);
848 
849         if (p->n_blocks < 2)
850             p->n_blocks = 2;
851     }
852 
853     if (pa_shm_create_rw(&p->memory, type, p->n_blocks * p->block_size, 0700) < 0) {
854         pa_xfree(p);
855         return NULL;
856     }
857 
858     AUDIO_DEBUG_LOG("Using %{public}s memory pool with %{public}u slots of size %{public}s each, total size is"
859                  "%{public}s, maximum usable slot size is %{public}lu",
860                  pa_mem_type_to_string(type),
861                  p->n_blocks,
862                  pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
863                  pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
864                  (unsigned long) pa_mempool_block_size_max(p));
865 
866     p->global = !per_client;
867 
868     pa_atomic_store(&p->n_init, 0);
869 
870     PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
871     PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
872 
873     p->mutex = pa_mutex_new(true, true);
874     p->semaphore = pa_semaphore_new(0);
875 
876     p->free_slots = pa_flist_new(p->n_blocks);
877 
878     return p;
879 }
880 
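/* Sketch of the per-client usage recommended in the comment above
 * pa_mempool_new() (illustrative only; error handling omitted):
 *
 *     // One pool per connected client; never shared with other clients.
 *     pa_mempool *pool = pa_mempool_new(PA_MEM_TYPE_SHARED_MEMFD, 0, true);
 *
 *     // For memfd pools the ID<->memfd mapping is then registered with the
 *     // connection pstream (see pa_pstream_register_memfd_mempool(), mentioned
 *     // further below) so that blocks can later be referenced purely by ID.
 */
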
881 static void mempool_free(pa_mempool *p) {
882     pa_assert(p);
883 
884     pa_mutex_lock(p->mutex);
885 
886     while (p->imports)
887         pa_memimport_free(p->imports);
888 
889     while (p->exports)
890         pa_memexport_free(p->exports);
891 
892     pa_mutex_unlock(p->mutex);
893 
894     pa_flist_free(p->free_slots, NULL);
895 
896     if (pa_atomic_load(&p->stat.n_allocated) > 0) {
897 
898         /* Ouch, somebody is retaining a memory block reference! */
899 
900 #ifdef DEBUG_REF
901         unsigned i;
902         pa_flist *list;
903 
904         /* Let's try to find at least one of those leaked memory blocks */
905 
906         list = pa_flist_new(p->n_blocks);
907 
908         for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
909             struct mempool_slot *slot;
910             pa_memblock *b, *k;
911 
912             slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
913             b = mempool_slot_data(slot);
914 
915             while ((k = pa_flist_pop(p->free_slots))) {
916                 while (pa_flist_push(list, k) < 0)
917                     ;
918 
919                 if (b == k)
920                     break;
921             }
922 
923             if (!k)
924                 AUDIO_ERR_LOG("REF: Leaked memory block %{public}p", b);
925 
926             while ((k = pa_flist_pop(list)))
927                 while (pa_flist_push(p->free_slots, k) < 0)
928                     ;
929         }
930 
931         pa_flist_free(list, NULL);
932 
933 #endif
934 
935         AUDIO_ERR_LOG("Memory pool destroyed but not all memory blocks freed! %{public}u remain.",
936             pa_atomic_load(&p->stat.n_allocated));
937 
938 /*         PA_DEBUG_TRAP; */
939     }
940 
941     pa_shm_free(&p->memory);
942 
943     pa_mutex_free(p->mutex);
944     pa_semaphore_free(p->semaphore);
945 
946     pa_xfree(p);
947 }
948 
949 /* No lock necessary */
950 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
951     pa_assert(p);
952 
953     return &p->stat;
954 }
955 
956 /* No lock necessary */
957 size_t pa_mempool_block_size_max(pa_mempool *p) {
958     pa_assert(p);
959 
960     return p->block_size - PA_ALIGN(sizeof(pa_memblock));
961 }
962 
963 /* No lock necessary */
964 void pa_mempool_vacuum(pa_mempool *p) {
965     struct mempool_slot *slot;
966     pa_flist *list;
967 
968     pa_assert(p);
969 
970     list = pa_flist_new(p->n_blocks);
971 
972     while ((slot = pa_flist_pop(p->free_slots)))
973         while (pa_flist_push(list, slot) < 0)
974             ;
975 
976     while ((slot = pa_flist_pop(list))) {
977         pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
978 
979         while (pa_flist_push(p->free_slots, slot))
980             ;
981     }
982 
983     pa_flist_free(list, NULL);
984 }
985 
986 /* No lock necessary */
987 bool pa_mempool_is_shared(pa_mempool *p) {
988     pa_assert(p);
989 
990     return pa_mem_type_is_shared(p->memory.type);
991 }
992 
993 /* No lock necessary */
994 bool pa_mempool_is_memfd_backed(const pa_mempool *p) {
995     pa_assert(p);
996 
997     return (p->memory.type == PA_MEM_TYPE_SHARED_MEMFD);
998 }
999 
1000 /* No lock necessary */
1001 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
1002     pa_assert(p);
1003 
1004     if (!pa_mempool_is_shared(p))
1005         return -1;
1006 
1007     *id = p->memory.id;
1008 
1009     return 0;
1010 }
1011 
1012 pa_mempool* pa_mempool_ref(pa_mempool *p) {
1013     pa_assert(p);
1014     pa_assert(PA_REFCNT_VALUE(p) > 0);
1015 
1016     PA_REFCNT_INC(p);
1017     return p;
1018 }
1019 
1020 void pa_mempool_unref(pa_mempool *p) {
1021     pa_assert(p);
1022     pa_assert(PA_REFCNT_VALUE(p) > 0);
1023 
1024     if (PA_REFCNT_DEC(p) <= 0)
1025         mempool_free(p);
1026 }
1027 
1028 /* No lock necessary
1029  * Check pa_mempool_new() for per-client vs. global mempools */
1030 bool pa_mempool_is_global(pa_mempool *p) {
1031     pa_assert(p);
1032 
1033     return p->global;
1034 }
1035 
1036 /* No lock necessary
1037  * Check pa_mempool_new() for per-client vs. global mempools */
1038 bool pa_mempool_is_per_client(pa_mempool *p) {
1039     return !pa_mempool_is_global(p);
1040 }
1041 
1042 /* Self-locked
1043  *
1044  * This is only for per-client mempools!
1045  *
1046  * After this method's return, the caller owns the file descriptor
1047  * and is responsible for closing it at the appropriate time. This
1048  * should only be called once during a mempool's lifetime.
1049  *
1050  * Check pa_shm->fd and pa_mempool_new() for further context. */
1051 int pa_mempool_take_memfd_fd(pa_mempool *p) {
1052     int memfd_fd;
1053 
1054     pa_assert(p);
1055     pa_assert(pa_mempool_is_shared(p));
1056     pa_assert(pa_mempool_is_memfd_backed(p));
1057     pa_assert(pa_mempool_is_per_client(p));
1058 
1059     pa_mutex_lock(p->mutex);
1060 
1061     memfd_fd = p->memory.fd;
1062     p->memory.fd = -1;
1063 
1064     pa_mutex_unlock(p->mutex);
1065 
1066     pa_assert(memfd_fd != -1);
1067     return memfd_fd;
1068 }
1069 
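/* Sketch of the single hand-off described above: the caller takes the fd
 * exactly once, passes it to the other endpoint (e.g. as ancillary data on
 * the connection), and closes it when appropriate:
 *
 *     int memfd_fd = pa_mempool_take_memfd_fd(pool);
 *     ...                  // transmit the fd together with the pool's SHM ID
 *     close(memfd_fd);     // we own it now, so we must close it ourselves
 */
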
1070 /* No lock necessary
1071  *
1072  * This is only for global mempools!
1073  *
1074  * Global mempools have their memfd descriptor always open. DO NOT
1075  * close the returned descriptor by your own.
1076  *
1077  * Check pa_mempool_new() for further context. */
1078 int pa_mempool_get_memfd_fd(pa_mempool *p) {
1079     int memfd_fd;
1080 
1081     pa_assert(p);
1082     pa_assert(pa_mempool_is_shared(p));
1083     pa_assert(pa_mempool_is_memfd_backed(p));
1084     pa_assert(pa_mempool_is_global(p));
1085 
1086     memfd_fd = p->memory.fd;
1087     pa_assert(memfd_fd != -1);
1088 
1089     return memfd_fd;
1090 }
1091 
1092 /* For receiving blocks from other nodes */
1093 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
1094     pa_memimport *i;
1095 
1096     pa_assert(p);
1097     pa_assert(cb);
1098 
1099     i = pa_xnew(pa_memimport, 1);
1100     i->mutex = pa_mutex_new(true, true);
1101     i->pool = p;
1102     pa_mempool_ref(i->pool);
1103     i->segments = pa_hashmap_new(NULL, NULL);
1104     i->blocks = pa_hashmap_new(NULL, NULL);
1105     i->release_cb = cb;
1106     i->userdata = userdata;
1107 
1108     pa_mutex_lock(p->mutex);
1109     PA_LLIST_PREPEND(pa_memimport, p->imports, i);
1110     pa_mutex_unlock(p->mutex);
1111 
1112     return i;
1113 }
1114 
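/* Minimal usage sketch ('release_cb' is hypothetical): the callback is invoked
 * from memblock_free() once an imported block is no longer needed locally, so
 * the exporting peer can be told to release its corresponding export slot:
 *
 *     static void release_cb(pa_memimport *i, uint32_t block_id, void *userdata) {
 *         // queue a "release block_id" message to the peer we import from
 *     }
 *
 *     pa_memimport *imp = pa_memimport_new(pool, release_cb, NULL);
 */
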
1115 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
1116 
1117 /* Should be called locked
1118  * Caller owns passed @memfd_fd and must close it down when appropriate. */
1119 static pa_memimport_segment* segment_attach(pa_memimport *i, pa_mem_type_t type, uint32_t shm_id,
1120                                             int memfd_fd, bool writable) {
1121     pa_memimport_segment* seg;
1122     pa_assert(pa_mem_type_is_shared(type));
1123 
1124     if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
1125         return NULL;
1126 
1127     seg = pa_xnew0(pa_memimport_segment, 1);
1128 
1129     if (pa_shm_attach(&seg->memory, type, shm_id, memfd_fd, writable) < 0) {
1130         pa_xfree(seg);
1131         return NULL;
1132     }
1133 
1134     seg->writable = writable;
1135     seg->import = i;
1136     seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);
1137 
1138     pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
1139     return seg;
1140 }
1141 
1142 /* Should be called locked */
1143 static void segment_detach(pa_memimport_segment *seg) {
1144     pa_assert(seg);
1145     pa_assert(seg->n_blocks == (segment_is_permanent(seg) ? 1u : 0u));
1146 
1147     pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
1148     pa_shm_free(&seg->memory);
1149 
1150     if (seg->trap)
1151         pa_memtrap_remove(seg->trap);
1152 
1153     pa_xfree(seg);
1154 }
1155 
1156 /* Self-locked. Not multiple-caller safe */
1157 void pa_memimport_free(pa_memimport *i) {
1158     pa_memexport *e;
1159     pa_memblock *b;
1160     pa_memimport_segment *seg;
1161     void *state = NULL;
1162 
1163     pa_assert(i);
1164 
1165     pa_mutex_lock(i->mutex);
1166 
1167     while ((b = pa_hashmap_first(i->blocks)))
1168         memblock_replace_import(b);
1169 
1170     /* Permanent segments exist for the lifetime of the memimport. Now
1171      * that we're freeing the memimport itself, clear them all up.
1172      *
1173      * Careful! segment_detach() internally removes itself from the
1174      * memimport's hash; the same hash we're now using for iteration. */
1175     PA_HASHMAP_FOREACH(seg, i->segments, state) {
1176         if (segment_is_permanent(seg))
1177             segment_detach(seg);
1178     }
1179     pa_assert(pa_hashmap_size(i->segments) == 0);
1180 
1181     pa_mutex_unlock(i->mutex);
1182 
1183     pa_mutex_lock(i->pool->mutex);
1184 
1185     /* If we've exported this block further we need to revoke that export */
1186     for (e = i->pool->exports; e; e = e->next)
1187         memexport_revoke_blocks(e, i);
1188 
1189     PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
1190 
1191     pa_mutex_unlock(i->pool->mutex);
1192 
1193     pa_mempool_unref(i->pool);
1194     pa_hashmap_free(i->blocks);
1195     pa_hashmap_free(i->segments);
1196 
1197     pa_mutex_free(i->mutex);
1198 
1199     pa_xfree(i);
1200 }
1201 
1202 /* Create a new memimport's memfd segment entry, with passed SHM ID
1203  * as key and the newly-created segment (with its mmap()-ed memfd
1204  * memory region) as its value.
1205  *
1206  * Note! check comments at 'pa_shm->fd', 'segment_is_permanent()',
1207  * and 'pa_pstream_register_memfd_mempool()' for further details.
1208  *
1209  * Caller owns passed @memfd_fd and must close it down when appropriate. */
1210 int pa_memimport_attach_memfd(pa_memimport *i, uint32_t shm_id, int memfd_fd, bool writable) {
1211     pa_memimport_segment *seg;
1212     int ret = -1;
1213 
1214     pa_assert(i);
1215     pa_assert(memfd_fd != -1);
1216 
1217     pa_mutex_lock(i->mutex);
1218 
1219     if (!(seg = segment_attach(i, PA_MEM_TYPE_SHARED_MEMFD, shm_id, memfd_fd, writable)))
1220         goto finish;
1221 
1222     /* n_blocks acts as a segment reference count. To avoid the segment
1223      * being deleted when receiving silent memchunks, etc., mark our
1224      * permanent presence by incrementing that refcount. */
1225     seg->n_blocks++;
1226 
1227     pa_assert(segment_is_permanent(seg));
1228     ret = 0;
1229 
1230 finish:
1231     pa_mutex_unlock(i->mutex);
1232     return ret;
1233 }
1234 
1235 /* Self-locked */
1236 pa_memblock* pa_memimport_get(pa_memimport *i, pa_mem_type_t type, uint32_t block_id, uint32_t shm_id,
1237                               size_t offset, size_t size, bool writable) {
1238     pa_memblock *b = NULL;
1239     pa_memimport_segment *seg;
1240 
1241     pa_assert(i);
1242     pa_assert(pa_mem_type_is_shared(type));
1243 
1244     pa_mutex_lock(i->mutex);
1245 
1246     if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
1247         pa_memblock_ref(b);
1248         goto finish;
1249     }
1250 
1251     if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
1252         goto finish;
1253 
1254     if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id)))) {
1255         if (type == PA_MEM_TYPE_SHARED_MEMFD) {
1256             AUDIO_ERR_LOG("Bailing out! No cached memimport segment for memfd ID %{public}u", shm_id);
1257             AUDIO_ERR_LOG("Did the other PA endpoint forget registering its memfd pool?");
1258             goto finish;
1259         }
1260 
1261         pa_assert(type == PA_MEM_TYPE_SHARED_POSIX);
1262         if (!(seg = segment_attach(i, type, shm_id, -1, writable)))
1263             goto finish;
1264     }
1265 
1266     if (writable && !seg->writable) {
1267         AUDIO_ERR_LOG("Cannot import cached segment in write mode - previously mapped as read-only");
1268         goto finish;
1269     }
1270 
1271     if (offset+size > seg->memory.size)
1272         goto finish;
1273 
1274     if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
1275         b = pa_xnew(pa_memblock, 1);
1276 
1277     PA_REFCNT_INIT(b);
1278     b->pool = i->pool;
1279     pa_mempool_ref(b->pool);
1280     b->type = PA_MEMBLOCK_IMPORTED;
1281     b->read_only = !writable;
1282     b->is_silence = false;
1283     pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
1284     b->length = size;
1285     pa_atomic_store(&b->n_acquired, 0);
1286     pa_atomic_store(&b->please_signal, 0);
1287     b->per_type.imported.id = block_id;
1288     b->per_type.imported.segment = seg;
1289 
1290     pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
1291 
1292     seg->n_blocks++;
1293 
1294     stat_add(b);
1295 
1296 finish:
1297     pa_mutex_unlock(i->mutex);
1298 
1299     return b;
1300 }
1301 
1302 int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
1303     pa_memblock *b;
1304     int ret = 0;
1305     pa_assert(i);
1306 
1307     pa_mutex_lock(i->mutex);
1308 
1309     if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
1310         ret = -1;
1311         goto finish;
1312     }
1313 
1314     memblock_replace_import(b);
1315 
1316 finish:
1317     pa_mutex_unlock(i->mutex);
1318 
1319     return ret;
1320 }
1321 
1322 /* For sending blocks to other nodes */
1323 pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
1324     pa_memexport *e;
1325 
1326     static pa_atomic_t export_baseidx = PA_ATOMIC_INIT(0);
1327 
1328     pa_assert(p);
1329     pa_assert(cb);
1330 
1331     if (!pa_mempool_is_shared(p))
1332         return NULL;
1333 
1334     e = pa_xnew(pa_memexport, 1);
1335     e->mutex = pa_mutex_new(true, true);
1336     e->pool = p;
1337     pa_mempool_ref(e->pool);
1338     PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
1339     PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
1340     e->n_init = 0;
1341     e->revoke_cb = cb;
1342     e->userdata = userdata;
1343 
1344     pa_mutex_lock(p->mutex);
1345 
1346     PA_LLIST_PREPEND(pa_memexport, p->exports, e);
1347     e->baseidx = (uint32_t) pa_atomic_add(&export_baseidx, PA_MEMEXPORT_SLOTS_MAX);
1348 
1349     pa_mutex_unlock(p->mutex);
1350     return e;
1351 }
1352 
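/* Minimal usage sketch ('revoke_cb' is hypothetical): the callback is invoked
 * from memexport_revoke_blocks() when a block we re-exported has to be taken
 * back, typically because the client we imported it from went away:
 *
 *     static void revoke_cb(pa_memexport *e, uint32_t block_id, void *userdata) {
 *         // tell the receiving peer that block_id is no longer valid
 *     }
 *
 *     pa_memexport *exp = pa_memexport_new(pool, revoke_cb, NULL);
 */
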
1353 void pa_memexport_free(pa_memexport *e) {
1354     pa_assert(e);
1355 
1356     pa_mutex_lock(e->mutex);
1357     while (e->used_slots)
1358         pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots + e->baseidx));
1359     pa_mutex_unlock(e->mutex);
1360 
1361     pa_mutex_lock(e->pool->mutex);
1362     PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
1363     pa_mutex_unlock(e->pool->mutex);
1364 
1365     pa_mempool_unref(e->pool);
1366     pa_mutex_free(e->mutex);
1367     pa_xfree(e);
1368 }
1369 
1370 /* Self-locked */
1371 int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
1372     pa_memblock *b;
1373 
1374     pa_assert(e);
1375 
1376     pa_mutex_lock(e->mutex);
1377 
1378     if (id < e->baseidx)
1379         goto fail;
1380     id -= e->baseidx;
1381 
1382     if (id >= e->n_init)
1383         goto fail;
1384 
1385     if (!e->slots[id].block)
1386         goto fail;
1387 
1388     b = e->slots[id].block;
1389     e->slots[id].block = NULL;
1390 
1391     PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
1392     PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
1393 
1394     pa_mutex_unlock(e->mutex);
1395 
1396 /*     pa_log("Processing release for %u", id); */
1397 
1398     pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
1399     pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
1400 
1401     pa_atomic_dec(&e->pool->stat.n_exported);
1402     pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
1403 
1404     pa_memblock_unref(b);
1405 
1406     return 0;
1407 
1408 fail:
1409     pa_mutex_unlock(e->mutex);
1410 
1411     return -1;
1412 }
1413 
1414 /* Self-locked */
1415 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
1416     struct memexport_slot *slot, *next;
1417     pa_assert(e);
1418     pa_assert(i);
1419 
1420     pa_mutex_lock(e->mutex);
1421 
1422     for (slot = e->used_slots; slot; slot = next) {
1423         uint32_t idx;
1424         next = slot->next;
1425 
1426         if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
1427             slot->block->per_type.imported.segment->import != i)
1428             continue;
1429 
1430         idx = (uint32_t) (slot - e->slots + e->baseidx);
1431         e->revoke_cb(e, idx, e->userdata);
1432         pa_memexport_process_release(e, idx);
1433     }
1434 
1435     pa_mutex_unlock(e->mutex);
1436 }
1437 
1438 /* No lock necessary */
1439 static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
1440     pa_memblock *n;
1441 
1442     pa_assert(p);
1443     pa_assert(b);
1444 
1445     if (b->type == PA_MEMBLOCK_IMPORTED ||
1446         b->type == PA_MEMBLOCK_POOL ||
1447         b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
1448         pa_assert(b->pool == p);
1449         return pa_memblock_ref(b);
1450     }
1451 
1452     if (!(n = pa_memblock_new_pool(p, b->length)))
1453         return NULL;
1454 
1455     memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
1456     return n;
1457 }
1458 
1459 /* Self-locked */
1460 int pa_memexport_put(pa_memexport *e, pa_memblock *b, pa_mem_type_t *type, uint32_t *block_id,
1461                      uint32_t *shm_id, size_t *offset, size_t * size) {
1462     pa_shm  *memory;
1463     struct memexport_slot *slot;
1464     void *data;
1465 
1466     pa_assert(e);
1467     pa_assert(b);
1468     pa_assert(type);
1469     pa_assert(block_id);
1470     pa_assert(shm_id);
1471     pa_assert(offset);
1472     pa_assert(size);
1473     pa_assert(b->pool == e->pool);
1474 
1475     if (!(b = memblock_shared_copy(e->pool, b)))
1476         return -1;
1477 
1478     pa_mutex_lock(e->mutex);
1479 
1480     if (e->free_slots) {
1481         slot = e->free_slots;
1482         PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
1483     } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
1484         slot = &e->slots[e->n_init++];
1485     else {
1486         pa_mutex_unlock(e->mutex);
1487         pa_memblock_unref(b);
1488         return -1;
1489     }
1490 
1491     PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
1492     slot->block = b;
1493     *block_id = (uint32_t) (slot - e->slots + e->baseidx);
1494 
1495     pa_mutex_unlock(e->mutex);
1496 /*     pa_log("Got block id %u", *block_id); */
1497 
1498     data = pa_memblock_acquire(b);
1499 
1500     if (b->type == PA_MEMBLOCK_IMPORTED) {
1501         pa_assert(b->per_type.imported.segment);
1502         memory = &b->per_type.imported.segment->memory;
1503     } else {
1504         pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
1505         pa_assert(b->pool);
1506         pa_assert(pa_mempool_is_shared(b->pool));
1507         memory = &b->pool->memory;
1508     }
1509 
1510     pa_assert(data >= memory->ptr);
1511     pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
1512 
1513     *type = memory->type;
1514     *shm_id = memory->id;
1515     *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
1516     *size = b->length;
1517 
1518     pa_memblock_release(b);
1519 
1520     pa_atomic_inc(&e->pool->stat.n_exported);
1521     pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);
1522 
1523     return 0;
1524 }
1525
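/* Sending-side sketch (error handling omitted): export a block and transmit
 * only its identifiers; the peer resolves them through pa_memimport_get():
 *
 *     pa_mem_type_t type;
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *
 *     if (pa_memexport_put(exp, b, &type, &block_id, &shm_id, &offset, &size) == 0) {
 *         // put (type, block_id, shm_id, offset, size) on the wire; the export
 *         // keeps its own reference until pa_memexport_process_release(e, block_id).
 *     }
 */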