pulsecore: Specially mark global mempools

Color global mempools with a special mark. This special marking
is needed for handling memfd-backed pools.

To avoid fd leaks, memfd pools are registered with the connection
pstream to create an ID<->memfd mapping on both PA endpoints.
Such memory regions are then always referenced by their IDs and
never by their fds, and so their fds can be safely closed later.

Unfortunately this scheme cannot work with global pools since the
registration ID<->memfd mechanism needs to happen for each newly
connected client, and thus the need for special handling.
That is, for the pool's fd to be always open :-(

Almost all mempools are now created on a per-client basis. The
only exception is the pa_core's mempool which is still shared
between all clients of the system.

Signed-off-by: Ahmed S. Darwish <darwish.07@gmail.com>
This commit is contained in:
Ahmed S. Darwish 2016-03-13 01:09:39 +02:00 committed by David Henningsson
parent f8714af56b
commit ee2db62277
15 changed files with 124 additions and 19 deletions

View file

@ -172,11 +172,11 @@ pa_context *pa_context_new_with_proplist(pa_mainloop_api *mainloop, const char *
c->srb_template.writefd = -1;
type = !c->conf->disable_shm ? PA_MEM_TYPE_SHARED_POSIX : PA_MEM_TYPE_PRIVATE;
if (!(c->mempool = pa_mempool_new(type, c->conf->shm_size))) {
if (!(c->mempool = pa_mempool_new(type, c->conf->shm_size, true))) {
if (!c->conf->disable_shm) {
pa_log_warn("Failed to allocate shared memory pool. Falling back to a normal private one.");
c->mempool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, c->conf->shm_size);
c->mempool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, c->conf->shm_size, true);
}
if (!c->mempool) {

View file

@ -69,14 +69,14 @@ pa_core* pa_core_new(pa_mainloop_api *m, bool shared, size_t shm_size) {
pa_assert(m);
if (shared) {
if (!(pool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, shm_size))) {
if (!(pool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, shm_size, false))) {
pa_log_warn("Failed to allocate shared memory pool. Falling back to a normal memory pool.");
shared = false;
}
}
if (!shared) {
if (!(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, shm_size))) {
if (!(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, shm_size, false))) {
pa_log("pa_mempool_new() failed.");
return NULL;
}

View file

@ -185,6 +185,9 @@ struct pa_mempool {
pa_mutex *mutex;
pa_shm memory;
bool global;
size_t block_size;
unsigned n_blocks;
bool is_remote_writable;
@ -795,7 +798,34 @@ static void memblock_replace_import(pa_memblock *b) {
pa_mutex_unlock(import->mutex);
}
pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size) {
/*@per_client: This is a security measure. By default this should
* be set to true where the created mempool is never shared with more
* than one client in the system. Set this to false if a global
* mempool, shared with all existing and future clients, is required.
*
* NOTE-1: Do not create any further global mempools! They allow data
* leaks between clients and thus conflict with the xdg-app containers
* model. They also complicate the handling of memfd-based pools.
*
* NOTE-2: Almost all mempools are now created on a per client basis.
* The only exception is the pa_core's mempool which is still shared
* between all clients of the system.
*
* Besides security issues, special marking for global mempools is
* required for memfd communication. To avoid fd leaks, memfd pools
* are registered with the connection pstream to create an ID<->memfd
* mapping on both PA endpoints. Such memory regions are then always
* referenced by their IDs and never by their fds and thus their fds
* can be quickly closed later.
*
* Unfortunately this scheme cannot work with global pools since the
* ID registration mechanism needs to happen for each newly connected
* client, and thus the need for special handling. That is,
* for the pool's fd to be always open :-(
*
* TODO-1: Transform the global core mempool to a per-client one
* TODO-2: Remove global mempools support */
pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size, bool per_client) {
pa_mempool *p;
char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
@ -827,6 +857,8 @@ pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size) {
pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
(unsigned long) pa_mempool_block_size_max(p));
p->global = !per_client;
pa_atomic_store(&p->n_init, 0);
PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
@ -986,6 +1018,70 @@ void pa_mempool_unref(pa_mempool *p) {
mempool_free(p);
}
/* No lock necessary: p->global is assigned at pool creation time in
 * pa_mempool_new() and -- as far as this change shows -- never
 * modified afterwards, so it is safe to read without p->mutex.
 *
 * Returns true if this mempool is shared between all clients of the
 * system rather than owned by a single client.
 * Check pa_mempool_new() for per-client vs. global mempools */
bool pa_mempool_is_global(pa_mempool *p) {
pa_assert(p);
return p->global;
}
/* No lock necessary
 *
 * Convenience inverse of pa_mempool_is_global(); the NULL assertion
 * on p is performed there.
 * Check pa_mempool_new() for per-client vs. global mempools */
bool pa_mempool_is_per_client(pa_mempool *p) {
return !pa_mempool_is_global(p);
}
/* Self-locked
 *
 * This is only for per-client mempools!
 *
 * Transfers ownership of the pool's memfd file descriptor: after this
 * method's return, the caller owns the file descriptor and is
 * responsible for closing it at the appropriate time. This should
 * only be called once during a mempool's lifetime.
 *
 * Check pa_shm->fd and pa_mempool_new() for further context. */
int pa_mempool_take_memfd_fd(pa_mempool *p) {
int memfd_fd;
pa_assert(p);
pa_assert(pa_mempool_is_shared(p));
pa_assert(pa_mempool_is_memfd_backed(p));
pa_assert(pa_mempool_is_per_client(p));
pa_mutex_lock(p->mutex);
/* Swap the fd out under the mutex so two concurrent callers cannot
 * both observe a valid descriptor; -1 marks it as already taken. */
memfd_fd = p->memory.fd;
p->memory.fd = -1;
pa_mutex_unlock(p->mutex);
/* Firing here means the fd was already taken (or was never valid),
 * i.e. this method was called more than once for this pool. */
pa_assert(memfd_fd != -1);
return memfd_fd;
}
/* No lock necessary
 *
 * This is only for global mempools!
 *
 * Global mempools have their memfd descriptor always open; no
 * ownership is transferred here. DO NOT close the returned
 * descriptor on your own.
 *
 * Check pa_mempool_new() for further context. */
int pa_mempool_get_memfd_fd(pa_mempool *p) {
int memfd_fd;
pa_assert(p);
pa_assert(pa_mempool_is_shared(p));
pa_assert(pa_mempool_is_memfd_backed(p));
pa_assert(pa_mempool_is_global(p));
/* No locking needed: a global pool's fd stays open and unchanged for
 * the pool's entire lifetime (see pa_shm->fd comment). */
memfd_fd = p->memory.fd;
pa_assert(memfd_fd != -1);
return memfd_fd;
}
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
pa_memimport *i;

View file

@ -124,7 +124,7 @@ pa_mempool * pa_memblock_get_pool(pa_memblock *b);
pa_memblock *pa_memblock_will_need(pa_memblock *b);
/* The memory block manager */
pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size);
pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size, bool per_client);
void pa_mempool_unref(pa_mempool *p);
pa_mempool* pa_mempool_ref(pa_mempool *p);
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p);
@ -132,10 +132,15 @@ void pa_mempool_vacuum(pa_mempool *p);
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id);
bool pa_mempool_is_shared(pa_mempool *p);
bool pa_mempool_is_memfd_backed(const pa_mempool *p);
bool pa_mempool_is_global(pa_mempool *p);
bool pa_mempool_is_per_client(pa_mempool *p);
bool pa_mempool_is_remote_writable(pa_mempool *p);
void pa_mempool_set_is_remote_writable(pa_mempool *p, bool writable);
size_t pa_mempool_block_size_max(pa_mempool *p);
int pa_mempool_take_memfd_fd(pa_mempool *p);
int pa_mempool_get_memfd_fd(pa_mempool *p);
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata);
void pa_memimport_free(pa_memimport *i);

View file

@ -2622,7 +2622,7 @@ static void setup_srbchannel(pa_native_connection *c) {
return;
}
if (!(c->rw_mempool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, c->protocol->core->shm_size))) {
if (!(c->rw_mempool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, c->protocol->core->shm_size, true))) {
pa_log_warn("Disabling srbchannel, reason: Failed to allocate shared "
"writable memory pool.");
return;

View file

@ -41,7 +41,11 @@ typedef struct pa_shm {
*
* When we don't have ownership for the memfd fd in question (e.g.
* pa_shm_attach()), or the file descriptor has now been closed,
* this is set to -1. */
* this is set to -1.
*
* For the special case of a global mempool, we keep this fd
* always open. Check comments on top of pa_mempool_new() for
* rationale. */
int fd;
} pa_shm;

View file

@ -76,7 +76,7 @@ static void run_mix_test(
samples_ref = out_ref + (8 - align);
nsamples = channels * (SAMPLES - (8 - align));
fail_unless((pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0)) != NULL, NULL);
fail_unless((pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true)) != NULL, NULL);
pa_random(samples0, nsamples * sizeof(int16_t));
c0.memblock = pa_memblock_new_fixed(pool, samples0, nsamples * sizeof(int16_t), false);

View file

@ -136,7 +136,7 @@ START_TEST (lfe_filter_test) {
a.format = PA_SAMPLE_S16NE;
lft.ss = &a;
pa_assert_se(lft.pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0));
pa_assert_se(lft.pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true));
/* We prepare pseudo-random input audio samples for lfe-filter rewind testing*/
ori_sample_ptr = pa_xmalloc(pa_frame_size(lft.ss) * TOTAL_SAMPLES);

View file

@ -36,7 +36,7 @@ int main(int argc, char *argv[]) {
pa_mcalign *a;
pa_memchunk c;
p = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0);
p = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true);
a = pa_mcalign_new(11);

View file

@ -81,11 +81,11 @@ START_TEST (memblock_test) {
const char txt[] = "This is a test!";
pool_a = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0);
pool_a = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0, true);
fail_unless(pool_a != NULL);
pool_b = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0);
pool_b = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0, true);
fail_unless(pool_b != NULL);
pool_c = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0);
pool_c = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0, true);
fail_unless(pool_c != NULL);
pa_mempool_get_shm_id(pool_a, &id_a);

View file

@ -108,7 +108,7 @@ START_TEST (memblockq_test) {
pa_log_set_level(PA_LOG_DEBUG);
p = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0);
p = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true);
silence.memblock = pa_memblock_new_fixed(p, (char*) "__", 2, 1);
fail_unless(silence.memblock != NULL);

View file

@ -286,7 +286,7 @@ START_TEST (mix_test) {
if (!getenv("MAKE_CHECK"))
pa_log_set_level(PA_LOG_DEBUG);
fail_unless((pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0)) != NULL, NULL);
fail_unless((pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true)) != NULL, NULL);
a.channels = 1;
a.rate = 44100;

View file

@ -51,7 +51,7 @@ int main(int argc, char *argv[]) {
pa_log_set_level(PA_LOG_DEBUG);
pa_assert_se(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0));
pa_assert_se(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true));
for (i = 0; maps[i].channels > 0; i++)
for (j = 0; maps[j].channels > 0; j++) {

View file

@ -404,7 +404,7 @@ int main(int argc, char *argv[]) {
}
ret = 0;
pa_assert_se(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0));
pa_assert_se(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true));
if (!all_formats) {

View file

@ -85,7 +85,7 @@ START_TEST (srbchannel_test) {
int pipefd[4];
pa_mainloop *ml = pa_mainloop_new();
pa_mempool *mp = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0);
pa_mempool *mp = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0, true);
pa_iochannel *io1, *io2;
pa_pstream *p1, *p2;
pa_srbchannel *sr1, *sr2;