From dbedd09d4294b62284980520a1e2271bb5bf4afe Mon Sep 17 00:00:00 2001
From: Wim Taymans <wtaymans@redhat.com>
Date: Wed, 3 Apr 2024 15:02:30 +0200
Subject: [PATCH] settings: add link.min-buffers option

Add link.min-buffers option to set the minimum amount of buffers to
create for links. Set this by default to 2 because we really need two
now in case the sink xruns and does async mixing.

Make this an option because in low-memory cases and when xruns are not
expected, we can set this to 1.
---
 doc/dox/config/pipewire.conf.5.md  |  4 ++++
 src/daemon/minimal.conf.in         |  1 +
 src/daemon/pipewire-vulkan.conf.in |  2 --
 src/daemon/pipewire.conf.in        |  1 +
 src/pipewire/buffers.c             | 13 +++++++------
 src/pipewire/private.h             |  1 +
 src/pipewire/settings.c            |  5 +++++
 7 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/doc/dox/config/pipewire.conf.5.md b/doc/dox/config/pipewire.conf.5.md
index 4a143cc6c..3542f19be 100644
--- a/doc/dox/config/pipewire.conf.5.md
+++ b/doc/dox/config/pipewire.conf.5.md
@@ -217,6 +217,10 @@ Default video rate denominator
 @PAR@ pipewire.conf library.name.system = support/libspa-support
 The name of the shared library to use for the system functions for the main thread.
 
+@PAR@ pipewire.conf link.min-buffers = 2
+The minimum number of buffers to negotiate between nodes. Using 1 buffer will consume
+less memory but might cause glitches when using async nodes.
+
 @PAR@ pipewire.conf link.max-buffers = 64
 The maximum number of buffers to negotiate between nodes. Note that version < 3 clients
 can only support 16 buffers. More buffers is almost always worse than less, latency
diff --git a/src/daemon/minimal.conf.in b/src/daemon/minimal.conf.in
index 15ef3dbb1..b108fe580 100644
--- a/src/daemon/minimal.conf.in
+++ b/src/daemon/minimal.conf.in
@@ -13,6 +13,7 @@ context.properties = {
     #library.name.system = support/libspa-support
     #context.data-loop.library.name.system = support/libspa-support
     #support.dbus = true
+    #link.min-buffers = 2
     #link.max-buffers = 64
     link.max-buffers = 16           # version < 3 clients can't handle more
     #mem.warn-mlock = false
diff --git a/src/daemon/pipewire-vulkan.conf.in b/src/daemon/pipewire-vulkan.conf.in
index eae71d35c..b00c8d353 100644
--- a/src/daemon/pipewire-vulkan.conf.in
+++ b/src/daemon/pipewire-vulkan.conf.in
@@ -9,8 +9,6 @@ context.properties = {
     #library.name.system = support/libspa-support
     #context.data-loop.library.name.system = support/libspa-support
     #support.dbus = true
-    #link.max-buffers = 64
-    #link.max-buffers = 16          # version < 3 clients can't handle more
     #mem.warn-mlock = false
     #mem.allow-mlock = true
     #mem.mlock-all = false
diff --git a/src/daemon/pipewire.conf.in b/src/daemon/pipewire.conf.in
index e6e216281..da527615f 100644
--- a/src/daemon/pipewire.conf.in
+++ b/src/daemon/pipewire.conf.in
@@ -13,6 +13,7 @@ context.properties = {
     #library.name.system = support/libspa-support
     #context.data-loop.library.name.system = support/libspa-support
     #support.dbus = true
+    #link.min-buffers = 2
     #link.max-buffers = 64
     link.max-buffers = 16           # version < 3 clients can't handle more
     #mem.warn-mlock = false
diff --git a/src/pipewire/buffers.c b/src/pipewire/buffers.c
index 96531b9b9..90b4bf3c1 100644
--- a/src/pipewire/buffers.c
+++ b/src/pipewire/buffers.c
@@ -98,8 +98,8 @@ static int alloc_buffers(struct pw_mempool *pool,
 		data = NULL;
 	}
 
-	pw_log_debug("%p: layout buffers skel:%p data:%p buffers:%p",
-			allocation, skel, data, buffers);
+	pw_log_debug("%p: layout buffers skel:%p data:%p n_buffers:%d buffers:%p",
+			allocation, skel, data, n_buffers, buffers);
 	spa_buffer_alloc_layout_array(&info, n_buffers, buffers, skel, data);
 
 	allocation->mem = m;
@@ -295,9 +295,6 @@ int pw_buffers_negotiate(struct pw_context *context, uint32_t flags,
 			align = SPA_MAX(align, qalign);
 			types = qtypes;
 
-			if (SPA_FLAG_IS_SET(flags, PW_BUFFERS_FLAG_ASYNC))
-				max_buffers = SPA_MAX(2u, max_buffers);
-
 			pw_log_debug("%p: %d %d %d %d %d %d -> %d %zd %zd %d %zd %d", result,
 					qblocks, qminsize, qstride, qmax_buffers, qalign, qtypes,
 					blocks, minsize, stride, max_buffers, align, types);
@@ -306,9 +303,13 @@ int pw_buffers_negotiate(struct pw_context *context, uint32_t flags,
 	if (i == n_params) {
 		pw_log_warn("%p: no buffers param", result);
 		minsize = context->settings.clock_quantum_limit;
-		max_buffers = 2;
+		max_buffers = 2u;
 	}
 
+	if (SPA_FLAG_IS_SET(flags, PW_BUFFERS_FLAG_ASYNC))
+		max_buffers = SPA_MAX(2u, max_buffers);
+	max_buffers = SPA_MAX(context->settings.link_min_buffers, max_buffers);
+
 	if (SPA_FLAG_IS_SET(flags, PW_BUFFERS_FLAG_SHARED_MEM)) {
 		if (types != SPA_ID_INVALID)
 			SPA_FLAG_CLEAR(types, 1<<SPA_DATA_MemPtr);
diff --git a/src/pipewire/private.h b/src/pipewire/private.h
--- a/src/pipewire/private.h
+++ b/src/pipewire/private.h
@@ ... @@ struct settings {
+	uint32_t link_min_buffers;
diff --git a/src/pipewire/settings.c b/src/pipewire/settings.c
--- a/src/pipewire/settings.c
+++ b/src/pipewire/settings.c
@@ ... @@
+#define DEFAULT_LINK_MIN_BUFFERS	2u
@@ ... @@ void pw_settings_init(struct pw_context *this)
 	d->log_level = get_default_int(p, "log.level", pw_log_level);
 	d->clock_power_of_two_quantum = get_default_bool(p, "clock.power-of-two-quantum",
 			DEFAULT_CLOCK_POWER_OF_TWO_QUANTUM);
+	d->link_min_buffers = get_default_int(p, "link.min-buffers", DEFAULT_LINK_MIN_BUFFERS);
 	d->link_max_buffers = get_default_int(p, "link.max-buffers", DEFAULT_LINK_MAX_BUFFERS);
 	d->mem_warn_mlock = get_default_bool(p, "mem.warn-mlock", DEFAULT_MEM_WARN_MLOCK);
 	d->mem_allow_mlock = get_default_bool(p, "mem.allow-mlock", DEFAULT_MEM_ALLOW_MLOCK);
@@ -229,6 +231,9 @@ void pw_settings_init(struct pw_context *this)
 	d->check_quantum = get_default_bool(p, "settings.check-quantum", DEFAULT_CHECK_QUANTUM);
 	d->check_rate = get_default_bool(p, "settings.check-rate", DEFAULT_CHECK_RATE);
 
+	d->link_min_buffers = SPA_MAX(d->link_min_buffers, 1u);
+	d->link_max_buffers = SPA_MAX(d->link_max_buffers, d->link_min_buffers);
+
 	d->clock_quantum_limit = SPA_CLAMP(d->clock_quantum_limit,
 			CLOCK_QUANTUM_FLOOR, CLOCK_QUANTUM_LIMIT);
 	d->clock_quantum_floor = SPA_CLAMP(d->clock_quantum_floor,
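
A minimal usage sketch for the new option, not part of the patch itself: the
default of 2 and the trade-off are taken from the commit message, while the
drop-in path and file name are assumptions based on PipeWire's documented
config drop-in mechanism. With the patch applied, a low-memory system that
does not expect xruns could lower the minimum, e.g. in
~/.config/pipewire/pipewire.conf.d/10-min-buffers.conf:

    context.properties = {
        # assumption for illustration: trade the default of 2 buffers for
        # lower memory use; 1 buffer may glitch when async nodes xrun
        link.min-buffers = 1
    }

pw_settings_init() clamps the value to at least 1 and raises
link.max-buffers to match, so an out-of-range setting cannot break
negotiation.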