Mirror of https://gitlab.freedesktop.org/pulseaudio/pulseaudio.git

Commit 7138fa0272: update master
227 changed files with 67492 additions and 3107 deletions
@@ -21,6 +21,7 @@
License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/

#include <stdint.h>
#include <pulsecore/macro.h>

/*

@@ -103,10 +104,10 @@ static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
}

typedef struct pa_atomic_ptr {
volatile unsigned long value;
volatile uintptr_t value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }
#define PA_ATOMIC_PTR_INIT(v) { .value = (uintptr_t) (v) }

#ifdef HAVE_ATOMIC_BUILTINS_MEMORY_MODEL

@@ -117,7 +118,7 @@ static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void* p) {
__atomic_store_n(&a->value, (unsigned long) p, __ATOMIC_SEQ_CST);
__atomic_store_n(&a->value, (uintptr_t) p, __ATOMIC_SEQ_CST);
}

#else

@@ -128,14 +129,14 @@ static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
a->value = (unsigned long) p;
a->value = (uintptr_t) p;
__sync_synchronize();
}

#endif

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
return __sync_bool_compare_and_swap(&a->value, (long) old_p, (long) new_p);
return __sync_bool_compare_and_swap(&a->value, (uintptr_t) old_p, (uintptr_t) new_p);
}

#elif defined(__NetBSD__) && defined(HAVE_SYS_ATOMIC_H)

@@ -284,7 +285,7 @@ static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* n

#elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))

#warn "The native atomic operations implementation for AMD64 has not been tested thoroughly. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: test the native atomic operations implementation for AMD64, fix libatomic_ops, or upgrade your GCC."
#warning "The native atomic operations implementation for AMD64 has not been tested thoroughly. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: test the native atomic operations implementation for AMD64, fix libatomic_ops, or upgrade your GCC."

/* Adapted from glibc */
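/* Illustrative sketch, not part of this commit: the hunks above swap
 * "unsigned long" for uintptr_t as the integer carrier for pointer values.
 * uintptr_t is the only standard integer type guaranteed to round-trip a
 * pointer on every data model; on LLP64 targets (for example 64-bit Windows)
 * unsigned long is 32 bits wide while pointers are 64 bits. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int object = 42;
    uintptr_t slot = (uintptr_t) &object;  /* store the pointer as an integer */
    int *back = (int *) slot;              /* recover it without losing bits */

    printf("sizeof(unsigned long)=%zu sizeof(uintptr_t)=%zu sizeof(void*)=%zu\n",
           sizeof(unsigned long), sizeof(uintptr_t), sizeof(void *));
    printf("round-trip ok: %d\n", *back == 42);
    return 0;
}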
@@ -467,10 +467,17 @@ int pa_card_suspend(pa_card *c, bool suspend, pa_suspend_cause_t cause) {
}

static int card_message_handler(const char *object_path, const char *message, const pa_json_object *parameters, char **response, void *userdata) {
<<<<<<< HEAD
pa_card *c;
char *message_handler_path;

pa_assert(c = (pa_card *) userdata);
=======
pa_card *c = userdata;
char *message_handler_path;

pa_assert(c);
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336
pa_assert(message);
pa_assert(response);
@@ -424,7 +424,7 @@ static int pa_cli_command_info(pa_core *c, pa_tokenizer *t, pa_strbuf *buf, bool

static int pa_cli_command_load(pa_core *c, pa_tokenizer *t, pa_strbuf *buf, bool *fail) {
const char *name;
pa_error_code_t err;
int err;
pa_module *m = NULL;

pa_core_assert_ref(c);

@@ -438,7 +438,7 @@ static int pa_cli_command_load(pa_core *c, pa_tokenizer *t, pa_strbuf *buf, bool
}

if ((err = pa_module_load(&m, c, name, pa_tokenizer_get(t, 2))) < 0) {
if (err == PA_ERR_EXIST) {
if (err == -PA_ERR_EXIST) {
pa_strbuf_puts(buf, "Module already loaded; ignoring.\n");
} else {
pa_strbuf_puts(buf, "Module load failed.\n");

@@ -1038,7 +1038,9 @@ static int pa_cli_command_sink_default(pa_core *c, pa_tokenizer *t, pa_strbuf *b
return -1;
}

if ((s = pa_namereg_get(c, n, PA_NAMEREG_SINK)))
if (pa_streq(n, "@NONE@"))
pa_core_set_configured_default_sink(c, NULL);
else if ((s = pa_namereg_get(c, n, PA_NAMEREG_SINK)))
pa_core_set_configured_default_sink(c, s->name);
else
pa_strbuf_printf(buf, "Sink %s does not exist.\n", n);

@@ -1060,7 +1062,9 @@ static int pa_cli_command_source_default(pa_core *c, pa_tokenizer *t, pa_strbuf
return -1;
}

if ((s = pa_namereg_get(c, n, PA_NAMEREG_SOURCE)))
if (pa_streq(n, "@NONE@"))
pa_core_set_configured_default_source(c, NULL);
else if ((s = pa_namereg_get(c, n, PA_NAMEREG_SOURCE)))
pa_core_set_configured_default_source(c, s->name);
else
pa_strbuf_printf(buf, "Source %s does not exist.\n", n);
@@ -65,19 +65,7 @@ pa_usec_t pa_rtclock_age(const struct timeval *tv) {

struct timeval *pa_rtclock_get(struct timeval *tv) {

#if defined(OS_IS_DARWIN)
uint64_t val, abs_time = mach_absolute_time();
Nanoseconds nanos;

nanos = AbsoluteToNanoseconds(*(AbsoluteTime *) &abs_time);
val = *(uint64_t *) &nanos;

tv->tv_sec = val / PA_NSEC_PER_SEC;
tv->tv_usec = (val % PA_NSEC_PER_SEC) / PA_NSEC_PER_USEC;

return tv;

#elif defined(HAVE_CLOCK_GETTIME)
#if defined(HAVE_CLOCK_GETTIME)
struct timespec ts;

#ifdef CLOCK_MONOTONIC

@@ -97,6 +85,17 @@ struct timeval *pa_rtclock_get(struct timeval *tv) {
tv->tv_sec = ts.tv_sec;
tv->tv_usec = ts.tv_nsec / PA_NSEC_PER_USEC;

return tv;
#elif defined(OS_IS_DARWIN)
uint64_t val, abs_time = mach_absolute_time();
Nanoseconds nanos;

nanos = AbsoluteToNanoseconds(*(AbsoluteTime *) &abs_time);
val = *(uint64_t *) &nanos;

tv->tv_sec = val / PA_NSEC_PER_SEC;
tv->tv_usec = (val % PA_NSEC_PER_SEC) / PA_NSEC_PER_USEC;

return tv;
#elif defined(OS_IS_WIN32)
if (counter_freq > 0) {
@@ -407,6 +407,7 @@ finish:
* by the caller. */
ssize_t pa_read(int fd, void *buf, size_t count, int *type) {

errno = 0;
#ifdef OS_IS_WIN32

if (!type || *type == 0) {
@@ -40,6 +40,10 @@
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/strbuf.h>
<<<<<<< HEAD
=======
#include <pulsecore/namereg.h>
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336

#include "core.h"

@@ -86,9 +90,15 @@ static char *message_handler_list(pa_core *c) {
}

static int core_message_handler(const char *object_path, const char *message, const pa_json_object *parameters, char **response, void *userdata) {
<<<<<<< HEAD
pa_core *c;

pa_assert(c = (pa_core *) userdata);
=======
pa_core *c = userdata;

pa_assert(c);
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336
pa_assert(message);
pa_assert(response);
pa_assert(pa_safe_streq(object_path, "/core"));

@@ -149,6 +159,10 @@ pa_core* pa_core_new(pa_mainloop_api *m, bool shared, bool enable_memfd, size_t

c->default_source = NULL;
c->default_sink = NULL;
c->configured_default_source = NULL;
c->configured_default_sink = NULL;
c->policy_default_source = NULL;
c->policy_default_sink = NULL;

c->default_sample_spec.format = PA_SAMPLE_S16NE;
c->default_sample_spec.rate = 44100;

@@ -261,6 +275,8 @@ static void core_free(pa_object *o) {
pa_assert(!c->default_sink);
pa_xfree(c->configured_default_source);
pa_xfree(c->configured_default_sink);
pa_xfree(c->policy_default_source);
pa_xfree(c->policy_default_sink);

pa_silence_cache_done(&c->silence_cache);
pa_mempool_unref(c->mempool);

@@ -271,6 +287,36 @@ static void core_free(pa_object *o) {
pa_xfree(c);
}

static bool is_sink_available(pa_core *core, const char *sink_name) {
pa_sink *sink;

if (!(sink = pa_namereg_get(core, sink_name, PA_NAMEREG_SINK)))
return false;

if (!PA_SINK_IS_LINKED(sink->state))
return false;

if (sink->active_port && sink->active_port->available == PA_AVAILABLE_NO)
return false;

return true;
}

static bool is_source_available(pa_core *core, const char *source_name) {
pa_source *source;

if (!(source = pa_namereg_get(core, source_name, PA_NAMEREG_SOURCE)))
return false;

if (!PA_SOURCE_IS_LINKED(source->state))
return false;

if (source->active_port && source->active_port->available == PA_AVAILABLE_NO)
return false;

return true;
}

void pa_core_set_configured_default_sink(pa_core *core, const char *sink) {
char *old_sink;
@@ -278,13 +324,21 @@ void pa_core_set_configured_default_sink(pa_core *core, const char *sink) {

old_sink = pa_xstrdup(core->configured_default_sink);

if (pa_safe_streq(sink, old_sink))
/* The default sink was overwritten by the policy default sink, but the user is
* now setting a new default manually. Clear the policy default sink. */
if (core->policy_default_sink && is_sink_available(core, core->policy_default_sink)) {
pa_xfree(core->policy_default_sink);
core->policy_default_sink = NULL;

} else if (pa_safe_streq(sink, old_sink))
goto finish;

pa_xfree(core->configured_default_sink);
core->configured_default_sink = pa_xstrdup(sink);
pa_log_info("configured_default_sink: %s -> %s",
old_sink ? old_sink : "(unset)", sink ? sink : "(unset)");
if (!pa_safe_streq(sink, old_sink)) {
pa_log_info("configured_default_sink: %s -> %s",
old_sink ? old_sink : "(unset)", sink ? sink : "(unset)");
}
pa_subscription_post(core, PA_SUBSCRIPTION_EVENT_SERVER | PA_SUBSCRIPTION_EVENT_CHANGE, PA_INVALID_INDEX);

pa_core_update_default_sink(core);

@@ -300,12 +354,64 @@ void pa_core_set_configured_default_source(pa_core *core, const char *source) {

old_source = pa_xstrdup(core->configured_default_source);

if (pa_safe_streq(source, old_source))
/* The default source was overwritten by the policy default source, but the user is
* now setting a new default manually. Clear the policy default source. */
if (core->policy_default_source && is_source_available(core, core->policy_default_source)) {
pa_xfree(core->policy_default_source);
core->policy_default_source = NULL;

} else if (pa_safe_streq(source, old_source))
goto finish;

pa_xfree(core->configured_default_source);
core->configured_default_source = pa_xstrdup(source);
pa_log_info("configured_default_source: %s -> %s",
if (!pa_safe_streq(source, old_source)) {
pa_log_info("configured_default_source: %s -> %s",
old_source ? old_source : "(unset)", source ? source : "(unset)");
}
pa_subscription_post(core, PA_SUBSCRIPTION_EVENT_SERVER | PA_SUBSCRIPTION_EVENT_CHANGE, PA_INVALID_INDEX);

pa_core_update_default_source(core);

finish:
pa_xfree(old_source);
}

void pa_core_set_policy_default_sink(pa_core *core, const char *sink) {
char *old_sink;

pa_assert(core);

old_sink = pa_xstrdup(core->policy_default_sink);

if (pa_safe_streq(sink, old_sink))
goto finish;

pa_xfree(core->policy_default_sink);
core->policy_default_sink = pa_xstrdup(sink);
pa_log_info("policy_default_sink: %s -> %s",
old_sink ? old_sink : "(unset)", sink ? sink : "(unset)");
pa_subscription_post(core, PA_SUBSCRIPTION_EVENT_SERVER | PA_SUBSCRIPTION_EVENT_CHANGE, PA_INVALID_INDEX);

pa_core_update_default_sink(core);

finish:
pa_xfree(old_sink);
}

void pa_core_set_policy_default_source(pa_core *core, const char *source) {
char *old_source;

pa_assert(core);

old_source = pa_xstrdup(core->policy_default_source);

if (pa_safe_streq(source, old_source))
goto finish;

pa_xfree(core->policy_default_source);
core->policy_default_source = pa_xstrdup(source);
pa_log_info("policy_default_source: %s -> %s",
old_source ? old_source : "(unset)", source ? source : "(unset)");
pa_subscription_post(core, PA_SUBSCRIPTION_EVENT_SERVER | PA_SUBSCRIPTION_EVENT_CHANGE, PA_INVALID_INDEX);
@@ -331,7 +437,14 @@ static int compare_sinks(pa_sink *a, pa_sink *b) {
&& (!a->active_port || a->active_port->available != PA_AVAILABLE_NO))
return 1;

/* The configured default sink is preferred over any other sink. */
/* The policy default sink is preferred over any other sink. */
if (pa_safe_streq(b->name, core->policy_default_sink))
return -1;
if (pa_safe_streq(a->name, core->policy_default_sink))
return 1;

/* The configured default sink is preferred over any other sink
* except the policy default sink. */
if (pa_safe_streq(b->name, core->configured_default_sink))
return -1;
if (pa_safe_streq(a->name, core->configured_default_sink))

@@ -412,7 +525,14 @@ static int compare_sources(pa_source *a, pa_source *b) {
&& (!a->active_port || a->active_port->available != PA_AVAILABLE_NO))
return 1;

/* The configured default source is preferred over any other source. */
/* The policy default source is preferred over any other source. */
if (pa_safe_streq(b->name, core->policy_default_source))
return -1;
if (pa_safe_streq(a->name, core->policy_default_source))
return 1;

/* The configured default source is preferred over any other source
* except the policy default source. */
if (pa_safe_streq(b->name, core->configured_default_source))
return -1;
if (pa_safe_streq(a->name, core->configured_default_source))
@@ -176,6 +176,13 @@ struct pa_core {
char *configured_default_sink;
char *configured_default_source;

/* The default sink/source set by some policy module. This will override
* the user configured default sink/source, so that the default will
* return to the user configured sink/source once the sink/source set by
* the policy module is no longer available. */
char *policy_default_sink;
char *policy_default_source;

/* The effective default sink/source. If no sink or source is explicitly
* configured as the default, we pick the device that ranks highest
* according to the compare_sinks() and compare_sources() functions in

@@ -249,6 +256,8 @@ pa_core* pa_core_new(pa_mainloop_api *m, bool shared, bool enable_memfd, size_t

void pa_core_set_configured_default_sink(pa_core *core, const char *sink);
void pa_core_set_configured_default_source(pa_core *core, const char *source);
void pa_core_set_policy_default_sink(pa_core *core, const char *sink);
void pa_core_set_policy_default_source(pa_core *core, const char *source);

/* These should be called whenever something changes that may affect the
* default sink or source choice.
@@ -34,7 +34,11 @@
typedef struct pa_creds pa_creds;
typedef struct pa_cmsg_ancil_data pa_cmsg_ancil_data;

<<<<<<< HEAD
#if defined(SCM_CREDENTIALS) || defined(SCM_CREDS)
=======
#if (defined(SCM_CREDENTIALS) || defined(SCM_CREDS)) && !defined(OS_IS_DARWIN)
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336

#define HAVE_CREDS 1
@@ -737,7 +737,7 @@ pa_proplist *pa_dbus_get_proplist_arg(DBusConnection *c, DBusMessage *msg, DBusM
pa_assert(msg);
pa_assert(iter);

pa_assert(signature = dbus_message_iter_get_signature(iter));
pa_assert_se(signature = dbus_message_iter_get_signature(iter));
pa_assert_se(pa_streq(signature, "a{say}"));

dbus_free(signature);
@@ -258,6 +258,20 @@ void* pa_idxset_get_by_data(pa_idxset*s, const void *p, uint32_t *idx) {
return e->data;
}

bool pa_idxset_contains(pa_idxset *s, const void *p) {
unsigned hash;
struct idxset_entry *e;

pa_assert(s);

hash = s->hash_func(p) % NBUCKETS;

if (!(e = data_scan(s, hash, p)))
return false;

return e->data == p;
}

void* pa_idxset_remove_by_index(pa_idxset*s, uint32_t idx) {
struct idxset_entry *e;
unsigned hash;

@@ -367,6 +381,39 @@ at_end:
return NULL;
}

void *pa_idxset_reverse_iterate(pa_idxset *s, void **state, uint32_t *idx) {
struct idxset_entry *e;

pa_assert(s);
pa_assert(state);

if (*state == (void*) -1)
goto at_end;

if ((!*state && !s->iterate_list_tail))
goto at_end;

e = *state ? *state : s->iterate_list_tail;

if (e->iterate_previous)
*state = e->iterate_previous;
else
*state = (void*) -1;

if (idx)
*idx = e->idx;

return e->data;

at_end:
*state = (void *) -1;

if (idx)
*idx = PA_IDXSET_INVALID;

return NULL;
}

void* pa_idxset_steal_first(pa_idxset *s, uint32_t *idx) {
void *data;
@@ -385,6 +432,24 @@ void* pa_idxset_steal_first(pa_idxset *s, uint32_t *idx) {
return data;
}

void* pa_idxset_steal_last(pa_idxset *s, uint32_t *idx) {
void *data;

pa_assert(s);

if (!s->iterate_list_tail)
return NULL;

data = s->iterate_list_tail->data;

if (idx)
*idx = s->iterate_list_tail->idx;

remove_entry(s, s->iterate_list_tail);

return data;
}

void* pa_idxset_first(pa_idxset *s, uint32_t *idx) {
pa_assert(s);

@@ -400,6 +465,21 @@ void* pa_idxset_first(pa_idxset *s, uint32_t *idx) {
return s->iterate_list_head->data;
}

void* pa_idxset_last(pa_idxset *s, uint32_t *idx) {
pa_assert(s);

if (!s->iterate_list_tail) {
if (idx)
*idx = PA_IDXSET_INVALID;
return NULL;
}

if (idx)
*idx = s->iterate_list_tail->idx;

return s->iterate_list_tail->data;
}

void *pa_idxset_next(pa_idxset *s, uint32_t *idx) {
struct idxset_entry *e;
unsigned hash;
@@ -444,6 +524,50 @@ void *pa_idxset_next(pa_idxset *s, uint32_t *idx) {
}
}

void *pa_idxset_previous(pa_idxset *s, uint32_t *idx) {
struct idxset_entry *e;
unsigned hash;

pa_assert(s);
pa_assert(idx);

if (*idx == PA_IDXSET_INVALID)
return NULL;

hash = *idx % NBUCKETS;

if ((e = index_scan(s, hash, *idx))) {

e = e->iterate_previous;

if (e) {
*idx = e->idx;
return e->data;
} else {
*idx = PA_IDXSET_INVALID;
return NULL;
}

} else {

/* If the entry passed doesn't exist anymore we try to find
* the preceding one. */

for ((*idx)--; *idx < s->current_index; (*idx)--) {

hash = *idx % NBUCKETS;

if ((e = index_scan(s, hash, *idx))) {
*idx = e->idx;
return e->data;
}
}

*idx = PA_IDXSET_INVALID;
return NULL;
}
}

unsigned pa_idxset_size(pa_idxset*s) {
pa_assert(s);
@@ -456,6 +580,40 @@ bool pa_idxset_isempty(pa_idxset *s) {
return s->n_entries == 0;
}

bool pa_idxset_isdisjoint(pa_idxset *s, pa_idxset *t) {
struct idxset_entry *i;

pa_assert(s);
pa_assert(t);

for (i = s->iterate_list_head; i; i = i->iterate_next)
if (pa_idxset_contains(t, i->data))
return false;

return true;
}

bool pa_idxset_issubset(pa_idxset *s, pa_idxset *t) {
struct idxset_entry *i;

pa_assert(s);
pa_assert(t);

for (i = s->iterate_list_head; i; i = i->iterate_next)
if (!pa_idxset_contains(t, i->data))
return false;

return true;
}

bool pa_idxset_issuperset(pa_idxset *s, pa_idxset *t) {
return pa_idxset_issubset(t, s);
}

bool pa_idxset_equals(pa_idxset *s, pa_idxset *t) {
return pa_idxset_issubset(s, t) && pa_idxset_issuperset(s, t);
}

pa_idxset *pa_idxset_copy(pa_idxset *s, pa_copy_func_t copy_func) {
pa_idxset *copy;
struct idxset_entry *i;
@@ -66,6 +66,9 @@ void* pa_idxset_get_by_index(pa_idxset*s, uint32_t idx);
/* Get the entry by its data. The index is returned in *idx */
void* pa_idxset_get_by_data(pa_idxset*s, const void *p, uint32_t *idx);

/* Return true if item is in idxset */
bool pa_idxset_contains(pa_idxset *s, const void *p);

/* Similar to pa_idxset_get_by_index(), but removes the entry from the idxset. */
void* pa_idxset_remove_by_index(pa_idxset*s, uint32_t idx);

@@ -85,18 +88,25 @@ void* pa_idxset_rrobin(pa_idxset *s, uint32_t *idx);

/* Iterate through the idxset. At first iteration state should be NULL */
void *pa_idxset_iterate(pa_idxset *s, void **state, uint32_t *idx);
void *pa_idxset_reverse_iterate(pa_idxset *s, void **state, uint32_t *idx);

/* Return the oldest entry in the idxset and remove it. If idx is not NULL fill in its index in *idx */
/* Return the oldest or newest entry in the idxset and remove it.
* If idx is not NULL fill in its index in *idx */
void* pa_idxset_steal_first(pa_idxset *s, uint32_t *idx);
void* pa_idxset_steal_last(pa_idxset *s, uint32_t *idx);

/* Return the oldest entry in the idxset. Fill in its index in *idx. */
/* Return the oldest or newest entry in the idxset.
* Fill in its index in *idx. */
void* pa_idxset_first(pa_idxset *s, uint32_t *idx);
void* pa_idxset_last(pa_idxset *s, uint32_t *idx);

/* Return the entry following the entry indexed by *idx. After the
* call *index contains the index of the returned
* object. pa_idxset_first() and pa_idxset_next() may be used to
* iterate through the set.*/
/* Return the entry following or preceding the entry indexed by *idx.
* After the call *index contains the index of the returned object.
* pa_idxset_first() and pa_idxset_next() may be used to iterate through
* the set. pa_idxset_last() and pa_idxset_previous() may be used to
* iterate through the set in reverse. */
void *pa_idxset_next(pa_idxset *s, uint32_t *idx);
void *pa_idxset_previous(pa_idxset *s, uint32_t *idx);

/* Return the current number of entries in the idxset */
unsigned pa_idxset_size(pa_idxset*s);

@@ -104,6 +114,18 @@ unsigned pa_idxset_size(pa_idxset*s);
/* Return true of the idxset is empty */
bool pa_idxset_isempty(pa_idxset *s);

/* Return true if s and t have no entries in common */
bool pa_idxset_isdisjoint(pa_idxset *s, pa_idxset *t);

/* Return true if all entries in s are also in t */
bool pa_idxset_issubset(pa_idxset *s, pa_idxset *t);

/* Return true if all entries in t are also in s */
bool pa_idxset_issuperset(pa_idxset *s, pa_idxset *t);

/* Return true if s and t have all entries in common */
bool pa_idxset_equals(pa_idxset *s, pa_idxset *t);

/* Duplicate the idxset. This will not copy the actual indexes. If copy_func is
* set, each entry is copied using the provided function, otherwise a shallow
* copy will be made. */
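/* Hedged usage sketch, not part of this commit: how the reverse iteration API
 * declared above is meant to be driven. The loop only relies on the
 * pa_idxset_reverse_iterate() contract visible in this diff; the helper name
 * dump_newest_first() is hypothetical. */
static void dump_newest_first(pa_idxset *set) {
    void *state = NULL;
    uint32_t idx;
    void *item;

    /* With *state == NULL the first call returns the newest entry; each call
     * then walks one step towards the oldest entry and fills in its index. */
    for (item = pa_idxset_reverse_iterate(set, &state, &idx);
         item;
         item = pa_idxset_reverse_iterate(set, &state, &idx))
        pa_log_debug("entry %u: %p", idx, item);
}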
@@ -261,7 +261,11 @@ ssize_t pa_iochannel_read(pa_iochannel*io, void*data, size_t l) {

#ifdef HAVE_CREDS

<<<<<<< HEAD
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
=======
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__GNU__)
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336
typedef struct cmsgcred pa_ucred_t;
#define SCM_CREDENTIALS SCM_CREDS
#else

@@ -291,14 +295,22 @@ bool pa_iochannel_creds_supported(pa_iochannel *io) {
}

int pa_iochannel_creds_enable(pa_iochannel *io) {
<<<<<<< HEAD
#if !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
=======
#if !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__) && !defined(__GNU__)
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336
int t = 1;
#endif

pa_assert(io);
pa_assert(io->ifd >= 0);

<<<<<<< HEAD
#if !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
=======
#if !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__) && !defined(__GNU__)
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336
if (setsockopt(io->ifd, SOL_SOCKET, SO_PASSCRED, &t, sizeof(t)) < 0) {
pa_log_error("setsockopt(SOL_SOCKET, SO_PASSCRED): %s", pa_cstrerror(errno));
return -1;

@@ -334,7 +346,11 @@ ssize_t pa_iochannel_write_with_creds(pa_iochannel*io, const void*data, size_t l

u = (pa_ucred_t*) CMSG_DATA(&cmsg.hdr);

<<<<<<< HEAD
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
=======
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__GNU__)
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336
// the kernel fills everything
#else
u->pid = getpid();

@@ -457,7 +473,11 @@ ssize_t pa_iochannel_read_with_ancil_data(pa_iochannel*io, void*data, size_t l,
pa_ucred_t u;
pa_assert(cmh->cmsg_len == CMSG_LEN(sizeof(pa_ucred_t)));
memcpy(&u, CMSG_DATA(cmh), sizeof(pa_ucred_t));
<<<<<<< HEAD
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
=======
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__GNU__)
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336
ancil_data->creds.gid = u.cmcred_gid;
ancil_data->creds.uid = u.cmcred_uid;
#else
@@ -286,7 +286,6 @@ int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

pa_assert(uchunk->length % bq->base == 0);
pa_assert(uchunk->index % bq->base == 0);

if (!can_push(bq, uchunk->length))
return -1;

@@ -354,6 +353,7 @@ int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {

/* Drop it from the new entry */
p->index = q->index + (int64_t) d;
p->chunk.index = d;
p->chunk.length -= d;

/* Add it to the list */
@@ -66,4 +66,8 @@ static inline int memfd_create(const char *name, unsigned int flags) {

#endif /* HAVE_MEMFD && !HAVE_MEMFD_CREATE */

#ifndef MFD_NOEXEC_SEAL
#define MFD_NOEXEC_SEAL 0x0008U
#endif

#endif
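/* Hedged sketch, not part of this commit: the #define above only supplies the
 * MFD_NOEXEC_SEAL bit (0x0008U) when the libc headers are too old to know it.
 * A caller that wants non-executable memfds on new kernels but must still run
 * on kernels older than 6.3, which reject the unknown flag with EINVAL,
 * typically retries without it. Assumes <sys/mman.h> and <errno.h>; the helper
 * name is hypothetical. */
static int memfd_create_noexec_if_possible(const char *name) {
    int fd = memfd_create(name, MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_NOEXEC_SEAL);

    /* Older kernels do not know MFD_NOEXEC_SEAL; fall back to the plain flags. */
    if (fd < 0 && errno == EINVAL)
        fd = memfd_create(name, MFD_CLOEXEC | MFD_ALLOW_SEALING);

    return fd;
}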
@@ -44,6 +44,7 @@ libpulsecore_sources = [
'sink.c',
'sink-input.c',
'sioman.c',
'socket-server.c',
'sound-file-stream.c',
'sound-file.c',
'source.c',

@@ -101,6 +102,7 @@ libpulsecore_headers = [
'sink-input.h',
'sink.h',
'sioman.h',
'socket-server.h',
'sound-file-stream.h',
'sound-file.h',
'source-output.h',

@@ -222,7 +224,11 @@ libpulsecore = shared_library('pulsecore-' + pa_version_major_minor,
install_rpath : privlibdir,
install_dir : privlibdir,
link_with : libpulsecore_simd_lib,
<<<<<<< HEAD
dependencies : [libm_dep, libpulsecommon_dep, ltdl_dep, shm_dep, sndfile_dep, database_dep, dbus_dep, libatomic_ops_dep, orc_dep, samplerate_dep, soxr_dep, speex_dep, x11_dep, libintl_dep, platform_dep, platform_socket_dep,],
=======
dependencies : [libm_dep, libpulsecommon_dep, ltdl_dep, shm_dep, sndfile_dep, database_dep, dbus_dep, libatomic_ops_dep, orc_dep, samplerate_dep, soxr_dep, speex_dep, x11_dep, libsystemd_dep, libintl_dep, platform_dep, tcpwrap_dep, platform_socket_dep,],
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336
implicit_include_directories : false)

libpulsecore_dep = declare_dependency(link_with: libpulsecore)

@@ -249,7 +255,7 @@ libcli = shared_library('cli',
c_args : [pa_c_args, server_c_args, database_c_args],
link_args : [nodelete_link_args],
include_directories : [configinc, topinc],
dependencies : [libpulse_dep, libpulsecommon_dep, libpulsecore_dep],
dependencies : [libpulse_dep, libpulsecommon_dep, libpulsecore_dep, libatomic_ops_dep],
install : true,
install_rpath : privlibdir,
install_dir : modlibexecdir,

@@ -266,7 +272,7 @@ libprotocol_cli = shared_library('protocol-cli',
c_args : [pa_c_args, server_c_args, database_c_args],
link_args : [nodelete_link_args],
include_directories : [configinc, topinc],
dependencies : [libpulse_dep, libpulsecommon_dep, libpulsecore_dep, libcli_dep],
dependencies : [libpulse_dep, libpulsecommon_dep, libpulsecore_dep, libcli_dep, libatomic_ops_dep],
install : true,
install_rpath : rpath_dirs,
install_dir : modlibexecdir,

@@ -278,7 +284,7 @@ libprotocol_http = shared_library('protocol-http',
c_args : [pa_c_args, server_c_args, database_c_args],
link_args : [nodelete_link_args],
include_directories : [configinc, topinc],
dependencies : [libpulse_dep, libpulsecommon_dep, libpulsecore_dep],
dependencies : [libpulse_dep, libpulsecommon_dep, libpulsecore_dep, libatomic_ops_dep],
install : true,
install_rpath : privlibdir,
install_dir : modlibexecdir,

@@ -290,7 +296,7 @@ libprotocol_native = shared_library('protocol-native',
c_args : [pa_c_args, server_c_args, database_c_args],
link_args : [nodelete_link_args],
include_directories : [configinc, topinc],
dependencies : [libpulse_dep, libpulsecommon_dep, libpulsecore_dep, dbus_dep],
dependencies : [libpulse_dep, libpulsecommon_dep, libpulsecore_dep, dbus_dep, libatomic_ops_dep],
install : true,
install_rpath : privlibdir,
install_dir : modlibexecdir,

@@ -302,7 +308,7 @@ libprotocol_simple = shared_library('protocol-simple',
c_args : [pa_c_args, server_c_args, database_c_args],
link_args : [nodelete_link_args],
include_directories : [configinc, topinc],
dependencies : [libpulse_dep, libpulsecommon_dep, libpulsecore_dep],
dependencies : [libpulse_dep, libpulsecommon_dep, libpulsecore_dep, libatomic_ops_dep],
install : true,
install_rpath : privlibdir,
install_dir : modlibexecdir,
@@ -131,8 +131,31 @@ int pa_message_handler_send_message(pa_core *c, const char *object_path, const c
if (message_parameters) {
parameters = pa_json_parse(message_parameters);

<<<<<<< HEAD
if (!parameters)
return -PA_ERR_INVALID;
=======
if (!parameters) {
char *wrapped_message_parameters;

/* Message parameters is not a valid JSON
*
* Wrap message parameters into JSON string and try again.
* User might have missed double-quotes and passed ARGSTRING instead of proper JSON "ARGSTRING"
*/
pa_log_warn("Message parameters is not a valid JSON, wrapping into JSON string '\"%s\"'", message_parameters);

wrapped_message_parameters = pa_sprintf_malloc("\"%s\"", message_parameters);
parameters = pa_json_parse(wrapped_message_parameters);
pa_xfree(wrapped_message_parameters);

if (!parameters) {
pa_log_error("Message parameters is not a valid JSON object. Tried both '%s' and '\"%s\"'",
message_parameters, message_parameters);
return -PA_ERR_INVALID;
}
}
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336
}

/* The handler is expected to return an error code and may also
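/* Usage sketch, not part of this commit: effect of the JSON fallback added
 * above. The exact prototype of pa_message_handler_send_message() is assumed
 * from the truncated hunk header; the object path and message name are only
 * examples. */
static void send_message_example(pa_core *core) {
    char *response = NULL;

    /* Already valid JSON: parsed directly. */
    pa_message_handler_send_message(core, "/core", "list-handlers", "\"detail\"", &response);

    /* Bare token: pa_json_parse() fails, so the string is retried as "detail". */
    pa_message_handler_send_message(core, "/core", "list-handlers", "detail", &response);
}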
@@ -64,7 +64,8 @@ int pa_common_command_register_memfd_shmid(pa_pstream *p, pa_pdispatch *pd, uint
if (version < 31 || pa_tagstruct_getu32(t, &shm_id) < 0 || !pa_tagstruct_eof(t))
goto finish;

pa_pstream_attach_memfd_shmid(p, shm_id, ancil->fds[0]);
if (pa_pstream_attach_memfd_shmid(p, shm_id, ancil->fds[0]))
goto finish;

ret = 0;
finish:
@@ -109,8 +109,8 @@ static HANDLE
HandleFromFd (int fd)
{
/* since socket() returns a HANDLE already, try that first */
if (IsSocketHandle((HANDLE) fd))
return ((HANDLE) fd);
if (IsSocketHandle(PA_INT_TO_PTR(fd)))
return PA_INT_TO_PTR(fd);

return ((HANDLE) _get_osfhandle(fd));
}
@@ -51,10 +51,16 @@ static const gchar* _g_get_application_name(void) PA_GCC_WEAKREF(g_get_applicati
#if defined(HAVE_GTK) && defined(PA_GCC_WEAKREF)
#pragma GCC diagnostic ignored "-Wstrict-prototypes"
#include <gtk/gtk.h>
#include <gdk/gdkx.h>
static const gchar* _gtk_window_get_default_icon_name(void) PA_GCC_WEAKREF(gtk_window_get_default_icon_name);
#ifdef GDK_WINDOWING_X11
#include <gdk/gdkx.h>
<<<<<<< HEAD
static const gchar* _gtk_window_get_default_icon_name(void) PA_GCC_WEAKREF(gtk_window_get_default_icon_name);
=======
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336
static Display *_gdk_display PA_GCC_WEAKREF(gdk_display);
#endif
#endif

#include "proplist-util.h"

@@ -89,6 +95,7 @@ static void add_gtk_properties(pa_proplist *p) {
pa_proplist_sets(p, PA_PROP_APPLICATION_ICON_NAME, t);
}

#ifdef GDK_WINDOWING_X11
if (!pa_proplist_contains(p, PA_PROP_WINDOW_X11_DISPLAY))
if (&_gdk_display && _gdk_display) {
const char *t;

@@ -99,6 +106,7 @@ static void add_gtk_properties(pa_proplist *p) {
pa_proplist_sets(p, PA_PROP_WINDOW_X11_DISPLAY, t);
}

#endif
#endif
}
@@ -1260,7 +1260,7 @@ static void native_connection_send_memblock(pa_native_connection *c) {
if (schunk.length > r->buffer_attr.fragsize)
schunk.length = r->buffer_attr.fragsize;

pa_pstream_send_memblock(c->pstream, r->index, 0, PA_SEEK_RELATIVE, &schunk);
pa_pstream_send_memblock(c->pstream, r->index, 0, PA_SEEK_RELATIVE, &schunk, pa_memblockq_get_base(r->memblockq));

pa_memblockq_drop(r->memblockq, schunk.length);
pa_memblock_unref(schunk.memblock);

@@ -1422,6 +1422,8 @@ static int sink_input_process_msg(pa_msgobject *o, int code, void *userdata, int
s->write_index = pa_memblockq_get_write_index(s->memblockq);
s->render_memblockq_length = pa_memblockq_get_length(s->sink_input->thread_info.render_memblockq);
s->current_sink_latency = pa_sink_get_latency_within_thread(s->sink_input->sink, false);
/* Add resampler latency */
s->current_sink_latency += pa_resampler_get_delay_usec(i->thread_info.resampler);
s->underrun_for = s->sink_input->thread_info.underrun_for;
s->playing_for = s->sink_input->thread_info.playing_for;

@@ -1700,6 +1702,8 @@ static int source_output_process_msg(pa_msgobject *_o, int code, void *userdata,
/* Atomically get a snapshot of all timing parameters... */
s->current_monitor_latency = o->source->monitor_of ? pa_sink_get_latency_within_thread(o->source->monitor_of, false) : 0;
s->current_source_latency = pa_source_get_latency_within_thread(o->source, false);
/* Add resampler latency */
s->current_source_latency += pa_resampler_get_delay_usec(o->thread_info.resampler);
s->on_the_fly_snapshot = pa_atomic_load(&s->on_the_fly);
return 0;
}

@@ -2531,7 +2535,7 @@ static void setup_srbchannel(pa_native_connection *c, pa_mem_type_t shm_type) {
mc.memblock = srbt.memblock;
mc.index = 0;
mc.length = pa_memblock_get_length(srbt.memblock);
pa_pstream_send_memblock(c->pstream, 0, 0, 0, &mc);
pa_pstream_send_memblock(c->pstream, 0, 0, 0, &mc, 0);

c->srbpending = srb;
return;

@@ -4375,23 +4379,33 @@ static void command_set_default_sink_or_source(pa_pdispatch *pd, uint32_t comman
}

CHECK_VALIDITY(c->pstream, c->authorized, tag, PA_ERR_ACCESS);
CHECK_VALIDITY(c->pstream, !s || pa_namereg_is_valid_name(s), tag, PA_ERR_INVALID);
CHECK_VALIDITY(c->pstream, !s || pa_namereg_is_valid_name(s) || pa_safe_streq(s,"@NONE@"), tag, PA_ERR_INVALID);

if (command == PA_COMMAND_SET_DEFAULT_SOURCE) {
pa_source *source;
char *source_name = NULL;

source = pa_namereg_get(c->protocol->core, s, PA_NAMEREG_SOURCE);
CHECK_VALIDITY(c->pstream, source, tag, PA_ERR_NOENTITY);
if (!pa_safe_streq(s,"@NONE@")) {
pa_source *source;

pa_core_set_configured_default_source(c->protocol->core, source->name);
source = pa_namereg_get(c->protocol->core, s, PA_NAMEREG_SOURCE);
CHECK_VALIDITY(c->pstream, source, tag, PA_ERR_NOENTITY);
source_name = source->name;
}

pa_core_set_configured_default_source(c->protocol->core, source_name);
} else {
pa_sink *sink;
char *sink_name = NULL;
pa_assert(command == PA_COMMAND_SET_DEFAULT_SINK);

sink = pa_namereg_get(c->protocol->core, s, PA_NAMEREG_SINK);
CHECK_VALIDITY(c->pstream, sink, tag, PA_ERR_NOENTITY);
if (!pa_safe_streq(s,"@NONE@")) {
pa_sink *sink;

pa_core_set_configured_default_sink(c->protocol->core, sink->name);
sink = pa_namereg_get(c->protocol->core, s, PA_NAMEREG_SINK);
CHECK_VALIDITY(c->pstream, sink, tag, PA_ERR_NOENTITY);
sink_name = sink->name;
}

pa_core_set_configured_default_sink(c->protocol->core, sink_name);
}

pa_pstream_send_simple_ack(c->pstream, tag);

@@ -4715,7 +4729,7 @@ static void command_extension(pa_pdispatch *pd, uint32_t command, uint32_t tag,
CHECK_VALIDITY(c->pstream, m, tag, PA_ERR_NOEXTENSION);
CHECK_VALIDITY(c->pstream, m->load_once || idx != PA_INVALID_INDEX, tag, PA_ERR_INVALID);

cb = (pa_native_protocol_ext_cb_t) (unsigned long) pa_hashmap_get(c->protocol->extensions, m);
cb = pa_hashmap_get(c->protocol->extensions, m);
CHECK_VALIDITY(c->pstream, cb, tag, PA_ERR_NOEXTENSION);

if (cb(c->protocol, m, c, tag, t) < 0)

@@ -5064,9 +5078,9 @@ static void pstream_memblock_callback(pa_pstream *p, uint32_t channel, int64_t o
playback_stream *ps = PLAYBACK_STREAM(stream);

size_t frame_size = pa_frame_size(&ps->sink_input->sample_spec);
if (chunk->index % frame_size != 0 || chunk->length % frame_size != 0) {
pa_log_warn("Client sent non-aligned memblock: index %d, length %d, frame size: %d",
(int) chunk->index, (int) chunk->length, (int) frame_size);
if (chunk->length % frame_size != 0) {
pa_log_warn("Client sent non-aligned memblock: length %d, frame size: %d",
(int) chunk->length, (int) frame_size);
return;
}

@@ -5414,7 +5428,7 @@ int pa_native_protocol_install_ext(pa_native_protocol *p, pa_module *m, pa_nativ
pa_assert(cb);
pa_assert(!pa_hashmap_get(p->extensions, m));

pa_assert_se(pa_hashmap_put(p->extensions, m, (void*) (unsigned long) cb) == 0);
pa_assert_se(pa_hashmap_put(p->extensions, m, cb) == 0);
return 0;
}
@@ -82,6 +82,10 @@ typedef uint32_t pa_pstream_descriptor[PA_PSTREAM_DESCRIPTOR_MAX];
*/
#define FRAME_SIZE_MAX_ALLOW (1024*1024*16)

/* Default memblock alignment used with pa_pstream_send_memblock()
*/
#define DEFAULT_PSTREAM_MEMBLOCK_ALIGN (256)

PA_STATIC_FLIST_DECLARE(items, 0, pa_xfree);

struct item_info {

@@ -475,7 +479,7 @@ void pa_pstream_send_packet(pa_pstream*p, pa_packet *packet, pa_cmsg_ancil_data
p->mainloop->defer_enable(p->defer_event, 1);
}

void pa_pstream_send_memblock(pa_pstream*p, uint32_t channel, int64_t offset, pa_seek_mode_t seek_mode, const pa_memchunk *chunk) {
void pa_pstream_send_memblock(pa_pstream*p, uint32_t channel, int64_t offset, pa_seek_mode_t seek_mode, const pa_memchunk *chunk, size_t align) {
size_t length, idx;
size_t bsm;

@@ -492,6 +496,11 @@ void pa_pstream_send_memblock(pa_pstream*p, uint32_t channel, int64_t offset, pa

bsm = pa_mempool_block_size_max(p->mempool);

if (align == 0)
align = DEFAULT_PSTREAM_MEMBLOCK_ALIGN;

bsm = (bsm / align) * align;

while (length > 0) {
struct item_info *i;
size_t n;
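/* Worked example, not part of this commit, for the alignment logic added
 * above: pa_pstream_send_memblock() splits a memchunk into blocks of at most
 * pa_mempool_block_size_max() bytes, and that limit is first rounded down to
 * a multiple of the requested alignment so that every fragment stays
 * frame-aligned. All numbers below are illustrative. */
static void align_example(void) {
    size_t bsm = 65536;   /* assumed pa_mempool_block_size_max() result */
    size_t align = 6;     /* e.g. one 24-bit stereo frame */

    if (align == 0)
        align = 256;      /* DEFAULT_PSTREAM_MEMBLOCK_ALIGN in the hunk above */

    bsm = (bsm / align) * align;   /* 65536 -> 65532, a multiple of 6 */
    (void) bsm;
}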
@@ -51,7 +51,7 @@ void pa_pstream_unlink(pa_pstream *p);
int pa_pstream_attach_memfd_shmid(pa_pstream *p, unsigned shm_id, int memfd_fd);

void pa_pstream_send_packet(pa_pstream*p, pa_packet *packet, pa_cmsg_ancil_data *ancil_data);
void pa_pstream_send_memblock(pa_pstream*p, uint32_t channel, int64_t offset, pa_seek_mode_t seek, const pa_memchunk *chunk);
void pa_pstream_send_memblock(pa_pstream*p, uint32_t channel, int64_t offset, pa_seek_mode_t seek, const pa_memchunk *chunk, size_t align);
void pa_pstream_send_release(pa_pstream *p, uint32_t block_id);
void pa_pstream_send_revoke(pa_pstream *p, uint32_t block_id);
@@ -22,8 +22,10 @@
#endif

#include <string.h>
#include <math.h>

#include <pulse/xmalloc.h>
#include <pulse/timeval.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/strbuf.h>

@@ -120,6 +122,24 @@ static int (* const init_table[])(pa_resampler *r) = {
#endif
};

static void calculate_gcd(pa_resampler *r) {
unsigned gcd, n;

pa_assert(r);

gcd = r->i_ss.rate;
n = r->o_ss.rate;

while (n != 0) {
unsigned tmp = gcd;

gcd = n;
n = tmp % n;
}

r->gcd = gcd;
}

static pa_resample_method_t choose_auto_resampler(pa_resample_flags_t flags) {
pa_resample_method_t method;

@@ -163,9 +183,6 @@ static pa_resample_method_t fix_method(
}
/* Else fall through */
case PA_RESAMPLER_FFMPEG:
case PA_RESAMPLER_SOXR_MQ:
case PA_RESAMPLER_SOXR_HQ:
case PA_RESAMPLER_SOXR_VHQ:
if (flags & PA_RESAMPLER_VARIABLE_RATE) {
pa_log_info("Resampler '%s' cannot do variable rate, reverting to resampler 'auto'.", pa_resample_method_to_string(method));
method = PA_RESAMPLER_AUTO;
@@ -349,10 +366,13 @@ pa_resampler* pa_resampler_new(
r->mempool = pool;
r->method = method;
r->flags = flags;
r->in_frames = 0;
r->out_frames = 0;

/* Fill sample specs */
r->i_ss = *a;
r->o_ss = *b;
calculate_gcd(r);

if (am)
r->i_cm = *am;

@@ -479,7 +499,12 @@ void pa_resampler_set_input_rate(pa_resampler *r, uint32_t rate) {
if (r->i_ss.rate == rate)
return;

/* Recalculate delay counters */
r->in_frames = pa_resampler_get_delay(r, false);
r->out_frames = 0;

r->i_ss.rate = rate;
calculate_gcd(r);

r->impl.update_rates(r);
}

@@ -492,7 +517,12 @@ void pa_resampler_set_output_rate(pa_resampler *r, uint32_t rate) {
if (r->o_ss.rate == rate)
return;

/* Recalculate delay counters */
r->in_frames = pa_resampler_get_delay(r, false);
r->out_frames = 0;

r->o_ss.rate = rate;
calculate_gcd(r);

r->impl.update_rates(r);
@@ -500,34 +530,73 @@ void pa_resampler_set_output_rate(pa_resampler *r, uint32_t rate) {
pa_lfe_filter_update_rate(r->lfe_filter, rate);
}

/* pa_resampler_request() and pa_resampler_result() should be as exact as
* possible to ensure that no samples are lost or duplicated during rewinds.
* Ignore the leftover buffer, the value appears to be wrong for ffmpeg
* and 0 in all other cases. If the resampler is NULL it means that no
* resampling is necessary and the input length equals the output length.
* FIXME: These functions are not exact for the soxr resamplers because
* soxr uses a different algorithm. */
size_t pa_resampler_request(pa_resampler *r, size_t out_length) {
pa_assert(r);
size_t in_length;

/* Let's round up here to make it more likely that the caller will get at
* least out_length amount of data from pa_resampler_run().
*
* We don't take the leftover into account here. If we did, then it might
* be in theory possible that this function would return 0 and
* pa_resampler_run() would also return 0. That could lead to infinite
* loops. When the leftover is ignored here, such loops would eventually
* terminate, because the leftover would grow each round, finally
* surpassing the minimum input threshold of the resampler. */
return ((((uint64_t) ((out_length + r->o_fz-1) / r->o_fz) * r->i_ss.rate) + r->o_ss.rate-1) / r->o_ss.rate) * r->i_fz;
if (!r || out_length == 0)
return out_length;

/* Convert to output frames */
out_length = out_length / r->o_fz;

/* Convert to input frames. The equation matches exactly the
* behavior of the used resamplers and will calculate the
* minimum number of input frames that are needed to produce
* the given number of output frames. */
in_length = (out_length - 1) * r->i_ss.rate / r->o_ss.rate + 1;

/* Convert to input length */
return in_length * r->i_fz;
}

size_t pa_resampler_result(pa_resampler *r, size_t in_length) {
size_t frames;
size_t out_length;

pa_assert(r);
if (!r)
return in_length;

/* Let's round up here to ensure that the caller will always allocate big
* enough output buffer. */
/* Convert to intput frames */
in_length = in_length / r->i_fz;

frames = (in_length + r->i_fz - 1) / r->i_fz;
if (*r->have_leftover)
frames += r->leftover_buf->length / r->w_fz;
/* soxr processes samples in blocks, depending on the ratio.
* Therefore samples that do not fit into a block must be
* ignored. */
if (r->method == PA_RESAMPLER_SOXR_MQ || r->method == PA_RESAMPLER_SOXR_HQ || r->method == PA_RESAMPLER_SOXR_VHQ) {
double ratio;
size_t block_size;
int k;

return (((uint64_t) frames * r->o_ss.rate + r->i_ss.rate - 1) / r->i_ss.rate) * r->o_fz;
ratio = (double)r->i_ss.rate / (double)r->o_ss.rate;

for (k = 0; k < 7; k++) {
if (ratio < pow(2, k + 1))
break;
}
block_size = pow(2, k);
in_length = in_length - in_length % block_size;
}

/* Convert to output frames. This matches exactly the algorithm
* used by the resamplers except for the soxr resamplers. */

out_length = in_length * r->o_ss.rate / r->i_ss.rate;
if ((double)in_length * (double)r->o_ss.rate / (double)r->i_ss.rate - out_length > 0)
out_length++;
/* The libsamplerate resamplers return one sample more if the result is integral and the ratio is not integral. */
else if (r->method >= PA_RESAMPLER_SRC_SINC_BEST_QUALITY && r->method <= PA_RESAMPLER_SRC_SINC_FASTEST && r->i_ss.rate > r->o_ss.rate && r->i_ss.rate % r->o_ss.rate > 0 && (double)in_length * (double)r->o_ss.rate / (double)r->i_ss.rate - out_length <= 0)
out_length++;
else if (r->method == PA_RESAMPLER_SRC_ZERO_ORDER_HOLD && r->i_ss.rate > r->o_ss.rate && (double)in_length * (double)r->o_ss.rate / (double)r->i_ss.rate - out_length <= 0)
out_length++;

/* Convert to output length */
return out_length * r->o_fz;
}

size_t pa_resampler_max_block_size(pa_resampler *r) {
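/* Worked example, not part of this commit, for pa_resampler_request() and
 * pa_resampler_result() above, assuming 44100 Hz input, 48000 Hz output and
 * 4-byte frames. The arithmetic mirrors the integer math in the diff. */
static void request_result_example(void) {
    unsigned i_rate = 44100, o_rate = 48000;
    size_t i_fz = 4;

    /* request: input needed for one second of output */
    size_t out_frames = 48000;
    size_t in_frames = (out_frames - 1) * i_rate / o_rate + 1;   /* 44100 */
    size_t in_bytes = in_frames * i_fz;                          /* 176400 */

    /* result: output produced by that much input; the fractional remainder is
     * zero here, so no extra frame is added and the two calls are inverses */
    size_t back_frames = in_frames * o_rate / i_rate;            /* 48000 */
    (void) in_bytes; (void) back_frames;
}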
@@ -544,9 +613,13 @@ size_t pa_resampler_max_block_size(pa_resampler *r) {
* conversion */
max_ss.channels = (uint8_t) (PA_MAX(r->i_ss.channels, r->o_ss.channels));

/* We silently assume that the format enum is ordered by size */
max_ss.format = PA_MAX(r->i_ss.format, r->o_ss.format);
max_ss.format = PA_MAX(max_ss.format, r->work_format);
max_ss.format = r->i_ss.format;

if (pa_sample_size_of_format(max_ss.format) < pa_sample_size_of_format(r->o_ss.format))
max_ss.format = r->o_ss.format;

if (pa_sample_size_of_format(max_ss.format) < pa_sample_size_of_format(r->work_format))
max_ss.format = r->work_format;

max_ss.rate = PA_MAX(r->i_ss.rate, r->o_ss.rate);
@@ -586,20 +659,89 @@ void pa_resampler_reset(pa_resampler *r) {
pa_lfe_filter_reset(r->lfe_filter);

*r->have_leftover = false;

r->in_frames = 0;
r->out_frames = 0;
}

void pa_resampler_rewind(pa_resampler *r, size_t out_frames) {
/* This function runs amount bytes of data from the history queue through the
* resampler and discards the result. The history queue is unchanged after the
* call. This is used to preload a resampler after a reset. Returns the number
* of frames produced by the resampler. */
size_t pa_resampler_prepare(pa_resampler *r, pa_memblockq *history_queue, size_t amount) {
size_t history_bytes, max_block_size, out_size;
int64_t to_run;

pa_assert(r);

/* For now, we don't have any rewindable resamplers, so we just
reset the resampler instead (and hope that nobody hears the difference). */
if (r->impl.reset)
if (!history_queue || amount == 0)
return 0;

/* Rewind the LFE filter by the amount of history data. */
history_bytes = pa_resampler_result(r, amount);
if (r->lfe_filter)
pa_lfe_filter_rewind(r->lfe_filter, history_bytes);

pa_memblockq_rewind(history_queue, amount);
max_block_size = pa_resampler_max_block_size(r);
to_run = amount;
out_size = 0;

while (to_run > 0) {
pa_memchunk in_chunk, out_chunk;
size_t current;

current = PA_MIN(to_run, (int64_t) max_block_size);

/* Get data from memblockq */
if (pa_memblockq_peek_fixed_size(history_queue, current, &in_chunk) < 0) {
pa_log_warn("Could not read history data for resampler.");

/* Restore queue to original state and reset resampler */
pa_memblockq_drop(history_queue, to_run);
pa_resampler_reset(r);
return out_size;
}

/* Run the resampler */
pa_resampler_run(r, &in_chunk, &out_chunk);

/* Discard result */
if (out_chunk.length != 0) {
out_size += out_chunk.length;
pa_memblock_unref(out_chunk.memblock);
}

pa_memblock_unref(in_chunk.memblock);
pa_memblockq_drop(history_queue, current);
to_run -= current;
}

return out_size;
}

size_t pa_resampler_rewind(pa_resampler *r, size_t out_bytes, pa_memblockq *history_queue, size_t amount) {
pa_assert(r);

/* For now, we don't have any rewindable resamplers, so we just reset
* the resampler if we cannot rewind using pa_resampler_prepare(). */
if (r->impl.reset && !history_queue)
r->impl.reset(r);

if (r->lfe_filter)
pa_lfe_filter_rewind(r->lfe_filter, out_frames);
pa_lfe_filter_rewind(r->lfe_filter, out_bytes);

*r->have_leftover = false;
if (!history_queue) {
*r->have_leftover = false;

r->in_frames = 0;
r->out_frames = 0;
}

if (history_queue && amount > 0)
return pa_resampler_prepare(r, history_queue, amount);

return 0;
}

pa_resample_method_t pa_resampler_get_method(pa_resampler *r) {
@ -1469,6 +1611,7 @@ void pa_resampler_run(pa_resampler *r, const pa_memchunk *in, pa_memchunk *out)
|
|||
pa_assert(in->length % r->i_fz == 0);
|
||||
|
||||
buf = (pa_memchunk*) in;
|
||||
r->in_frames += buf->length / r->i_fz;
|
||||
buf = convert_to_work_format(r, buf);
|
||||
|
||||
/* Try to save resampling effort: if we have more output channels than
|
||||
|
|
@ -1487,6 +1630,7 @@ void pa_resampler_run(pa_resampler *r, const pa_memchunk *in, pa_memchunk *out)
|
|||
if (buf->length) {
|
||||
buf = convert_from_work_format(r, buf);
|
||||
*out = *buf;
|
||||
r->out_frames += buf->length / r->o_fz;
|
||||
|
||||
if (buf == in)
|
||||
pa_memblock_ref(buf->memblock);
|
||||
|
|
@ -1496,6 +1640,47 @@ void pa_resampler_run(pa_resampler *r, const pa_memchunk *in, pa_memchunk *out)
|
|||
pa_memchunk_reset(out);
|
||||
}
|
||||
|
||||
/* Get delay in input frames. Some resamplers may have negative delay. */
|
||||
double pa_resampler_get_delay(pa_resampler *r, bool allow_negative) {
|
||||
double frames;
|
||||
|
||||
frames = r->out_frames * r->i_ss.rate / r->o_ss.rate;
|
||||
if (frames >= r->in_frames && !allow_negative)
|
||||
return 0;
|
||||
return r->in_frames - frames;
|
||||
}
|
||||
|
||||
/* Get delay in usec */
|
||||
pa_usec_t pa_resampler_get_delay_usec(pa_resampler *r) {
|
||||
|
||||
if (!r)
|
||||
return 0;
|
||||
|
||||
return (pa_usec_t) (pa_resampler_get_delay(r, false) * PA_USEC_PER_SEC / r->i_ss.rate);
|
||||
}
|
||||
|
||||
/* Get GCD of input and output rate. */
|
||||
unsigned pa_resampler_get_gcd(pa_resampler *r) {
|
||||
pa_assert(r);
|
||||
|
||||
return r->gcd;
|
||||
}
|
||||
|
||||
/* Get maximum resampler history. The resamplers have finite impulse response, so really old
|
||||
* data (more than 2x the resampler latency) cannot affect the output. This means, that in an
|
||||
* ideal case, we should re-run 2 - 3 times the resampler delay through the resampler when it
|
||||
* is rewound. On the other hand this would mean for high sample rates that more than 25000
|
||||
* samples would need to be used (384k * 33ms). Therefore limit the history to 1.5 times the
|
||||
* maximum resampler delay, which should be fully sufficient in most cases and allows to run
|
||||
* at least more than one delay through the resampler in case of high rates. */
|
||||
size_t pa_resampler_get_max_history(pa_resampler *r) {
|
||||
|
||||
if (!r)
|
||||
return 0;
|
||||
|
||||
return (uint64_t) PA_RESAMPLER_MAX_DELAY_USEC * r->i_ss.rate * 3 / PA_USEC_PER_SEC / 2;
|
||||
}
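As a sanity check of the 1.5x bound above, here is a small standalone sketch; the rate and the copied constant value are assumptions made only for this example and are not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MAX_DELAY_USEC 33000ULL      /* mirrors PA_RESAMPLER_MAX_DELAY_USEC above */
#define EXAMPLE_USEC_PER_SEC   1000000ULL

int main(void) {
    uint32_t rate = 48000;   /* assumed input rate */
    uint64_t frames = EXAMPLE_MAX_DELAY_USEC * rate * 3 / EXAMPLE_USEC_PER_SEC / 2;

    /* 33 ms * 3 / 2 = 49.5 ms of history, i.e. 2376 frames at 48 kHz */
    printf("max history at %u Hz: %llu frames\n", (unsigned) rate, (unsigned long long) frames);
    return 0;
}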
|
||||
|
||||
/*** copy (noop) implementation ***/
|
||||
|
||||
static int copy_init(pa_resampler *r) {
|
||||
|
|
|
|||
|
|
@ -73,6 +73,12 @@ typedef enum pa_resample_flags {
|
|||
PA_RESAMPLER_CONSUME_LFE = 0x0040U,
|
||||
} pa_resample_flags_t;
|
||||
|
||||
/* Currently, the soxr resampler has the largest delay of all supported resamplers.
|
||||
* The maximum value below has been obtained empirically and contains a safety
|
||||
* margin of about 3ms. If the resampler configuration is changed or additional
|
||||
* resamplers are added, the constant must be re-evaluated. */
|
||||
#define PA_RESAMPLER_MAX_DELAY_USEC 33000
|
||||
|
||||
struct pa_resampler {
|
||||
pa_resample_method_t method;
|
||||
pa_resample_flags_t flags;
|
||||
|
|
@ -109,6 +115,10 @@ struct pa_resampler {
|
|||
pa_remap_t remap;
|
||||
bool map_required;
|
||||
|
||||
double in_frames;
|
||||
double out_frames;
|
||||
unsigned gcd;
|
||||
|
||||
pa_lfe_filter_t *lfe_filter;
|
||||
|
||||
pa_resampler_impl impl;
|
||||
|
|
@ -147,8 +157,11 @@ void pa_resampler_set_output_rate(pa_resampler *r, uint32_t rate);
|
|||
/* Reinitialize state of the resampler, possibly due to seeking or other discontinuities */
|
||||
void pa_resampler_reset(pa_resampler *r);
|
||||
|
||||
/* Prepare resampler for use by running some old data through it. */
|
||||
size_t pa_resampler_prepare(pa_resampler *r, pa_memblockq *history_queue, size_t amount);
|
||||
|
||||
/* Rewind resampler */
|
||||
void pa_resampler_rewind(pa_resampler *r, size_t out_frames);
|
||||
size_t pa_resampler_rewind(pa_resampler *r, size_t out_bytes, pa_memblockq *history_queue, size_t amount);
|
||||
|
||||
/* Return the resampling method of the resampler object */
|
||||
pa_resample_method_t pa_resampler_get_method(pa_resampler *r);
|
||||
|
|
@ -162,6 +175,18 @@ const char *pa_resample_method_to_string(pa_resample_method_t m);
|
|||
/* Return 1 when the specified resampling method is supported */
|
||||
int pa_resample_method_supported(pa_resample_method_t m);
|
||||
|
||||
/* Get delay of the resampler in input frames */
|
||||
double pa_resampler_get_delay(pa_resampler *r, bool allow_negative);
|
||||
|
||||
/* Get delay of the resampler in usec */
|
||||
pa_usec_t pa_resampler_get_delay_usec(pa_resampler *r);
|
||||
|
||||
/* Get the GCD of input and output rate */
|
||||
unsigned pa_resampler_get_gcd(pa_resampler *r);
|
||||
|
||||
/* Get maximum number of history frames */
|
||||
size_t pa_resampler_get_max_history(pa_resampler *r);
|
||||
|
||||
const pa_channel_map* pa_resampler_input_channel_map(pa_resampler *r);
|
||||
const pa_sample_spec* pa_resampler_input_sample_spec(pa_resampler *r);
|
||||
const pa_channel_map* pa_resampler_output_channel_map(pa_resampler *r);
|
||||
|
|
|
|||
|
|
@ -65,9 +65,14 @@ static void resampler_soxr_free(pa_resampler *r) {
|
|||
|
||||
static void resampler_soxr_reset(pa_resampler *r) {
|
||||
#if SOXR_THIS_VERSION >= SOXR_VERSION(0, 1, 2)
|
||||
double ratio;
|
||||
|
||||
pa_assert(r);
|
||||
|
||||
soxr_clear(r->impl.data);
|
||||
|
||||
ratio = (double)r->i_ss.rate / (double)r->o_ss.rate;
|
||||
soxr_set_io_ratio(r->impl.data, ratio, 0);
|
||||
#else
|
||||
/* With libsoxr prior to 0.1.2 soxr_clear() makes soxr_process() crash afterwards,
|
||||
* so don't use this function and re-create the context instead. */
|
||||
|
|
@ -89,23 +94,12 @@ static void resampler_soxr_reset(pa_resampler *r) {
|
|||
}
|
||||
|
||||
static void resampler_soxr_update_rates(pa_resampler *r) {
|
||||
soxr_t old_state;
|
||||
double ratio;
|
||||
|
||||
pa_assert(r);
|
||||
|
||||
/* There is no update method in libsoxr,
|
||||
* so just re-create the resampler context */
|
||||
|
||||
old_state = r->impl.data;
|
||||
r->impl.data = NULL;
|
||||
|
||||
if (pa_resampler_soxr_init(r) == 0) {
|
||||
if (old_state)
|
||||
soxr_delete(old_state);
|
||||
} else {
|
||||
r->impl.data = old_state;
|
||||
pa_log_error("Failed to update libsoxr sample rates");
|
||||
}
|
||||
ratio = (double)r->i_ss.rate / (double)r->o_ss.rate;
|
||||
soxr_set_io_ratio(r->impl.data, ratio, 0);
|
||||
}
|
||||
|
||||
int pa_resampler_soxr_init(pa_resampler *r) {
|
||||
|
|
@ -116,6 +110,7 @@ int pa_resampler_soxr_init(pa_resampler *r) {
|
|||
unsigned long quality_recipe;
|
||||
soxr_quality_spec_t quality;
|
||||
soxr_error_t err = NULL;
|
||||
double ratio;
|
||||
|
||||
pa_assert(r);
|
||||
|
||||
|
|
@ -150,14 +145,18 @@ int pa_resampler_soxr_init(pa_resampler *r) {
|
|||
pa_assert_not_reached();
|
||||
}
|
||||
|
||||
quality = soxr_quality_spec(quality_recipe, 0);
|
||||
quality = soxr_quality_spec(quality_recipe, SOXR_VR);
|
||||
|
||||
state = soxr_create(r->i_ss.rate, r->o_ss.rate, r->work_channels, &err, &io_spec, &quality, &runtime_spec);
|
||||
/* Maximum resample ratio is 100:1 */
|
||||
state = soxr_create(100, 1, r->work_channels, &err, &io_spec, &quality, &runtime_spec);
|
||||
if (!state) {
|
||||
pa_log_error("Failed to create libsoxr resampler context: %s.", (err ? err : "[unknown error]"));
|
||||
return -1;
|
||||
}
|
||||
|
||||
ratio = (double)r->i_ss.rate / (double)r->o_ss.rate;
|
||||
soxr_set_io_ratio(state, ratio, 0);
|
||||
|
||||
r->impl.free = resampler_soxr_free;
|
||||
r->impl.reset = resampler_soxr_reset;
|
||||
r->impl.update_rates = resampler_soxr_update_rates;
|
||||
|
|
|
|||
|
|
@ -132,6 +132,7 @@ static void speex_reset(pa_resampler *r) {
|
|||
state = r->impl.data;
|
||||
|
||||
pa_assert_se(speex_resampler_reset_mem(state) == 0);
|
||||
speex_resampler_skip_zeros(state);
|
||||
}
|
||||
|
||||
static void speex_free(pa_resampler *r) {
|
||||
|
|
@ -172,6 +173,8 @@ int pa_resampler_speex_init(pa_resampler *r) {
|
|||
if (!(state = speex_resampler_init(r->work_channels, r->i_ss.rate, r->o_ss.rate, q, &err)))
|
||||
return -1;
|
||||
|
||||
speex_resampler_skip_zeros(state);
|
||||
|
||||
r->impl.data = state;
|
||||
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -91,7 +91,10 @@ struct shm_marker {
|
|||
uint64_t _reserved2;
|
||||
uint64_t _reserved3;
|
||||
uint64_t _reserved4;
|
||||
} PA_GCC_PACKED;
|
||||
};
|
||||
|
||||
/* Ensure struct is appropriately packed */
|
||||
static_assert(sizeof(struct shm_marker) == 8 * 5, "`struct shm_marker` is not tightly packed");
|
||||
|
||||
static inline size_t shm_marker_size(pa_mem_type_t type) {
|
||||
if (type == PA_MEM_TYPE_SHARED_POSIX)
|
||||
|
|
@ -161,7 +164,11 @@ static int sharedmem_create(pa_shm *m, pa_mem_type_t type, size_t size, mode_t m
|
|||
#endif
|
||||
#ifdef HAVE_MEMFD
|
||||
case PA_MEM_TYPE_SHARED_MEMFD:
|
||||
fd = memfd_create("pulseaudio", MFD_ALLOW_SEALING);
|
||||
/* For linux >= 6.3 create fd with MFD_NOEXEC_SEAL flag */
|
||||
fd = memfd_create("pulseaudio", MFD_ALLOW_SEALING|MFD_CLOEXEC|MFD_NOEXEC_SEAL);
|
||||
/* Retry creating fd without MFD_NOEXEC_SEAL to support linux < 6.3 */
|
||||
if (fd < 0)
|
||||
fd = memfd_create("pulseaudio", MFD_ALLOW_SEALING|MFD_CLOEXEC);
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
|
|
|
|||
|
|
@ -29,6 +29,7 @@
|
|||
#include <pulse/xmalloc.h>
|
||||
#include <pulse/util.h>
|
||||
#include <pulse/internal.h>
|
||||
#include <pulse/timeval.h>
|
||||
|
||||
#include <pulsecore/core-format.h>
|
||||
#include <pulsecore/mix.h>
|
||||
|
|
@ -53,6 +54,66 @@ struct volume_factor_entry {
|
|||
pa_cvolume volume;
|
||||
};
|
||||
|
||||
/* Calculate number of input samples for the resampler so that either the number
|
||||
* of input samples or the number of output samples matches the defined history
|
||||
* length. */
|
||||
static size_t calculate_resampler_history_bytes(pa_sink_input *i, size_t in_rewind_frames) {
|
||||
size_t history_frames, history_max, matching_period, total_frames, remainder;
|
||||
double delay;
|
||||
pa_resampler *r;
|
||||
|
||||
if (!(r = i->thread_info.resampler))
|
||||
return 0;
|
||||
|
||||
/* Initialize some variables, cut off full seconds from the rewind */
|
||||
total_frames = 0;
|
||||
in_rewind_frames = in_rewind_frames % r->i_ss.rate;
|
||||
history_max = pa_resampler_get_max_history(r);
|
||||
|
||||
/* Get the current internal delay of the resampler */
|
||||
delay = pa_resampler_get_delay(r, false);
|
||||
|
||||
/* Calculate the matching period */
|
||||
matching_period = r->i_ss.rate / pa_resampler_get_gcd(r);
|
||||
pa_log_debug("Integral period length is %lu input frames", matching_period);
|
||||
|
||||
/* If the delay is larger than the length of the history queue, we can only
|
||||
* replay as much as we have. */
|
||||
if ((size_t)delay >= history_max) {
|
||||
history_frames = history_max;
|
||||
pa_log_debug("Resampler delay exceeds maximum history");
|
||||
return history_frames * r->i_fz;
|
||||
}
|
||||
|
||||
/* Initially set the history to 3 times the resampler delay. Use at least 2 ms.
|
||||
* We try to find a value between 2 and 3 times the resampler delay to ensure
|
||||
* that the old data has no impact anymore. See also comment to
|
||||
* pa_resampler_get_max_history() in resampler.c. */
|
||||
history_frames = (size_t)(delay * 3.0);
|
||||
history_frames = PA_MAX(history_frames, r->i_ss.rate / 500);
|
||||
|
||||
/* Check how the rewind fits into multiples of the matching period. */
|
||||
remainder = (in_rewind_frames + history_frames) % matching_period;
|
||||
|
||||
/* If possible, use between 2 and 3 times the resampler delay */
|
||||
if (remainder < (size_t)delay && history_frames - remainder <= history_max)
|
||||
total_frames = in_rewind_frames + history_frames - remainder;
|
||||
/* Else, try above 3 times the delay */
|
||||
else if (history_frames + matching_period - remainder <= history_max)
|
||||
total_frames = in_rewind_frames + history_frames + matching_period - remainder;
|
||||
|
||||
if (total_frames != 0)
|
||||
/* We found a perfect match. */
|
||||
history_frames = total_frames - in_rewind_frames;
|
||||
else {
|
||||
/* Try to use 2.5 times the delay. */
|
||||
history_frames = PA_MIN((size_t)(delay * 2.5), history_max);
|
||||
pa_log_debug("No usable integral matching period");
|
||||
}
|
||||
|
||||
return history_frames * r->i_fz;
|
||||
}
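The period arithmetic above is easier to follow with concrete numbers. The following standalone sketch repeats the core selection step for an assumed 44.1 kHz -> 48 kHz stream (gcd = 300, so the matching period is 147 input frames), ignoring the history_max clamping for brevity; all figures are illustrative assumptions:

#include <stddef.h>
#include <stdio.h>

int main(void) {
    size_t rate = 44100, gcd = 300;
    size_t matching_period = rate / gcd;        /* 147 input frames */
    size_t delay = 500;                         /* assumed resampler delay in frames */
    size_t rewind = 4410;                       /* assumed rewind, full seconds already cut off */
    size_t history = delay * 3;                 /* start at 3x the delay */
    size_t remainder = (rewind + history) % matching_period;

    if (remainder < delay)
        history -= remainder;                   /* lands between 2x and 3x the delay */
    else
        history += matching_period - remainder;

    /* (rewind + history) is now an integral number of matching periods */
    printf("replay %zu input frames in front of the rewind point\n", history);
    return 0;
}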
|
||||
|
||||
static struct volume_factor_entry *volume_factor_entry_new(const char *key, const pa_cvolume *volume) {
|
||||
struct volume_factor_entry *entry;
|
||||
|
||||
|
|
@ -286,6 +347,7 @@ static void reset_callbacks(pa_sink_input *i) {
|
|||
i->send_event = NULL;
|
||||
i->volume_changed = NULL;
|
||||
i->mute_changed = NULL;
|
||||
i->get_max_rewind_limit = NULL;
|
||||
}
|
||||
|
||||
/* Called from main context */
|
||||
|
|
@ -301,6 +363,7 @@ int pa_sink_input_new(
|
|||
int r;
|
||||
char *pt;
|
||||
char *memblockq_name;
|
||||
pa_memchunk silence;
|
||||
|
||||
pa_assert(_i);
|
||||
pa_assert(core);
|
||||
|
|
@ -562,6 +625,11 @@ int pa_sink_input_new(
|
|||
i->thread_info.underrun_for_sink = 0;
|
||||
i->thread_info.playing_for = 0;
|
||||
i->thread_info.direct_outputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
|
||||
i->thread_info.move_start_time = 0;
|
||||
i->thread_info.resampler_delay_frames = 0;
|
||||
i->thread_info.origin_sink_latency = 0;
|
||||
i->thread_info.dont_rewrite = false;
|
||||
i->origin_rewind_bytes = 0;
|
||||
|
||||
pa_assert_se(pa_idxset_put(core->sink_inputs, i, &i->index) == 0);
|
||||
pa_assert_se(pa_idxset_put(i->sink->inputs, pa_sink_input_ref(i), NULL) == 0);
|
||||
|
|
@ -582,6 +650,21 @@ int pa_sink_input_new(
|
|||
&i->sink->silence);
|
||||
pa_xfree(memblockq_name);
|
||||
|
||||
memblockq_name = pa_sprintf_malloc("sink input history memblockq [%u]", i->index);
|
||||
pa_sink_input_get_silence(i, &silence);
|
||||
i->thread_info.history_memblockq = pa_memblockq_new(
|
||||
memblockq_name,
|
||||
0,
|
||||
MEMBLOCKQ_MAXLENGTH,
|
||||
0,
|
||||
&i->sample_spec,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
&silence);
|
||||
pa_xfree(memblockq_name);
|
||||
pa_memblock_unref(silence.memblock);
|
||||
|
||||
pt = pa_proplist_to_string_sep(i->proplist, "\n ");
|
||||
pa_log_info("Created input %u \"%s\" on %s with sample spec %s and channel map %s\n %s",
|
||||
i->index,
|
||||
|
|
@ -765,6 +848,9 @@ static void sink_input_free(pa_object *o) {
|
|||
if (i->thread_info.render_memblockq)
|
||||
pa_memblockq_free(i->thread_info.render_memblockq);
|
||||
|
||||
if (i->thread_info.history_memblockq)
|
||||
pa_memblockq_free(i->thread_info.history_memblockq);
|
||||
|
||||
if (i->thread_info.resampler)
|
||||
pa_resampler_free(i->thread_info.resampler);
|
||||
|
||||
|
|
@ -934,6 +1020,7 @@ void pa_sink_input_peek(pa_sink_input *i, size_t slength /* in sink bytes */, pa
|
|||
* data, so let's just hand out silence */
|
||||
|
||||
pa_memblockq_seek(i->thread_info.render_memblockq, (int64_t) slength, PA_SEEK_RELATIVE, true);
|
||||
pa_memblockq_seek(i->thread_info.history_memblockq, (int64_t) ilength_full, PA_SEEK_RELATIVE, true);
|
||||
i->thread_info.playing_for = 0;
|
||||
if (i->thread_info.underrun_for != (uint64_t) -1) {
|
||||
i->thread_info.underrun_for += ilength_full;
|
||||
|
|
@ -981,6 +1068,9 @@ void pa_sink_input_peek(pa_sink_input *i, size_t slength /* in sink bytes */, pa
|
|||
pa_volume_memchunk(&wchunk, &i->thread_info.sample_spec, &i->thread_info.soft_volume);
|
||||
}
|
||||
|
||||
/* Push chunk into history queue to retain some resampler input history. */
|
||||
pa_memblockq_push(i->thread_info.history_memblockq, &wchunk);
|
||||
|
||||
if (!i->thread_info.resampler) {
|
||||
|
||||
if (nvfs) {
|
||||
|
|
@ -1045,6 +1135,7 @@ void pa_sink_input_peek(pa_sink_input *i, size_t slength /* in sink bytes */, pa
|
|||
|
||||
/* Called from thread context */
|
||||
void pa_sink_input_drop(pa_sink_input *i, size_t nbytes /* in sink sample spec */) {
|
||||
int64_t rbq, hbq;
|
||||
|
||||
pa_sink_input_assert_ref(i);
|
||||
pa_sink_input_assert_io_context(i);
|
||||
|
|
@ -1057,6 +1148,22 @@ void pa_sink_input_drop(pa_sink_input *i, size_t nbytes /* in sink sample spec *
|
|||
#endif
|
||||
|
||||
pa_memblockq_drop(i->thread_info.render_memblockq, nbytes);
|
||||
|
||||
/* Keep memblockq's in sync. Using pa_resampler_request()
|
||||
* on nbytes will not work here because of rounding. */
|
||||
rbq = pa_memblockq_get_write_index(i->thread_info.render_memblockq);
|
||||
rbq -= pa_memblockq_get_read_index(i->thread_info.render_memblockq);
|
||||
hbq = pa_memblockq_get_write_index(i->thread_info.history_memblockq);
|
||||
hbq -= pa_memblockq_get_read_index(i->thread_info.history_memblockq);
|
||||
if (rbq >= 0)
|
||||
rbq = pa_resampler_request(i->thread_info.resampler, rbq);
|
||||
else
|
||||
rbq = - (int64_t) pa_resampler_request(i->thread_info.resampler, - rbq);
|
||||
|
||||
if (hbq > rbq)
|
||||
pa_memblockq_drop(i->thread_info.history_memblockq, hbq - rbq);
|
||||
else if (rbq > hbq)
|
||||
pa_memblockq_rewind(i->thread_info.history_memblockq, rbq - hbq);
|
||||
}
|
||||
|
||||
/* Called from thread context */
|
||||
|
|
@ -1070,6 +1177,7 @@ bool pa_sink_input_process_underrun(pa_sink_input *i) {
|
|||
if (i->process_underrun && i->process_underrun(i)) {
|
||||
/* All valid data has been played back, so we can empty this queue. */
|
||||
pa_memblockq_silence(i->thread_info.render_memblockq);
|
||||
pa_memblockq_silence(i->thread_info.history_memblockq);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
|
@ -1079,6 +1187,7 @@ bool pa_sink_input_process_underrun(pa_sink_input *i) {
|
|||
void pa_sink_input_process_rewind(pa_sink_input *i, size_t nbytes /* in sink sample spec */) {
|
||||
size_t lbq;
|
||||
bool called = false;
|
||||
size_t sink_input_nbytes;
|
||||
|
||||
pa_sink_input_assert_ref(i);
|
||||
pa_sink_input_assert_io_context(i);
|
||||
|
|
@ -1090,21 +1199,27 @@ void pa_sink_input_process_rewind(pa_sink_input *i, size_t nbytes /* in sink sam
|
|||
#endif
|
||||
|
||||
lbq = pa_memblockq_get_length(i->thread_info.render_memblockq);
|
||||
sink_input_nbytes = pa_resampler_request(i->thread_info.resampler, nbytes);
|
||||
|
||||
if (nbytes > 0 && !i->thread_info.dont_rewind_render) {
|
||||
pa_log_debug("Have to rewind %lu bytes on render memblockq.", (unsigned long) nbytes);
|
||||
pa_memblockq_rewind(i->thread_info.render_memblockq, nbytes);
|
||||
pa_memblockq_rewind(i->thread_info.history_memblockq, sink_input_nbytes);
|
||||
}
|
||||
|
||||
if (i->thread_info.dont_rewrite)
|
||||
goto finish;
|
||||
|
||||
if (i->thread_info.rewrite_nbytes == (size_t) -1) {
|
||||
|
||||
/* We were asked to drop all buffered data, and rerequest new
|
||||
* data from implementor the next time peek() is called */
|
||||
|
||||
pa_memblockq_flush_write(i->thread_info.render_memblockq, true);
|
||||
pa_memblockq_flush_write(i->thread_info.history_memblockq, true);
|
||||
|
||||
} else if (i->thread_info.rewrite_nbytes > 0) {
|
||||
size_t max_rewrite, amount;
|
||||
size_t max_rewrite, sink_amount, sink_input_amount;
|
||||
|
||||
/* Calculate how much it makes sense to rewrite at most */
|
||||
max_rewrite = nbytes;
|
||||
|
|
@ -1112,41 +1227,68 @@ void pa_sink_input_process_rewind(pa_sink_input *i, size_t nbytes /* in sink sam
|
|||
max_rewrite += lbq;
|
||||
|
||||
/* Transform into local domain */
|
||||
if (i->thread_info.resampler)
|
||||
max_rewrite = pa_resampler_request(i->thread_info.resampler, max_rewrite);
|
||||
sink_input_amount = pa_resampler_request(i->thread_info.resampler, max_rewrite);
|
||||
|
||||
/* Calculate how much of the rewinded data should actually be rewritten */
|
||||
amount = PA_MIN(i->thread_info.rewrite_nbytes, max_rewrite);
|
||||
sink_input_amount = PA_MIN(i->thread_info.rewrite_nbytes, sink_input_amount);
|
||||
|
||||
if (amount > 0) {
|
||||
pa_log_debug("Have to rewind %lu bytes on implementor.", (unsigned long) amount);
|
||||
/* Transform to sink domain */
|
||||
sink_amount = pa_resampler_result(i->thread_info.resampler, sink_input_amount);
|
||||
|
||||
if (sink_input_amount > 0) {
|
||||
pa_log_debug("Have to rewind %lu bytes on implementor.", (unsigned long) sink_input_amount);
|
||||
|
||||
/* Tell the implementor */
|
||||
if (i->process_rewind)
|
||||
i->process_rewind(i, amount);
|
||||
i->process_rewind(i, sink_input_amount);
|
||||
called = true;
|
||||
|
||||
/* Convert back to sink domain */
|
||||
if (i->thread_info.resampler)
|
||||
amount = pa_resampler_result(i->thread_info.resampler, amount);
|
||||
/* Update the write pointer. Use pa_resampler_result(r, sink_input_amount) instead
|
||||
* of sink_amount because the two may differ and the actual replay of the samples
|
||||
* will produce pa_resampler_result(r, sink_input_amount) samples. */
|
||||
pa_memblockq_seek(i->thread_info.render_memblockq, - ((int64_t) pa_resampler_result(i->thread_info.resampler, sink_input_amount)),PA_SEEK_RELATIVE, true);
|
||||
|
||||
if (amount > 0)
|
||||
/* Ok, now update the write pointer */
|
||||
pa_memblockq_seek(i->thread_info.render_memblockq, - ((int64_t) amount), PA_SEEK_RELATIVE, true);
|
||||
/* Rewind the resampler */
|
||||
if (i->thread_info.resampler) {
|
||||
size_t history_bytes = 0;
|
||||
int64_t history_result;
|
||||
|
||||
if (i->thread_info.rewrite_flush)
|
||||
history_bytes = calculate_resampler_history_bytes(i, sink_input_amount / pa_frame_size(&i->sample_spec));
|
||||
|
||||
if (history_bytes > 0) {
|
||||
history_result = pa_resampler_rewind(i->thread_info.resampler, sink_amount, i->thread_info.history_memblockq, history_bytes);
|
||||
|
||||
/* We may have produced one sample too much or one sample less than expected.
|
||||
* The replay of the rewound sink input data will then produce a deviation in
|
||||
* the other direction, so that the total number of produced samples matches
|
||||
* pa_resampler_result(r, sink_input_amount + history_bytes). Therefore we have
|
||||
* to correct the write pointer of the render queue accordingly.
|
||||
* Strictly this is only true, if the history can be replayed from a known
|
||||
* resampler state, that is if a true matching period exists. In cases where
|
||||
* we are using an approximate matching period, we may still lose or duplicate
|
||||
* one sample during rewind. */
|
||||
history_result -= (int64_t) pa_resampler_result(i->thread_info.resampler, history_bytes);
|
||||
if (history_result != 0)
|
||||
pa_memblockq_seek(i->thread_info.render_memblockq, history_result, PA_SEEK_RELATIVE, true);
|
||||
}
|
||||
}
|
||||
|
||||
/* Update the history write pointer */
|
||||
pa_memblockq_seek(i->thread_info.history_memblockq, - ((int64_t) sink_input_amount), PA_SEEK_RELATIVE, true);
|
||||
|
||||
if (i->thread_info.rewrite_flush) {
|
||||
pa_memblockq_silence(i->thread_info.render_memblockq);
|
||||
|
||||
/* And rewind the resampler */
|
||||
if (i->thread_info.resampler)
|
||||
pa_resampler_rewind(i->thread_info.resampler, amount);
|
||||
pa_memblockq_silence(i->thread_info.history_memblockq);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
finish:
|
||||
if (!called)
|
||||
if (i->process_rewind)
|
||||
i->process_rewind(i, 0);
|
||||
|
||||
i->thread_info.dont_rewrite = false;
|
||||
i->thread_info.rewrite_nbytes = 0;
|
||||
i->thread_info.rewrite_flush = false;
|
||||
i->thread_info.dont_rewind_render = false;
|
||||
|
|
@ -1157,7 +1299,7 @@ size_t pa_sink_input_get_max_rewind(pa_sink_input *i) {
|
|||
pa_sink_input_assert_ref(i);
|
||||
pa_sink_input_assert_io_context(i);
|
||||
|
||||
return i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, i->sink->thread_info.max_rewind) : i->sink->thread_info.max_rewind;
|
||||
return pa_resampler_request(i->thread_info.resampler, i->sink->thread_info.max_rewind);
|
||||
}
|
||||
|
||||
/* Called from thread context */
|
||||
|
|
@ -1168,11 +1310,14 @@ size_t pa_sink_input_get_max_request(pa_sink_input *i) {
|
|||
/* We're not verifying the status here, to allow this to be called
|
||||
* in the state change handler between _INIT and _RUNNING */
|
||||
|
||||
return i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, i->sink->thread_info.max_request) : i->sink->thread_info.max_request;
|
||||
return pa_resampler_request(i->thread_info.resampler, i->sink->thread_info.max_request);
|
||||
}
|
||||
|
||||
/* Called from thread context */
|
||||
void pa_sink_input_update_max_rewind(pa_sink_input *i, size_t nbytes /* in the sink's sample spec */) {
|
||||
size_t max_rewind;
|
||||
size_t resampler_history;
|
||||
|
||||
pa_sink_input_assert_ref(i);
|
||||
pa_sink_input_assert_io_context(i);
|
||||
pa_assert(PA_SINK_INPUT_IS_LINKED(i->thread_info.state));
|
||||
|
|
@ -1180,8 +1325,15 @@ void pa_sink_input_update_max_rewind(pa_sink_input *i, size_t nbytes /* in the
|
|||
|
||||
pa_memblockq_set_maxrewind(i->thread_info.render_memblockq, nbytes);
|
||||
|
||||
max_rewind = pa_resampler_request(i->thread_info.resampler, nbytes);
|
||||
/* Calculate maximum history needed */
|
||||
resampler_history = pa_resampler_get_max_history(i->thread_info.resampler);
|
||||
resampler_history *= pa_frame_size(&i->sample_spec);
|
||||
|
||||
pa_memblockq_set_maxrewind(i->thread_info.history_memblockq, max_rewind + resampler_history);
|
||||
|
||||
if (i->update_max_rewind)
|
||||
i->update_max_rewind(i, i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, nbytes) : nbytes);
|
||||
i->update_max_rewind(i, max_rewind);
|
||||
}
|
||||
|
||||
/* Called from thread context */
|
||||
|
|
@ -1192,7 +1344,7 @@ void pa_sink_input_update_max_request(pa_sink_input *i, size_t nbytes /* in the
|
|||
pa_assert(pa_frame_aligned(nbytes, &i->sink->sample_spec));
|
||||
|
||||
if (i->update_max_request)
|
||||
i->update_max_request(i, i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, nbytes) : nbytes);
|
||||
i->update_max_request(i, pa_resampler_request(i->thread_info.resampler, nbytes));
|
||||
}
|
||||
|
||||
/* Called from thread context */
|
||||
|
|
@ -1754,6 +1906,11 @@ int pa_sink_input_start_move(pa_sink_input *i) {
|
|||
|
||||
pa_cvolume_remap(&i->volume_factor_sink, &i->sink->channel_map, &i->channel_map);
|
||||
|
||||
/* Calculate how much of the latency was rewound on the old sink */
|
||||
i->origin_rewind_bytes = pa_sink_get_last_rewind(i->sink) / pa_frame_size(&i->sink->sample_spec);
|
||||
i->origin_rewind_bytes = i->origin_rewind_bytes * i->sample_spec.rate / i->sink->sample_spec.rate;
|
||||
i->origin_rewind_bytes *= pa_frame_size(&i->sample_spec);
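/* For illustration (numbers assumed, not from the patch): if the old sink last
 * rewound 19200 bytes at 48 kHz with 4-byte frames and the sink input runs at
 * 44.1 kHz, also with 4-byte frames, the three steps above yield
 * 19200 / 4 = 4800 frames, 4800 * 44100 / 48000 = 4410 frames, and finally
 * 4410 * 4 = 17640 bytes stored in origin_rewind_bytes. */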
|
||||
|
||||
i->sink = NULL;
|
||||
i->sink_requested_by_application = false;
|
||||
|
||||
|
|
@ -1904,6 +2061,92 @@ static void set_preferred_sink(pa_sink_input *i, const char *sink_name) {
|
|||
pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_PREFERRED_SINK_CHANGED], i);
|
||||
}
|
||||
|
||||
<<<<<<< HEAD
|
||||
=======
|
||||
/* Restores the render memblockq from the history memblockq during a move.
|
||||
* Called from main context while the sink input is detached. */
|
||||
static void restore_render_memblockq(pa_sink_input *i) {
|
||||
size_t block_size, to_push;
|
||||
size_t latency_bytes = 0;
|
||||
size_t bytes_on_origin_sink = 0;
|
||||
size_t resampler_delay_bytes = 0;
|
||||
|
||||
/* Calculate how much of the latency was left on the old sink */
|
||||
latency_bytes = pa_usec_to_bytes(i->thread_info.origin_sink_latency, &i->sample_spec);
|
||||
if (latency_bytes > i->origin_rewind_bytes)
|
||||
bytes_on_origin_sink = latency_bytes - i->origin_rewind_bytes;
|
||||
|
||||
/* Get resampler latency of old resampler */
|
||||
resampler_delay_bytes = i->thread_info.resampler_delay_frames * pa_frame_size(&i->sample_spec);
|
||||
|
||||
/* Flush the render memblockq and reset the resampler */
|
||||
pa_memblockq_flush_write(i->thread_info.render_memblockq, true);
|
||||
if (i->thread_info.resampler)
|
||||
pa_resampler_reset(i->thread_info.resampler);
|
||||
|
||||
/* Rewind the history queue */
|
||||
if (i->origin_rewind_bytes + resampler_delay_bytes > 0)
|
||||
pa_memblockq_rewind(i->thread_info.history_memblockq, i->origin_rewind_bytes + resampler_delay_bytes);
|
||||
|
||||
/* If something is left playing on the origin sink, add silence to the render memblockq */
|
||||
if (bytes_on_origin_sink > 0) {
|
||||
pa_memchunk chunk;
|
||||
|
||||
chunk.length = pa_resampler_result(i->thread_info.resampler, bytes_on_origin_sink);
|
||||
if (chunk.length > 0) {
|
||||
chunk.memblock = pa_memblock_new(i->core->mempool, chunk.length);
|
||||
chunk.index = 0;
|
||||
pa_silence_memchunk(&chunk, &i->sink->sample_spec);
|
||||
pa_memblockq_push(i->thread_info.render_memblockq, &chunk);
|
||||
pa_memblock_unref(chunk.memblock);
|
||||
}
|
||||
}
|
||||
|
||||
/* Determine maximum block size */
|
||||
if (i->thread_info.resampler)
|
||||
block_size = pa_resampler_max_block_size(i->thread_info.resampler);
|
||||
else
|
||||
block_size = pa_frame_align(pa_mempool_block_size_max(i->core->mempool), &i->sample_spec);
|
||||
|
||||
/* Now push all the data in the history queue into the render memblockq */
|
||||
to_push = pa_memblockq_get_length(i->thread_info.history_memblockq);
|
||||
while (to_push > 0) {
|
||||
pa_memchunk in_chunk, out_chunk;
|
||||
size_t push_bytes;
|
||||
|
||||
push_bytes = block_size;
|
||||
if (to_push < block_size)
|
||||
push_bytes = to_push;
|
||||
|
||||
if (pa_memblockq_peek_fixed_size(i->thread_info.history_memblockq, push_bytes, &in_chunk) < 0) {
|
||||
pa_log_warn("Could not restore memblockq during move");
|
||||
break;
|
||||
}
|
||||
|
||||
if (i->thread_info.resampler) {
|
||||
pa_resampler_run(i->thread_info.resampler, &in_chunk, &out_chunk);
|
||||
pa_memblock_unref(in_chunk.memblock);
|
||||
} else
|
||||
out_chunk = in_chunk;
|
||||
|
||||
if (out_chunk.length > 0) {
|
||||
pa_memblockq_push(i->thread_info.render_memblockq, &out_chunk);
|
||||
pa_memblock_unref(out_chunk.memblock);
|
||||
}
|
||||
|
||||
pa_memblockq_drop(i->thread_info.history_memblockq, push_bytes);
|
||||
to_push -= push_bytes;
|
||||
}
|
||||
|
||||
/* No need to rewind the history queue here, it will be re-synchronized
|
||||
* with the render queue during the next pa_sink_input_drop() call. */
|
||||
|
||||
/* Tell the sink input not to ask the implementer to rewrite during the
|
||||
* next rewind */
|
||||
i->thread_info.dont_rewrite = true;
|
||||
}
|
||||
|
||||
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336
|
||||
/* Called from main context */
|
||||
int pa_sink_input_finish_move(pa_sink_input *i, pa_sink *dest, bool save) {
|
||||
struct volume_factor_entry *v;
|
||||
|
|
@ -1962,7 +2205,9 @@ int pa_sink_input_finish_move(pa_sink_input *i, pa_sink *dest, bool save) {
|
|||
if (i->state == PA_SINK_INPUT_CORKED)
|
||||
i->sink->n_corked++;
|
||||
|
||||
pa_sink_input_update_resampler(i);
|
||||
pa_sink_input_update_resampler(i, false);
|
||||
|
||||
restore_render_memblockq(i);
|
||||
|
||||
pa_sink_update_status(dest);
|
||||
|
||||
|
|
@ -1973,6 +2218,9 @@ int pa_sink_input_finish_move(pa_sink_input *i, pa_sink *dest, bool save) {
|
|||
|
||||
pa_assert_se(pa_asyncmsgq_send(i->sink->asyncmsgq, PA_MSGOBJECT(i->sink), PA_SINK_MESSAGE_FINISH_MOVE, i, 0, NULL) == 0);
|
||||
|
||||
/* Reset move variable */
|
||||
i->origin_rewind_bytes = 0;
|
||||
|
||||
pa_log_debug("Successfully moved sink input %i to %s.", i->index, dest->name);
|
||||
|
||||
/* Notify everyone */
|
||||
|
|
@ -2112,6 +2360,7 @@ int pa_sink_input_process_msg(pa_msgobject *o, int code, void *userdata, int64_t
|
|||
pa_usec_t *r = userdata;
|
||||
|
||||
r[0] += pa_bytes_to_usec(pa_memblockq_get_length(i->thread_info.render_memblockq), &i->sink->sample_spec);
|
||||
r[0] += pa_resampler_get_delay_usec(i->thread_info.resampler);
|
||||
r[1] += pa_sink_get_latency_within_thread(i->sink, false);
|
||||
|
||||
return 0;
|
||||
|
|
@ -2216,14 +2465,31 @@ void pa_sink_input_request_rewind(
|
|||
/* Check if rewinding for the maximum is requested, and if so, fix up */
|
||||
if (nbytes <= 0) {
|
||||
|
||||
/* Calculate maximum number of bytes that could be rewound in theory */
|
||||
nbytes = i->sink->thread_info.max_rewind + lbq;
|
||||
/* Calculate maximum number of bytes that could be rewound in theory.
|
||||
* If the sink has a virtual sink attached, limit rewinding to max_rewind.
|
||||
*
|
||||
* The max_rewind value of a virtual sink depends on the rewinding capability
|
||||
* of its DSP code. The DSP code is rewound in the process_rewind() callback
|
||||
* of the sink input. Therefore rewinding must be limited to max_rewind here. */
|
||||
nbytes = i->sink->thread_info.max_rewind;
|
||||
if (!pa_sink_has_filter_attached(i->sink) && !pa_sink_is_filter(i->sink))
|
||||
nbytes += lbq;
|
||||
|
||||
/* Transform from sink domain */
|
||||
if (i->thread_info.resampler)
|
||||
nbytes = pa_resampler_request(i->thread_info.resampler, nbytes);
|
||||
nbytes = pa_resampler_request(i->thread_info.resampler, nbytes);
|
||||
}
|
||||
|
||||
/* For virtual sinks there are two situations where nbytes may exceed max_rewind:
|
||||
* 1) If an underrun was detected.
|
||||
* 2) When the sink input is rewound during a move when it is attached to
|
||||
* the destination sink.
|
||||
* Moving a sink input is handled without involving the implementer, so the
|
||||
* implementer will only be asked to rewind more than max_rewind if an
|
||||
* underrun occurs. In that case, the DSP code of virtual sinks should be
|
||||
* reset instead of rewound. Therefore the rewind function of filters should
|
||||
* check if the requested rewind exceeds the maximum possible rewind of the
|
||||
* filter. */
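A rough sketch of the guard this implies for a filter's process_rewind() callback; the struct userdata layout and all field names below are assumptions made for illustration only, not part of the patch:

struct userdata {
    size_t max_dsp_rewind;    /* how many bytes the DSP state can actually be rewound */
    /* ... filter state ... */
};

static void example_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u = i->userdata;

    if (nbytes > u->max_dsp_rewind) {
        /* Rewind larger than the DSP can undo, e.g. after an underrun:
         * reset the filter state here instead of rewinding it. */
        return;
    }

    /* ... otherwise rewind the DSP state by nbytes here ... */
}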
|
||||
|
||||
/* Remember how much we actually want to rewrite */
|
||||
if (i->thread_info.rewrite_nbytes != (size_t) -1) {
|
||||
if (rewrite) {
|
||||
|
|
@ -2247,8 +2513,7 @@ void pa_sink_input_request_rewind(
|
|||
if (nbytes != (size_t) -1) {
|
||||
|
||||
/* Transform to sink domain */
|
||||
if (i->thread_info.resampler)
|
||||
nbytes = pa_resampler_result(i->thread_info.resampler, nbytes);
|
||||
nbytes = pa_resampler_result(i->thread_info.resampler, nbytes);
|
||||
|
||||
if (nbytes > lbq)
|
||||
pa_sink_request_rewind(i->sink, nbytes - lbq);
|
||||
|
|
@ -2308,7 +2573,7 @@ finish:
|
|||
/* Called from main context */
|
||||
/* Updates the sink input's resampler with whatever the current sink requires
|
||||
* -- useful when the underlying sink's sample spec might have changed */
|
||||
int pa_sink_input_update_resampler(pa_sink_input *i) {
|
||||
int pa_sink_input_update_resampler(pa_sink_input *i, bool flush_history) {
|
||||
pa_resampler *new_resampler;
|
||||
char *memblockq_name;
|
||||
|
||||
|
|
@ -2345,6 +2610,9 @@ int pa_sink_input_update_resampler(pa_sink_input *i) {
|
|||
} else
|
||||
new_resampler = NULL;
|
||||
|
||||
if (flush_history)
|
||||
pa_memblockq_flush_write(i->thread_info.history_memblockq, true);
|
||||
|
||||
if (new_resampler == i->thread_info.resampler)
|
||||
return 0;
|
||||
|
||||
|
|
|
|||
|
|
@ -156,6 +156,13 @@ struct pa_sink_input {
|
|||
* changes. Called from IO context. */
|
||||
void (*update_max_rewind) (pa_sink_input *i, size_t nbytes); /* may be NULL */
|
||||
|
||||
/* Called whenever the maximum rewindable size of the sink
|
||||
* changes. Used by virtual sinks to communicate rewind limits
|
||||
* of the virtual sink to the master sink. Must return size_t (-1)
|
||||
* if there is no limit or if the virtual sink is not opened.
|
||||
* Called from IO context. */
|
||||
size_t (*get_max_rewind_limit) (pa_sink_input *i); /* may be NULL */
|
||||
|
||||
/* Called whenever the maximum request size of the sink
|
||||
* changes. Called from IO context. */
|
||||
void (*update_max_request) (pa_sink_input *i, size_t nbytes); /* may be NULL */
|
||||
|
|
@ -231,6 +238,9 @@ struct pa_sink_input {
|
|||
* mute status changes. Called from main context */
|
||||
void (*mute_changed)(pa_sink_input *i); /* may be NULL */
|
||||
|
||||
/* Used to store the rewind amount of the origin sink during a move */
|
||||
size_t origin_rewind_bytes; /* In sink input sample spec */
|
||||
|
||||
struct {
|
||||
pa_sink_input_state_t state;
|
||||
|
||||
|
|
@ -252,11 +262,21 @@ struct pa_sink_input {
|
|||
/* We maintain a history of resampled audio data here. */
|
||||
pa_memblockq *render_memblockq;
|
||||
|
||||
/* This queue keeps the history before resampling and is used
|
||||
* when rewinding the resampler. */
|
||||
pa_memblockq *history_memblockq;
|
||||
|
||||
pa_sink_input *sync_prev, *sync_next;
|
||||
|
||||
/* The requested latency for the sink */
|
||||
pa_usec_t requested_sink_latency;
|
||||
|
||||
/* Variables used during move */
|
||||
pa_usec_t move_start_time;
|
||||
pa_usec_t origin_sink_latency;
|
||||
size_t resampler_delay_frames;
|
||||
bool dont_rewrite;
|
||||
|
||||
pa_hashmap *direct_outputs;
|
||||
} thread_info;
|
||||
|
||||
|
|
@ -361,7 +381,7 @@ void pa_sink_input_request_rewind(pa_sink_input *i, size_t nbytes, bool rewrite,
|
|||
void pa_sink_input_cork(pa_sink_input *i, bool b);
|
||||
|
||||
int pa_sink_input_set_rate(pa_sink_input *i, uint32_t rate);
|
||||
int pa_sink_input_update_resampler(pa_sink_input *i);
|
||||
int pa_sink_input_update_resampler(pa_sink_input *i, bool flush_history);
|
||||
|
||||
/* This returns the sink's fields converted into out sample type */
|
||||
size_t pa_sink_input_get_max_rewind(pa_sink_input *i);
|
||||
|
|
|
|||
|
|
@ -40,6 +40,7 @@
|
|||
#include <pulsecore/namereg.h>
|
||||
#include <pulsecore/core-util.h>
|
||||
#include <pulsecore/sample-util.h>
|
||||
#include <pulsecore/stream-util.h>
|
||||
#include <pulsecore/mix.h>
|
||||
#include <pulsecore/core-subscribe.h>
|
||||
#include <pulsecore/log.h>
|
||||
|
|
@ -338,6 +339,7 @@ pa_sink* pa_sink_new(
|
|||
s->thread_info.soft_muted = s->muted;
|
||||
s->thread_info.state = s->state;
|
||||
s->thread_info.rewind_nbytes = 0;
|
||||
s->thread_info.last_rewind_nbytes = 0;
|
||||
s->thread_info.rewind_requested = false;
|
||||
s->thread_info.max_rewind = 0;
|
||||
s->thread_info.max_request = 0;
|
||||
|
|
@ -1014,20 +1016,29 @@ size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
|
|||
if (i->origin_sink) {
|
||||
size_t filter_result, left_to_play_origin;
|
||||
|
||||
/* The recursive call works in the origin sink domain ... */
|
||||
left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
|
||||
/* The combine sink sets i->origin sink but has a different threading model
|
||||
* than the filter sinks. Therefore the recursion below may not be executed
|
||||
* because pa_sink_process_input_underruns() was not called in the thread
|
||||
* context of the origin sink.
|
||||
* FIXME: It is unclear if some other kind of recursion would be necessary
|
||||
* for the combine sink. */
|
||||
if (!i->module || !pa_safe_streq(i->module->name, "module-combine-sink")) {
|
||||
|
||||
/* .. and returns the time to sleep before waking up. We need the
|
||||
* underrun duration for comparisons, so we undo the subtraction on
|
||||
* the return value... */
|
||||
filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
|
||||
/* The recursive call works in the origin sink domain ... */
|
||||
left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
|
||||
|
||||
/* ... and convert it back to the master sink domain */
|
||||
filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
|
||||
/* .. and returns the time to sleep before waking up. We need the
|
||||
* underrun duration for comparisons, so we undo the subtraction on
|
||||
* the return value... */
|
||||
filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
|
||||
|
||||
/* Remember the longest underrun so far */
|
||||
if (filter_result > result)
|
||||
result = filter_result;
|
||||
/* ... and convert it back to the master sink domain */
|
||||
filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
|
||||
|
||||
/* Remember the longest underrun so far */
|
||||
if (filter_result > result)
|
||||
result = filter_result;
|
||||
}
|
||||
}
|
||||
|
||||
if (uf == 0) {
|
||||
|
|
@ -1073,6 +1084,9 @@ void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
|
|||
pa_sink_volume_change_rewind(s, nbytes);
|
||||
}
|
||||
|
||||
/* Save rewind value */
|
||||
s->thread_info.last_rewind_nbytes = nbytes;
|
||||
|
||||
PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
|
||||
pa_sink_input_assert_ref(i);
|
||||
pa_sink_input_process_rewind(i, nbytes);
|
||||
|
|
@ -1557,12 +1571,25 @@ void pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
|
|||
|
||||
PA_IDXSET_FOREACH(i, s->inputs, idx) {
|
||||
if (i->state == PA_SINK_INPUT_CORKED)
|
||||
pa_sink_input_update_resampler(i);
|
||||
pa_sink_input_update_resampler(i, true);
|
||||
}
|
||||
|
||||
pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
|
||||
}
|
||||
|
||||
/* Called from main thread */
|
||||
size_t pa_sink_get_last_rewind(pa_sink *s) {
|
||||
size_t rewind_bytes;
|
||||
|
||||
pa_sink_assert_ref(s);
|
||||
pa_assert_ctl_context();
|
||||
pa_assert(PA_SINK_IS_LINKED(s->state));
|
||||
|
||||
pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LAST_REWIND, &rewind_bytes, 0, NULL) == 0);
|
||||
|
||||
return rewind_bytes;
|
||||
}
|
||||
|
||||
/* Called from main thread */
|
||||
pa_usec_t pa_sink_get_latency(pa_sink *s) {
|
||||
int64_t usec = 0;
|
||||
|
|
@ -1639,6 +1666,27 @@ bool pa_sink_flat_volume_enabled(pa_sink *s) {
|
|||
return false;
|
||||
}
|
||||
|
||||
/* Check if the sink has a virtual sink attached.
|
||||
* Called from the IO thread. */
|
||||
bool pa_sink_has_filter_attached(pa_sink *s) {
|
||||
bool vsink_attached = false;
|
||||
void *state = NULL;
|
||||
pa_sink_input *i;
|
||||
|
||||
pa_assert(s);
|
||||
|
||||
if (PA_SINK_IS_LINKED(s->thread_info.state)) {
|
||||
PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
|
||||
if (!i->origin_sink)
|
||||
continue;
|
||||
|
||||
vsink_attached = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return vsink_attached;
|
||||
}
|
||||
|
||||
/* Called from the main thread (and also from the IO thread while the main
|
||||
* thread is waiting). */
|
||||
pa_sink *pa_sink_get_master(pa_sink *s) {
|
||||
|
|
@ -2555,6 +2603,40 @@ static void set_shared_volume_within_thread(pa_sink *s) {
|
|||
}
|
||||
}
|
||||
|
||||
/* Called from IO thread. Gets max_rewind limit from sink inputs.
|
||||
* This function is used to communicate the max_rewind value of a
|
||||
* virtual sink to the master sink. The get_max_rewind_limit()
|
||||
* callback is implemented by sink inputs connecting a virtual
|
||||
* sink to its master. */
|
||||
static size_t get_max_rewind_limit(pa_sink *s, size_t requested_limit) {
|
||||
pa_sink_input *i;
|
||||
void *state = NULL;
|
||||
size_t rewind_limit;
|
||||
|
||||
pa_assert(s);
|
||||
|
||||
/* Get rewind limit in sink sample spec from sink inputs */
|
||||
rewind_limit = (size_t)(-1);
|
||||
if (PA_SINK_IS_LINKED(s->thread_info.state)) {
|
||||
PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
|
||||
|
||||
if (i->get_max_rewind_limit) {
|
||||
size_t limit;
|
||||
|
||||
limit = i->get_max_rewind_limit(i);
|
||||
if (rewind_limit == (size_t)(-1) || rewind_limit > limit)
|
||||
rewind_limit = limit;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Set max_rewind */
|
||||
if (rewind_limit != (size_t)(-1))
|
||||
requested_limit = PA_MIN(rewind_limit, requested_limit);
|
||||
|
||||
return requested_limit;
|
||||
}
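A minimal sketch of how a virtual sink's sink input could feed this mechanism; every name below, including the struct userdata fields, is assumed for illustration only and does not come from the patch:

struct userdata {
    bool sink_opened;
    size_t dsp_rewind_limit;    /* rewind capability of the DSP, in the master sink's sample spec */
};

static size_t example_get_max_rewind_limit(pa_sink_input *i) {
    struct userdata *u = i->userdata;

    /* size_t (-1) means "no limit", also returned while the virtual sink is not opened */
    if (!u || !u->sink_opened)
        return (size_t) -1;

    return u->dsp_rewind_limit;
}

/* ... in the module's sink input setup: i->get_max_rewind_limit = example_get_max_rewind_limit; */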
|
||||
|
||||
/* Called from IO thread, except when it is not */
|
||||
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
|
||||
pa_sink *s = PA_SINK(o);
|
||||
|
|
@ -2651,8 +2733,8 @@ int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offse
|
|||
}
|
||||
|
||||
pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
|
||||
pa_sink_invalidate_requested_latency(s, true);
|
||||
pa_sink_request_rewind(s, (size_t) -1);
|
||||
pa_sink_invalidate_requested_latency(s, true);
|
||||
|
||||
/* In flat volume mode we need to update the volume as
|
||||
* well */
|
||||
|
|
@ -2669,59 +2751,21 @@ int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offse
|
|||
pa_assert(!i->thread_info.sync_prev);
|
||||
|
||||
if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
|
||||
pa_usec_t usec = 0;
|
||||
size_t sink_nbytes, total_nbytes;
|
||||
|
||||
/* The old sink probably has some audio from this
|
||||
* stream in its buffer. We want to "take it back" as
|
||||
* much as possible and play it to the new sink. We
|
||||
* don't know at this point how much the old sink can
|
||||
* rewind. We have to pick something, and that
|
||||
* something is the full latency of the old sink here.
|
||||
* So we rewind the stream buffer by the sink latency
|
||||
* amount, which may be more than what we should
|
||||
* rewind. This can result in a chunk of audio being
|
||||
* played both to the old sink and the new sink.
|
||||
*
|
||||
* FIXME: Fix this code so that we don't have to make
|
||||
* guesses about how much the sink will actually be
|
||||
* able to rewind. If someone comes up with a solution
|
||||
* for this, something to note is that the part of the
|
||||
* latency that the old sink couldn't rewind should
|
||||
* ideally be compensated after the stream has moved
|
||||
* to the new sink by adding silence. The new sink
|
||||
* most likely can't start playing the moved stream
|
||||
* immediately, and that gap should be removed from
|
||||
* the "compensation silence" (at least at the time of
|
||||
* writing this, the move finish code will actually
|
||||
* already take care of dropping the new sink's
|
||||
* unrewindable latency, so taking into account the
|
||||
* unrewindable latency of the old sink is the only
|
||||
* problem).
|
||||
*
|
||||
* The render_memblockq contents are discarded,
|
||||
* because when the sink changes, the format of the
|
||||
* audio stored in the render_memblockq may change
|
||||
* too, making the stored audio invalid. FIXME:
|
||||
* However, the read and write indices are moved back
|
||||
* the same amount, so if they are not the same now,
|
||||
* they won't be the same after the rewind either. If
|
||||
* the write index of the render_memblockq is ahead of
|
||||
* the read index, then the render_memblockq will feed
|
||||
* the new sink some silence first, which it shouldn't
|
||||
* do. The write index should be flushed to be the
|
||||
* same as the read index. */
|
||||
* rewind, so we just save some values and reconstruct
|
||||
* the render memblockq in finish_move(). */
|
||||
|
||||
/* Get the latency of the sink */
|
||||
usec = pa_sink_get_latency_within_thread(s, false);
|
||||
sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
|
||||
total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
|
||||
|
||||
if (total_nbytes > 0) {
|
||||
i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
|
||||
i->thread_info.rewrite_flush = true;
|
||||
pa_sink_input_process_rewind(i, sink_nbytes);
|
||||
}
|
||||
/* Save some current values for restore_render_memblockq() */
|
||||
i->thread_info.origin_sink_latency = pa_sink_get_latency_within_thread(s, false);
|
||||
i->thread_info.move_start_time = pa_rtclock_now();
|
||||
i->thread_info.resampler_delay_frames = 0;
|
||||
if (i->thread_info.resampler)
|
||||
/* Round down */
|
||||
i->thread_info.resampler_delay_frames = pa_resampler_get_delay(i->thread_info.resampler, false);
|
||||
}
|
||||
|
||||
pa_sink_input_detach(i);
|
||||
|
|
@ -2729,11 +2773,13 @@ int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offse
|
|||
/* Let's remove the sink input ...*/
|
||||
pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
|
||||
|
||||
pa_sink_invalidate_requested_latency(s, true);
|
||||
|
||||
/* The rewind must be requested before invalidating the latency, otherwise
|
||||
* the max_rewind value of the sink may change before the rewind. */
|
||||
pa_log_debug("Requesting rewind due to started move");
|
||||
pa_sink_request_rewind(s, (size_t) -1);
|
||||
|
||||
pa_sink_invalidate_requested_latency(s, true);
|
||||
|
||||
/* In flat volume mode we need to update the volume as
|
||||
* well */
|
||||
return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
|
||||
|
|
@ -2754,7 +2800,7 @@ int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offse
|
|||
|
||||
if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
|
||||
pa_usec_t usec = 0;
|
||||
size_t nbytes;
|
||||
size_t nbytes, delay_bytes;
|
||||
|
||||
/* In the ideal case the new sink would start playing
|
||||
* the stream immediately. That requires the sink to
|
||||
|
|
@ -2778,8 +2824,20 @@ int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offse
|
|||
usec = pa_sink_get_latency_within_thread(s, false);
|
||||
nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
|
||||
|
||||
if (nbytes > 0)
|
||||
pa_sink_input_drop(i, nbytes);
|
||||
/* Calculate number of samples that have been played during the move */
|
||||
delay_bytes = 0;
|
||||
if (i->thread_info.move_start_time > 0) {
|
||||
usec = pa_rtclock_now() - i->thread_info.move_start_time;
|
||||
pa_log_debug("Move took %lu usec", usec);
|
||||
delay_bytes = pa_usec_to_bytes(usec, &s->sample_spec);
|
||||
}
|
||||
|
||||
/* max_rewind must be updated for the sink input because otherwise
|
||||
* the data in the render memblockq will get lost */
|
||||
pa_sink_input_update_max_rewind(i, nbytes);
|
||||
|
||||
if (nbytes + delay_bytes > 0)
|
||||
pa_sink_input_drop(i, nbytes + delay_bytes);
|
||||
|
||||
pa_log_debug("Requesting rewind due to finished move");
|
||||
pa_sink_request_rewind(s, nbytes);
|
||||
|
|
@ -2796,6 +2854,11 @@ int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offse
|
|||
pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
|
||||
pa_sink_input_update_max_request(i, s->thread_info.max_request);
|
||||
|
||||
/* Reset move variables */
|
||||
i->thread_info.move_start_time = 0;
|
||||
i->thread_info.resampler_delay_frames = 0;
|
||||
i->thread_info.origin_sink_latency = 0;
|
||||
|
||||
return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
|
||||
}
|
||||
|
||||
|
|
@ -2942,6 +3005,11 @@ int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offse
|
|||
*((size_t*) userdata) = s->thread_info.max_rewind;
|
||||
return 0;
|
||||
|
||||
case PA_SINK_MESSAGE_GET_LAST_REWIND:
|
||||
|
||||
*((size_t*) userdata) = s->thread_info.last_rewind_nbytes;
|
||||
return 0;
|
||||
|
||||
case PA_SINK_MESSAGE_GET_MAX_REQUEST:
|
||||
|
||||
*((size_t*) userdata) = s->thread_info.max_request;
|
||||
|
|
@ -3118,6 +3186,8 @@ void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
|
|||
pa_sink_assert_ref(s);
|
||||
pa_sink_assert_io_context(s);
|
||||
|
||||
max_rewind = get_max_rewind_limit(s, max_rewind);
|
||||
|
||||
if (max_rewind == s->thread_info.max_rewind)
|
||||
return;
|
||||
|
||||
|
|
|
|||
|
|
@ -299,6 +299,9 @@ struct pa_sink {
|
|||
size_t rewind_nbytes;
|
||||
bool rewind_requested;
|
||||
|
||||
/* Size of last rewind */
|
||||
size_t last_rewind_nbytes;
|
||||
|
||||
/* Both dynamic and fixed latencies will be clamped to this
|
||||
* range. */
|
||||
pa_usec_t min_latency; /* we won't go below this latency */
|
||||
|
|
@ -359,6 +362,7 @@ typedef enum pa_sink_message {
|
|||
PA_SINK_MESSAGE_SET_MAX_REQUEST,
|
||||
PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE,
|
||||
PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET,
|
||||
PA_SINK_MESSAGE_GET_LAST_REWIND,
|
||||
PA_SINK_MESSAGE_MAX
|
||||
} pa_sink_message_t;
|
||||
|
||||
|
|
@ -456,6 +460,7 @@ void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *ma
|
|||
pa_usec_t pa_sink_get_fixed_latency(pa_sink *s);
|
||||
|
||||
size_t pa_sink_get_max_rewind(pa_sink *s);
|
||||
size_t pa_sink_get_last_rewind(pa_sink *s);
|
||||
size_t pa_sink_get_max_request(pa_sink *s);
|
||||
|
||||
int pa_sink_update_status(pa_sink*s);
|
||||
|
|
@ -465,6 +470,10 @@ int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause);
|
|||
/* Use this instead of checking s->flags & PA_SINK_FLAT_VOLUME directly. */
|
||||
bool pa_sink_flat_volume_enabled(pa_sink *s);
|
||||
|
||||
/* Check if the sink has a virtual sink attached.
|
||||
* Called from the IO thread. */
|
||||
bool pa_sink_has_filter_attached(pa_sink *s);
|
||||
|
||||
/* Get the master sink when sharing volumes */
|
||||
pa_sink *pa_sink_get_master(pa_sink *s);
|
||||
|
||||
|
|
|
|||
|
|
@ -51,11 +51,11 @@ int pa_sndfile_read_sample_spec(SNDFILE *sf, pa_sample_spec *ss) {
|
|||
ss->format = PA_SAMPLE_S16NE;
|
||||
break;
|
||||
|
||||
case SF_FORMAT_PCM_24:
|
||||
ss->format = PA_SAMPLE_S24NE;
|
||||
break;
|
||||
|
||||
case SF_FORMAT_PCM_32:
|
||||
case SF_FORMAT_PCM_24:
|
||||
/* note that libsndfile will convert 24 bits samples to 32 bits
|
||||
* when using the sf_readf_int function, which will be selected
|
||||
* by setting the format to s32. */
|
||||
ss->format = PA_SAMPLE_S32NE;
|
||||
break;
|
||||
|
||||
|
|
|
|||
|
|
@ -642,3 +642,83 @@ char *pa_socket_server_get_address(pa_socket_server *s, char *c, size_t l) {
|
|||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef HAVE_SYS_UN_H
|
||||
|
||||
int pa_unix_socket_is_stale(const char *fn) {
|
||||
struct sockaddr_un sa;
|
||||
int fd = -1, ret = -1;
|
||||
|
||||
pa_assert(fn);
|
||||
|
||||
if ((fd = pa_socket_cloexec(PF_UNIX, SOCK_STREAM, 0)) < 0) {
|
||||
pa_log("socket(): %s", pa_cstrerror(errno));
|
||||
goto finish;
|
||||
}
|
||||
|
||||
sa.sun_family = AF_UNIX;
|
||||
strncpy(sa.sun_path, fn, sizeof(sa.sun_path)-1);
|
||||
sa.sun_path[sizeof(sa.sun_path) - 1] = 0;
|
||||
|
||||
if (connect(fd, (struct sockaddr*) &sa, sizeof(sa)) < 0) {
|
||||
#if !defined(OS_IS_WIN32)
|
||||
if (errno == ECONNREFUSED)
|
||||
ret = 1;
|
||||
#else
|
||||
if (WSAGetLastError() == WSAECONNREFUSED || WSAGetLastError() == WSAEINVAL)
|
||||
ret = 1;
|
||||
#endif
|
||||
} else
|
||||
ret = 0;
|
||||
|
||||
finish:
|
||||
if (fd >= 0)
|
||||
pa_close(fd);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int pa_unix_socket_remove_stale(const char *fn) {
|
||||
int r;
|
||||
|
||||
pa_assert(fn);
|
||||
|
||||
#ifdef HAVE_SYSTEMD_DAEMON
|
||||
{
|
||||
int n = sd_listen_fds(0);
|
||||
if (n > 0) {
|
||||
for (int i = 0; i < n; ++i) {
|
||||
if (sd_is_socket_unix(SD_LISTEN_FDS_START + i, SOCK_STREAM, 1, fn, 0) > 0) {
|
||||
/* This is a socket activated socket, therefore do not consider
|
||||
* it stale. */
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
if ((r = pa_unix_socket_is_stale(fn)) < 0)
|
||||
return errno != ENOENT ? -1 : 0;
|
||||
|
||||
if (!r)
|
||||
return 0;
|
||||
|
||||
/* Yes, here is a race condition. But who cares? */
|
||||
if (unlink(fn) < 0)
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#else /* HAVE_SYS_UN_H */
|
||||
|
||||
int pa_unix_socket_is_stale(const char *fn) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int pa_unix_socket_remove_stale(const char *fn) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
#endif /* HAVE_SYS_UN_H */
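A short usage sketch for the relocated helpers; the wrapper name and call site are assumptions, not part of the patch. A daemon would normally clear a stale socket path right before creating its listening pa_socket_server:

static int example_cleanup_before_bind(const char *path) {
    /* Returns 0 if the path is free (or a stale socket was unlinked), < 0 on error.
     * Relies on the pulsecore headers already included by this file. */
    int r;

    if ((r = pa_unix_socket_remove_stale(path)) < 0)
        pa_log("Failed to remove stale socket %s: %s", path, pa_cstrerror(errno));

    return r;
}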
|
||||
|
|
|
|||
|
|
@ -50,4 +50,7 @@ void pa_socket_server_set_callback(pa_socket_server*s, pa_socket_server_on_conne
|
|||
|
||||
char *pa_socket_server_get_address(pa_socket_server *s, char *c, size_t l);
|
||||
|
||||
int pa_unix_socket_is_stale(const char *fn);
|
||||
int pa_unix_socket_remove_stale(const char *fn);
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -50,9 +50,6 @@
|
|||
#ifdef HAVE_NETDB_H
|
||||
#include <netdb.h>
|
||||
#endif
|
||||
#ifdef HAVE_SYSTEMD_DAEMON
|
||||
#include <systemd/sd-daemon.h>
|
||||
#endif
|
||||
|
||||
#include <pulsecore/core-error.h>
|
||||
#include <pulsecore/core-util.h>
|
||||
|
|
@ -221,6 +218,7 @@ int pa_socket_set_sndbuf(int fd, size_t l) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
<<<<<<< HEAD
|
||||
#ifdef HAVE_SYS_UN_H
|
||||
|
||||
int pa_unix_socket_is_stale(const char *fn) {
|
||||
|
|
@ -301,6 +299,8 @@ int pa_unix_socket_remove_stale(const char *fn) {
|
|||
|
||||
#endif /* HAVE_SYS_UN_H */
|
||||
|
||||
=======
|
||||
>>>>>>> c1990dd02647405b0c13aab59f75d05cbb202336
|
||||
bool pa_socket_address_is_local(const struct sockaddr *sa) {
|
||||
pa_assert(sa);
|
||||
|
||||
|
|
|
|||
|
|
@ -35,9 +35,6 @@ void pa_make_udp_socket_low_delay(int fd);
|
|||
int pa_socket_set_sndbuf(int fd, size_t l);
|
||||
int pa_socket_set_rcvbuf(int fd, size_t l);
|
||||
|
||||
int pa_unix_socket_is_stale(const char *fn);
|
||||
int pa_unix_socket_remove_stale(const char *fn);
|
||||
|
||||
bool pa_socket_address_is_local(const struct sockaddr *sa);
|
||||
bool pa_socket_is_local(int fd);
|
||||
|
||||
|
|
|
|||
|
|
@ -185,7 +185,7 @@ static int sink_input_pop_cb(pa_sink_input *i, size_t length, pa_memchunk *chunk
|
|||
|
||||
tchunk.length = (size_t) n * fs;
|
||||
|
||||
pa_memblockq_push(u->memblockq, &tchunk);
|
||||
pa_memblockq_push_align(u->memblockq, &tchunk);
|
||||
pa_memblock_unref(tchunk.memblock);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -29,6 +29,7 @@
|
|||
#include <pulse/xmalloc.h>
|
||||
#include <pulse/util.h>
|
||||
#include <pulse/internal.h>
|
||||
#include <pulse/timeval.h>
|
||||
|
||||
#include <pulsecore/core-format.h>
|
||||
#include <pulsecore/mix.h>
|
||||
|
|
@ -237,6 +238,7 @@ int pa_source_output_new(
|
|||
pa_channel_map volume_map;
|
||||
int r;
|
||||
char *pt;
|
||||
size_t resampler_history;
|
||||
|
||||
pa_assert(_o);
|
||||
pa_assert(core);
|
||||
|
|
@ -499,6 +501,11 @@ int pa_source_output_new(
|
|||
0,
|
||||
&o->source->silence);
|
||||
|
||||
resampler_history = (uint64_t) PA_RESAMPLER_MAX_DELAY_USEC * o->source->sample_spec.rate / PA_USEC_PER_SEC;
|
||||
resampler_history *= pa_frame_size(&o->source->sample_spec);
|
||||
|
||||
pa_memblockq_set_maxrewind(o->thread_info.delay_memblockq, resampler_history + pa_source_get_max_rewind(o->source));
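/* Worked example with assumed figures: at a 48 kHz source rate this is
 * 33000 * 48000 / 1000000 = 1584 frames of resampler history, i.e. 6336 bytes
 * with 4-byte frames, kept rewindable on top of the source's max_rewind. */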
|
||||
|
||||
pa_assert_se(pa_idxset_put(core->source_outputs, o, &o->index) == 0);
|
||||
pa_assert_se(pa_idxset_put(o->source->outputs, pa_source_output_ref(o), NULL) == 0);
|
||||
|
||||
|
|
@ -865,21 +872,36 @@ void pa_source_output_process_rewind(pa_source_output *o, size_t nbytes /* in so
        return;

    if (o->process_rewind) {
        pa_assert(pa_memblockq_get_length(o->thread_info.delay_memblockq) == 0);
        size_t source_output_nbytes;
        size_t length;

        if (o->thread_info.resampler)
            nbytes = pa_resampler_result(o->thread_info.resampler, nbytes);
        /* The length of the memblockq may be non-zero if pa_source_output_rewind() is called twice
         * without pa_source_output_push() called in between. In that case, the resampler has already
         * been reset and we can skip that part. */
        length = pa_memblockq_get_length(o->thread_info.delay_memblockq);

        pa_log_debug("Have to rewind %lu bytes on implementor.", (unsigned long) nbytes);
        pa_memblockq_rewind(o->thread_info.delay_memblockq, nbytes);

        if (nbytes > 0)
            o->process_rewind(o, nbytes);
        source_output_nbytes = pa_resampler_result(o->thread_info.resampler, nbytes);

        if (o->thread_info.resampler)
            pa_resampler_rewind(o->thread_info.resampler, nbytes);
        pa_log_debug("Have to rewind %lu bytes on implementor.", (unsigned long) source_output_nbytes);

    } else
        pa_memblockq_seek(o->thread_info.delay_memblockq, - ((int64_t) nbytes), PA_SEEK_RELATIVE, true);
        if (source_output_nbytes > 0)
            o->process_rewind(o, source_output_nbytes);

        if (o->thread_info.resampler && length == 0) {
            size_t resampler_bytes;

            /* Round down to full frames */
            resampler_bytes = (size_t) pa_resampler_get_delay(o->thread_info.resampler, false) * pa_frame_size(&o->source->sample_spec);
            if (resampler_bytes > 0)
                pa_memblockq_rewind(o->thread_info.delay_memblockq, resampler_bytes);

            pa_resampler_rewind(o->thread_info.resampler, source_output_nbytes, NULL, 0);
        }
    }

    pa_memblockq_seek(o->thread_info.delay_memblockq, - ((int64_t) nbytes), PA_SEEK_RELATIVE, true);
}

/* Called from thread context */
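The rewrite of pa_source_output_process_rewind() above leans on pa_resampler_result(), which converts a byte count in the resampler's input spec (here the source's) to the corresponding count in its output spec; the unguarded calls suggest it is also expected to pass the count through unchanged when no resampler is set, although that change is not visible in this hunk. A worked conversion with assumed specs (48 kHz in, 44.1 kHz out, 4-byte frames on both sides):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t in_rate = 48000, out_rate = 44100, frame_size = 4;  /* assumed specs */
    uint64_t in_bytes = 4800;                                    /* 1200 frames = 25 ms of source data */

    uint64_t out_frames = in_bytes / frame_size * out_rate / in_rate;
    printf("%llu output bytes\n", (unsigned long long) (out_frames * frame_size)); /* 4408 bytes, rounded down to whole frames */
    return 0;
}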
@ -887,18 +909,25 @@ size_t pa_source_output_get_max_rewind(pa_source_output *o) {
    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);

    return o->thread_info.resampler ? pa_resampler_request(o->thread_info.resampler, o->source->thread_info.max_rewind) : o->source->thread_info.max_rewind;
    return pa_resampler_result(o->thread_info.resampler, o->source->thread_info.max_rewind);
}

/* Called from thread context */
void pa_source_output_update_max_rewind(pa_source_output *o, size_t nbytes /* in the source's sample spec */) {
    size_t resampler_history;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert(PA_SOURCE_OUTPUT_IS_LINKED(o->thread_info.state));
    pa_assert(pa_frame_aligned(nbytes, &o->source->sample_spec));

    resampler_history = (uint64_t) PA_RESAMPLER_MAX_DELAY_USEC * o->source->sample_spec.rate / PA_USEC_PER_SEC;
    resampler_history *= pa_frame_size(&o->source->sample_spec);

    pa_memblockq_set_maxrewind(o->thread_info.delay_memblockq, resampler_history + nbytes);

    if (o->update_max_rewind)
        o->update_max_rewind(o, o->thread_info.resampler ? pa_resampler_result(o->thread_info.resampler, nbytes) : nbytes);
        o->update_max_rewind(o, pa_resampler_result(o->thread_info.resampler, nbytes));
}

/* Called from thread context */
@ -1701,6 +1730,7 @@ int pa_source_output_process_msg(pa_msgobject *mo, int code, void *userdata, int
            pa_usec_t *r = userdata;

            r[0] += pa_bytes_to_usec(pa_memblockq_get_length(o->thread_info.delay_memblockq), &o->source->sample_spec);
            r[0] += pa_resampler_get_delay_usec(o->thread_info.resampler);
            r[1] += pa_source_get_latency_within_thread(o->source, false);

            return 0;
@ -1784,6 +1814,7 @@ finish:
int pa_source_output_update_resampler(pa_source_output *o) {
    pa_resampler *new_resampler;
    char *memblockq_name;
    size_t resampler_history;

    pa_source_output_assert_ref(o);
    pa_assert_ctl_context();
@ -1841,6 +1872,11 @@ int pa_source_output_update_resampler(pa_source_output *o) {
            &o->source->silence);
    pa_xfree(memblockq_name);

    resampler_history = (uint64_t) PA_RESAMPLER_MAX_DELAY_USEC * o->source->sample_spec.rate / PA_USEC_PER_SEC;
    resampler_history *= pa_frame_size(&o->source->sample_spec);

    pa_memblockq_set_maxrewind(o->thread_info.delay_memblockq, resampler_history + pa_source_get_max_rewind(o->source));

    o->actual_resample_method = new_resampler ? pa_resampler_get_method(new_resampler) : PA_RESAMPLER_INVALID;

    pa_log_debug("Updated resampler for source output %d", o->index);
409
src/pulsecore/time-smoother_2.c
Normal file

@ -0,0 +1,409 @@
/***
  This file is part of PulseAudio.

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/

/* The code in this file is based on the theoretical background found at
 * https://www.freedesktop.org/software/pulseaudio/misc/rate_estimator.odt.
 * The theory has never been reviewed, so it may be inaccurate in places. */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <pulsecore/macro.h>
#include <pulse/sample.h>
#include <pulse/xmalloc.h>
#include <pulse/timeval.h>

#include "time-smoother_2.h"

struct pa_smoother_2 {
    /* Values set when the smoother is created */
    pa_usec_t smoother_window_time;
    uint32_t rate;
    uint32_t frame_size;

    /* USB hack parameters */
    bool usb_hack;
    bool enable_usb_hack;
    uint32_t hack_threshold;

    /* Smoother state */
    bool init;
    bool paused;

    /* Current byte count start value */
    double start_pos;
    /* System time corresponding to start_pos */
    pa_usec_t start_time;
    /* Conversion factor between time domains */
    double time_factor;

    /* Used if the smoother is paused while still in init state */
    pa_usec_t fixup_time;

    /* Time offset for USB devices */
    int64_t time_offset;

    /* Various time stamps */
    pa_usec_t resume_time;
    pa_usec_t pause_time;
    pa_usec_t smoother_start_time;
    pa_usec_t last_time;

    /* Variables used for Kalman filter */
    double time_variance;
    double time_factor_variance;
    double kalman_variance;

    /* Variables used for low pass filter */
    double drift_filter;
    double drift_filter_1;
};

/* Create new smoother */
pa_smoother_2* pa_smoother_2_new(pa_usec_t window, pa_usec_t time_stamp, uint32_t frame_size, uint32_t rate) {
    pa_smoother_2 *s;

    pa_assert(window > 0);

    s = pa_xnew(pa_smoother_2, 1);
    s->enable_usb_hack = false;
    s->usb_hack = false;
    s->hack_threshold = 0;
    s->smoother_window_time = window;
    s->rate = rate;
    s->frame_size = frame_size;

    pa_smoother_2_reset(s, time_stamp);

    return s;
}

/* Free the smoother */
void pa_smoother_2_free(pa_smoother_2* s) {

    pa_assert(s);

    pa_xfree(s);
}

void pa_smoother_2_set_rate(pa_smoother_2 *s, pa_usec_t time_stamp, uint32_t rate) {

    pa_assert(s);
    pa_assert(rate > 0);

    /* If the rate has changed, data in the smoother will be invalid,
     * therefore also reset the smoother */
    if (rate != s->rate) {
        s->rate = rate;
        pa_smoother_2_reset(s, time_stamp);
    }
}

void pa_smoother_2_set_sample_spec(pa_smoother_2 *s, pa_usec_t time_stamp, pa_sample_spec *spec) {
    size_t frame_size;

    pa_assert(s);
    pa_assert(pa_sample_spec_valid(spec));

    /* If the sample spec has changed, data in the smoother will be invalid,
     * therefore also reset the smoother */
    frame_size = pa_frame_size(spec);
    if (frame_size != s->frame_size || spec->rate != s->rate) {
        s->frame_size = frame_size;
        s->rate = spec->rate;
        pa_smoother_2_reset(s, time_stamp);
    }
}

/* Add a new data point and re-calculate time conversion factor */
void pa_smoother_2_put(pa_smoother_2 *s, pa_usec_t time_stamp, int64_t byte_count) {
    double byte_difference, iteration_time;
    double time_delta_system, time_delta_card, drift, filter_constant, filter_constant_1;
    double temp, filtered_time_delta_card, expected_time_delta_card;

    pa_assert(s);

    /* Smoother is paused, nothing to do */
    if (s->paused)
        return;

    /* Initial setup or resume */
    if PA_UNLIKELY((s->init)) {
        s->resume_time = time_stamp;

        /* We have no data yet, nothing to do */
        if (byte_count <= 0)
            return;

        /* Now we are playing/recording.
         * Get fresh time stamps and save the start count */
        s->start_pos = (double)byte_count;
        s->last_time = time_stamp;
        s->start_time = time_stamp;
        s->smoother_start_time = time_stamp;

        s->usb_hack = s->enable_usb_hack;
        s->init = false;
        return;
    }

    /* Duration of last iteration */
    iteration_time = (double)time_stamp - s->last_time;

    /* Don't go backwards in time */
    if (iteration_time <= 0)
        return;

    /* Wait at least 100 ms before starting calculations, otherwise the
     * impact of the offset error will slow down convergence */
    if (time_stamp < s->smoother_start_time + 100 * PA_USEC_PER_MSEC)
        return;

    /* Time difference in system time domain */
    time_delta_system = time_stamp - s->start_time;

    /* Number of bytes played since start_time */
    byte_difference = (double)byte_count - s->start_pos;

    /* Time difference in soundcard time domain. Don't use
     * pa_bytes_to_usec() here because byte_difference need not
     * be on a sample boundary */
    time_delta_card = byte_difference / s->frame_size / s->rate * PA_USEC_PER_SEC;
    filtered_time_delta_card = time_delta_card;

    /* Prediction of measurement */
    expected_time_delta_card = time_delta_system * s->time_factor;

    /* Filtered variance of card time measurements */
    s->time_variance = 0.9 * s->time_variance + 0.1 * (time_delta_card - expected_time_delta_card) * (time_delta_card - expected_time_delta_card);

    /* Kalman filter, will only be used when the time factor has converged well enough,
     * the value of 100 corresponds to a change rate of approximately 10e-6 per second. */
    if (s->time_factor_variance < 100) {
        filtered_time_delta_card = (time_delta_card * s->kalman_variance + expected_time_delta_card * s->time_variance) / (s->kalman_variance + s->time_variance);
        s->kalman_variance = s->kalman_variance * s->time_variance / (s->kalman_variance + s->time_variance) + s->time_variance / 4 + 500;
    }

    /* This is a horrible hack which is necessary because USB sinks seem to fix up
     * the reported delay by some milliseconds shortly after startup. This is
     * an artifact, the real latency does not change on the reported jump. If the
     * change is not caught or if the hack is triggered inadvertently, it will lead to
     * prolonged convergence time and decreased stability of the reported latency.
     * Since the fix up will occur within the first seconds, it is disabled later to
     * avoid false triggers. When run as batch device, the threshold for the hack must
     * be lower (1000) than for timer based scheduling (2000). */
    if (s->usb_hack && time_stamp - s->smoother_start_time < 5 * PA_USEC_PER_SEC) {
        if ((time_delta_system - filtered_time_delta_card / s->time_factor) > (double)s->hack_threshold) {
            /* Recalculate initial conditions */
            temp = time_stamp - time_delta_card - s->start_time;
            s->start_time += temp;
            s->smoother_start_time += temp;
            s->time_offset = -temp;

            /* Reset time factor variance */
            s->time_factor_variance = 10000;

            pa_log_debug("USB Hack, start time corrected by %0.2f usec", temp);
            s->usb_hack = false;
            return;
        }
    }

    /* Parameter for lowpass filters with time constants of smoother_window_time
     * and smoother_window_time/8 */
    temp = (double)s->smoother_window_time / 6.2831853;
    filter_constant = iteration_time / (iteration_time + temp / 8.0);
    filter_constant_1 = iteration_time / (iteration_time + temp);

    /* Temporarily save the current time factor */
    temp = s->time_factor;

    /* Calculate geometric series */
    drift = (s->drift_filter_1 + 1.0) * (1.5 - filtered_time_delta_card / time_delta_system);

    /* 2nd order lowpass */
    s->drift_filter = (1 - filter_constant) * s->drift_filter + filter_constant * drift;
    s->drift_filter_1 = (1 - filter_constant) * s->drift_filter_1 + filter_constant * s->drift_filter;

    /* Calculate time conversion factor, filter again */
    s->time_factor = (1 - filter_constant_1) * s->time_factor + filter_constant_1 * (s->drift_filter_1 + 3) / (s->drift_filter_1 + 1) / 2;

    /* Filtered variance of time factor derivative, used as measure for the convergence of the time factor */
    temp = (s->time_factor - temp) / iteration_time * 10000000000000;
    s->time_factor_variance = (1 - filter_constant_1) * s->time_factor_variance + filter_constant_1 * temp * temp;

    /* Calculate new start time and corresponding sample count after window time */
    if (time_stamp > s->smoother_start_time + s->smoother_window_time) {
        s->start_pos += ((double)byte_count - s->start_pos) / (time_stamp - s->start_time) * iteration_time;
        s->start_time += (pa_usec_t)iteration_time;
    }

    /* Save current system time */
    s->last_time = time_stamp;
}

/* Calculate the current latency. For a source, the sign must be inverted */
int64_t pa_smoother_2_get_delay(pa_smoother_2 *s, pa_usec_t time_stamp, uint64_t byte_count) {
    int64_t now, delay;

    pa_assert(s);

    /* If we do not have a valid frame size and rate, just return 0 */
    if (!s->frame_size || !s->rate)
        return 0;

    /* Smoother is paused or has been resumed but no new data has been received */
    if (s->paused || s->init) {
        delay = (int64_t)((double)byte_count * PA_USEC_PER_SEC / s->frame_size / s->rate);
        return delay - pa_smoother_2_get(s, time_stamp);
    }

    /* Convert system time difference to soundcard time difference */
    now = (time_stamp - s->start_time - s->time_offset) * s->time_factor;

    /* Don't use pa_bytes_to_usec(), s->start_pos need not be on a sample boundary */
    return (int64_t)(((double)byte_count - s->start_pos) / s->frame_size / s->rate * PA_USEC_PER_SEC) - now;
}

/* Convert system time to sound card time */
pa_usec_t pa_smoother_2_get(pa_smoother_2 *s, pa_usec_t time_stamp) {
    pa_usec_t current_time;

    pa_assert(s);

    /* If we do not have a valid frame size and rate, just return 0 */
    if (!s->frame_size || !s->rate)
        return 0;

    /* Sound card time at start_time */
    current_time = (pa_usec_t)(s->start_pos / s->frame_size / s->rate * PA_USEC_PER_SEC);

    /* If the smoother has not started, just return system time since resume */
    if (!s->start_time) {
        if (time_stamp >= s->resume_time && !s->paused)
            current_time = time_stamp - s->resume_time;
        else
            current_time = 0;

    /* If we are paused return the sound card time at pause_time */
    } else if (s->paused)
        current_time += (s->pause_time - s->start_time - s->time_offset - s->fixup_time) * s->time_factor;

    /* If we are initializing, add the time since resume to the card time at pause_time */
    else if (s->init) {
        current_time += (s->pause_time - s->start_time - s->time_offset - s->fixup_time) * s->time_factor;
        if (time_stamp > s->resume_time)
            current_time += (time_stamp - s->resume_time) * s->time_factor;

    /* Smoother is running, calculate current sound card time */
    } else
        current_time += (time_stamp - s->start_time - s->time_offset) * s->time_factor;

    return current_time;
}

/* Convert a time interval from sound card time to system time */
pa_usec_t pa_smoother_2_translate(pa_smoother_2 *s, pa_usec_t time_difference) {

    pa_assert(s);

    /* If not started yet, return the time difference */
    if (!s->start_time)
        return time_difference;

    return (pa_usec_t)(time_difference / s->time_factor);
}

/* Enable USB hack */
void pa_smoother_2_usb_hack_enable(pa_smoother_2 *s, bool enable, pa_usec_t offset) {

    pa_assert(s);

    s->enable_usb_hack = enable;
    s->hack_threshold = offset;
}

/* Reset the smoother */
void pa_smoother_2_reset(pa_smoother_2 *s, pa_usec_t time_stamp) {

    pa_assert(s);

    /* Reset variables for time estimation */
    s->drift_filter = 1.0;
    s->drift_filter_1 = 1.0;
    s->time_factor = 1.0;
    s->start_pos = 0;
    s->init = true;
    s->time_offset = 0;
    s->time_factor_variance = 10000.0;
    s->kalman_variance = 10000000.0;
    s->time_variance = 100000.0;
    s->start_time = 0;
    s->last_time = 0;
    s->smoother_start_time = 0;
    s->usb_hack = false;
    s->pause_time = time_stamp;
    s->fixup_time = 0;
    s->resume_time = time_stamp;
    s->paused = false;

    /* Set smoother to paused if rate or frame size are invalid */
    if (!s->frame_size || !s->rate)
        s->paused = true;
}

/* Pause the smoother */
void pa_smoother_2_pause(pa_smoother_2 *s, pa_usec_t time_stamp) {

    pa_assert(s);

    /* Smoother is already paused, nothing to do */
    if (s->paused)
        return;

    /* If we are in init state, add the pause time to the fixup time */
    if (s->init)
        s->fixup_time += s->resume_time - s->pause_time;
    else
        s->fixup_time = 0;

    s->smoother_start_time = 0;
    s->resume_time = time_stamp;
    s->pause_time = time_stamp;
    s->time_factor_variance = 10000.0;
    s->kalman_variance = 10000000.0;
    s->time_variance = 100000.0;
    s->init = true;
    s->paused = true;
}

/* Resume the smoother */
void pa_smoother_2_resume(pa_smoother_2 *s, pa_usec_t time_stamp) {

    pa_assert(s);

    if (!s->paused)
        return;

    /* Keep smoother paused if rate or frame size is not set */
    if (!s->frame_size || !s->rate)
        return;

    s->resume_time = time_stamp;
    s->paused = false;
}
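A worked example of the delay arithmetic implemented in pa_smoother_2_get_delay() above, using assumed numbers (48 kHz, 4-byte frames, a converged time_factor of 1.001, i.e. the card clock runs 0.1 % fast): the card time corresponding to the bytes handed over, minus the smoothed card time elapsed since start_time, is the amount still buffered.

#include <stdio.h>

int main(void) {
    double frame_size = 4, rate = 48000;        /* assumed sample spec */
    double time_factor = 1.001;                 /* assumed converged estimate */
    double start_pos = 0;
    double elapsed_system_usec = 1000000;       /* time_stamp - start_time - time_offset */
    double byte_count = 192960;                 /* 48240 frames handed to the device so far */

    double now  = elapsed_system_usec * time_factor;                        /* 1001000 usec of card time */
    double card = (byte_count - start_pos) / frame_size / rate * 1000000.0; /* 1005000 usec worth of data */

    printf("delay: %.0f usec\n", card - now);   /* 4000 usec still buffered */
    return 0;
}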
53
src/pulsecore/time-smoother_2.h
Normal file

@ -0,0 +1,53 @@
#ifndef foopulsetimesmoother2hfoo
#define foopulsetimesmoother2hfoo

/***
  This file is part of PulseAudio.

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/

#include <pulse/sample.h>

typedef struct pa_smoother_2 pa_smoother_2;

/* Create new smoother */
pa_smoother_2* pa_smoother_2_new(pa_usec_t window, pa_usec_t time_stamp, uint32_t frame_size, uint32_t rate);
/* Free the smoother */
void pa_smoother_2_free(pa_smoother_2* s);
/* Reset the smoother */
void pa_smoother_2_reset(pa_smoother_2 *s, pa_usec_t time_stamp);
/* Pause the smoother */
void pa_smoother_2_pause(pa_smoother_2 *s, pa_usec_t time_stamp);
/* Resume the smoother */
void pa_smoother_2_resume(pa_smoother_2 *s, pa_usec_t time_stamp);

/* Add a new data point and re-calculate time conversion factor */
void pa_smoother_2_put(pa_smoother_2 *s, pa_usec_t time_stamp, int64_t byte_count);

/* Calculate the current latency. For a source, the sign of the result must be inverted */
int64_t pa_smoother_2_get_delay(pa_smoother_2 *s, pa_usec_t time_stamp, uint64_t byte_count);
/* Convert system time since start to sound card time */
pa_usec_t pa_smoother_2_get(pa_smoother_2 *s, pa_usec_t time_stamp);
/* Convert a time interval from sound card time to system time */
pa_usec_t pa_smoother_2_translate(pa_smoother_2 *s, pa_usec_t time_difference);

/* Enable USB hack, only used for alsa sinks */
void pa_smoother_2_usb_hack_enable(pa_smoother_2 *s, bool enable, pa_usec_t offset);
/* Set sample rate */
void pa_smoother_2_set_rate(pa_smoother_2 *s, pa_usec_t time_stamp, uint32_t rate);
/* Set rate and frame size */
void pa_smoother_2_set_sample_spec(pa_smoother_2 *s, pa_usec_t time_stamp, pa_sample_spec *spec);

#endif
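To make the new interface above concrete, a hedged usage sketch of the pa_smoother_2 API from a driver's point of view, based only on the declarations in this header; the 5-second window, 4-byte frames, 48 kHz rate and byte counts are assumed values, and error handling is omitted.

#include <stdio.h>

#include <pulse/timeval.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/time-smoother_2.h>

int main(void) {
    pa_smoother_2 *s = pa_smoother_2_new(5 * PA_USEC_PER_SEC, pa_rtclock_now(), 4, 48000);

    /* On every hardware pointer update: feed the current system time and the
     * total number of bytes the device has consumed (sink) or produced (source). */
    pa_smoother_2_put(s, pa_rtclock_now(), 192000);

    /* When latency is queried: smoothed difference between the data handed over
     * and the card time elapsed. For a source, the sign of the result is inverted. */
    printf("latency: %lld usec\n",
           (long long) pa_smoother_2_get_delay(s, pa_rtclock_now(), 192000));

    /* Suspend/resume follow the device state. */
    pa_smoother_2_pause(s, pa_rtclock_now());
    pa_smoother_2_resume(s, pa_rtclock_now());

    pa_smoother_2_free(s);
    return 0;
}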