alsa: automatically decrease watermark after a time of stability

This commit is contained in:
Lennart Poettering 2009-08-24 03:26:56 +02:00
parent 80c6937303
commit 050a3a99e1
4 changed files with 241 additions and 84 deletions

View file

@ -62,9 +62,16 @@
/* #define DEBUG_TIMING */ /* #define DEBUG_TIMING */
#define DEFAULT_DEVICE "default" #define DEFAULT_DEVICE "default"
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */ #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */ #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
#define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (1*PA_USEC_PER_MSEC) /* 1ms -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold during the verification time, decrease the watermark */
#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */ #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/ #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
@ -99,7 +106,12 @@ struct userdata {
hwbuf_unused, hwbuf_unused,
min_sleep, min_sleep,
min_wakeup, min_wakeup,
watermark_step; watermark_inc_step,
watermark_dec_step,
watermark_inc_threshold,
watermark_dec_threshold;
pa_usec_t watermark_dec_not_before;
unsigned nfragments; unsigned nfragments;
pa_memchunk memchunk; pa_memchunk memchunk;
@ -248,6 +260,7 @@ static void fix_min_sleep_wakeup(struct userdata *u) {
size_t max_use, max_use_2; size_t max_use, max_use_2;
pa_assert(u); pa_assert(u);
pa_assert(u->use_tsched);
max_use = u->hwbuf_size - u->hwbuf_unused; max_use = u->hwbuf_size - u->hwbuf_unused;
max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec); max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
@ -262,6 +275,7 @@ static void fix_min_sleep_wakeup(struct userdata *u) {
static void fix_tsched_watermark(struct userdata *u) { static void fix_tsched_watermark(struct userdata *u) {
size_t max_use; size_t max_use;
pa_assert(u); pa_assert(u);
pa_assert(u->use_tsched);
max_use = u->hwbuf_size - u->hwbuf_unused; max_use = u->hwbuf_size - u->hwbuf_unused;
@ -272,7 +286,7 @@ static void fix_tsched_watermark(struct userdata *u) {
u->tsched_watermark = u->min_wakeup; u->tsched_watermark = u->min_wakeup;
} }
static void adjust_after_underrun(struct userdata *u) { static void increase_watermark(struct userdata *u) {
size_t old_watermark; size_t old_watermark;
pa_usec_t old_min_latency, new_min_latency; pa_usec_t old_min_latency, new_min_latency;
@ -281,31 +295,64 @@ static void adjust_after_underrun(struct userdata *u) {
/* First, just try to increase the watermark */ /* First, just try to increase the watermark */
old_watermark = u->tsched_watermark; old_watermark = u->tsched_watermark;
u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step); u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
fix_tsched_watermark(u); fix_tsched_watermark(u);
if (old_watermark != u->tsched_watermark) { if (old_watermark != u->tsched_watermark) {
pa_log_notice("Increasing wakeup watermark to %0.2f ms", pa_log_info("Increasing wakeup watermark to %0.2f ms",
(double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC); (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
return; return;
} }
/* Hmm, we cannot increase the watermark any further, hence let's raise the latency */ /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
old_min_latency = u->sink->thread_info.min_latency; old_min_latency = u->sink->thread_info.min_latency;
new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC); new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency); new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
if (old_min_latency != new_min_latency) { if (old_min_latency != new_min_latency) {
pa_log_notice("Increasing minimal latency to %0.2f ms", pa_log_info("Increasing minimal latency to %0.2f ms",
(double) new_min_latency / PA_USEC_PER_MSEC); (double) new_min_latency / PA_USEC_PER_MSEC);
pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency); pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
return;
} }
/* When we reach this we're officially fucked! */ /* When we reach this we're officially fucked! */
} }
static void decrease_watermark(struct userdata *u) {
size_t old_watermark;
pa_usec_t now;
pa_assert(u);
pa_assert(u->use_tsched);
now = pa_rtclock_now();
if (u->watermark_dec_not_before <= 0)
goto restart;
if (u->watermark_dec_not_before > now)
return;
old_watermark = u->tsched_watermark;
if (u->tsched_watermark < u->watermark_dec_step)
u->tsched_watermark = u->tsched_watermark / 2;
else
u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
fix_tsched_watermark(u);
if (old_watermark != u->tsched_watermark)
pa_log_info("Decreasing wakeup watermark to %0.2f ms",
(double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
/* We don't change the latency range */
restart:
u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) { static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
pa_usec_t usec, wm; pa_usec_t usec, wm;
@ -313,6 +360,7 @@ static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*p
pa_assert(process_usec); pa_assert(process_usec);
pa_assert(u); pa_assert(u);
pa_assert(u->use_tsched);
usec = pa_sink_get_requested_latency_within_thread(u->sink); usec = pa_sink_get_requested_latency_within_thread(u->sink);
@ -360,7 +408,7 @@ static int try_recover(struct userdata *u, const char *call, int err) {
return 0; return 0;
} }
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) { static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
size_t left_to_play; size_t left_to_play;
/* We use <= instead of < for this check here because an underrun /* We use <= instead of < for this check here because an underrun
@ -368,34 +416,55 @@ static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
* it is removed from the buffer. This is particularly important * it is removed from the buffer. This is particularly important
* when block transfer is used. */ * when block transfer is used. */
if (n_bytes <= u->hwbuf_size) { if (n_bytes <= u->hwbuf_size)
left_to_play = u->hwbuf_size - n_bytes; left_to_play = u->hwbuf_size - n_bytes;
else {
#ifdef DEBUG_TIMING /* We got a dropout. What a mess! */
pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif
} else {
left_to_play = 0; left_to_play = 0;
#ifdef DEBUG_TIMING #ifdef DEBUG_TIMING
PA_DEBUG_TRAP; PA_DEBUG_TRAP;
#endif #endif
if (!u->first && !u->after_rewind) { if (!u->first && !u->after_rewind)
if (pa_log_ratelimit()) if (pa_log_ratelimit())
pa_log_info("Underrun!"); pa_log_info("Underrun!");
if (u->use_tsched)
adjust_after_underrun(u);
} }
#ifdef DEBUG_TIMING
pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
(double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
(double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
(double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif
if (u->use_tsched) {
pa_bool_t reset_not_before = TRUE;
if (!u->first && !u->after_rewind) {
if (left_to_play < u->watermark_inc_threshold)
increase_watermark(u);
else if (left_to_play > u->watermark_dec_threshold) {
reset_not_before = FALSE;
/* We decrease the watermark only if we have actually
* been woken up by a timeout. If something else woke
* us up it's too easy to fulfill the deadlines... */
if (on_timeout)
decrease_watermark(u);
}
}
if (reset_not_before)
u->watermark_dec_not_before = 0;
} }
return left_to_play; return left_to_play;
} }
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) { static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
pa_bool_t work_done = TRUE; pa_bool_t work_done = TRUE;
pa_usec_t max_sleep_usec = 0, process_usec = 0; pa_usec_t max_sleep_usec = 0, process_usec = 0;
size_t left_to_play; size_t left_to_play;
@ -430,7 +499,8 @@ static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polle
pa_log_debug("avail: %lu", (unsigned long) n_bytes); pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif #endif
left_to_play = check_left_to_play(u, n_bytes); left_to_play = check_left_to_play(u, n_bytes, on_timeout);
on_timeout = FALSE;
if (u->use_tsched) if (u->use_tsched)
@ -565,7 +635,7 @@ static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polle
return work_done ? 1 : 0; return work_done ? 1 : 0;
} }
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) { static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
pa_bool_t work_done = FALSE; pa_bool_t work_done = FALSE;
pa_usec_t max_sleep_usec = 0, process_usec = 0; pa_usec_t max_sleep_usec = 0, process_usec = 0;
size_t left_to_play; size_t left_to_play;
@ -591,7 +661,8 @@ static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polle
} }
n_bytes = (size_t) n * u->frame_size; n_bytes = (size_t) n * u->frame_size;
left_to_play = check_left_to_play(u, n_bytes); left_to_play = check_left_to_play(u, n_bytes, on_timeout);
on_timeout = FALSE;
if (u->use_tsched) if (u->use_tsched)
@ -1278,15 +1349,16 @@ static void thread_func(void *userdata) {
if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) { if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
int work_done; int work_done;
pa_usec_t sleep_usec = 0; pa_usec_t sleep_usec = 0;
pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
if (process_rewind(u) < 0) if (process_rewind(u) < 0)
goto fail; goto fail;
if (u->use_mmap) if (u->use_mmap)
work_done = mmap_write(u, &sleep_usec, revents & POLLOUT); work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
else else
work_done = unix_write(u, &sleep_usec, revents & POLLOUT); work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
if (work_done < 0) if (work_done < 0)
goto fail; goto fail;
@ -1787,7 +1859,6 @@ pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_ca
u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size); u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
u->nfragments = nfrags; u->nfragments = nfrags;
u->hwbuf_size = u->fragment_size * nfrags; u->hwbuf_size = u->fragment_size * nfrags;
u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels); pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms", pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
@ -1798,7 +1869,13 @@ pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_ca
pa_sink_set_max_rewind(u->sink, u->hwbuf_size); pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
if (u->use_tsched) { if (u->use_tsched) {
u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec); u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
fix_min_sleep_wakeup(u); fix_min_sleep_wakeup(u);
fix_tsched_watermark(u); fix_tsched_watermark(u);
@ -1812,6 +1889,7 @@ pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_ca
} else } else
pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss)); pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
reserve_update(u); reserve_update(u);
if (update_sw_params(u) < 0) if (update_sw_params(u) < 0)

View file

@ -59,9 +59,17 @@
/* #define DEBUG_TIMING */ /* #define DEBUG_TIMING */
#define DEFAULT_DEVICE "default" #define DEFAULT_DEVICE "default"
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */ #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */ #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (1*PA_USEC_PER_MSEC) /* 1ms */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
#define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */ #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */ #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */ #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
@ -96,7 +104,12 @@ struct userdata {
hwbuf_unused, hwbuf_unused,
min_sleep, min_sleep,
min_wakeup, min_wakeup,
watermark_step; watermark_inc_step,
watermark_dec_step,
watermark_inc_threshold,
watermark_dec_threshold;
pa_usec_t watermark_dec_not_before;
unsigned nfragments; unsigned nfragments;
@ -241,6 +254,7 @@ static int reserve_monitor_init(struct userdata *u, const char *dname) {
static void fix_min_sleep_wakeup(struct userdata *u) { static void fix_min_sleep_wakeup(struct userdata *u) {
size_t max_use, max_use_2; size_t max_use, max_use_2;
pa_assert(u); pa_assert(u);
pa_assert(u->use_tsched);
max_use = u->hwbuf_size - u->hwbuf_unused; max_use = u->hwbuf_size - u->hwbuf_unused;
max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec); max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
@ -255,6 +269,7 @@ static void fix_min_sleep_wakeup(struct userdata *u) {
static void fix_tsched_watermark(struct userdata *u) { static void fix_tsched_watermark(struct userdata *u) {
size_t max_use; size_t max_use;
pa_assert(u); pa_assert(u);
pa_assert(u->use_tsched);
max_use = u->hwbuf_size - u->hwbuf_unused; max_use = u->hwbuf_size - u->hwbuf_unused;
@ -265,7 +280,7 @@ static void fix_tsched_watermark(struct userdata *u) {
u->tsched_watermark = u->min_wakeup; u->tsched_watermark = u->min_wakeup;
} }
static void adjust_after_overrun(struct userdata *u) { static void increase_watermark(struct userdata *u) {
size_t old_watermark; size_t old_watermark;
pa_usec_t old_min_latency, new_min_latency; pa_usec_t old_min_latency, new_min_latency;
@ -274,36 +289,72 @@ static void adjust_after_overrun(struct userdata *u) {
/* First, just try to increase the watermark */ /* First, just try to increase the watermark */
old_watermark = u->tsched_watermark; old_watermark = u->tsched_watermark;
u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step); u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
fix_tsched_watermark(u); fix_tsched_watermark(u);
if (old_watermark != u->tsched_watermark) { if (old_watermark != u->tsched_watermark) {
pa_log_notice("Increasing wakeup watermark to %0.2f ms", pa_log_info("Increasing wakeup watermark to %0.2f ms",
(double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC); (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
return; return;
} }
/* Hmm, we cannot increase the watermark any further, hence let's raise the latency */ /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
old_min_latency = u->source->thread_info.min_latency; old_min_latency = u->source->thread_info.min_latency;
new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC); new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency); new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
if (old_min_latency != new_min_latency) { if (old_min_latency != new_min_latency) {
pa_log_notice("Increasing minimal latency to %0.2f ms", pa_log_info("Increasing minimal latency to %0.2f ms",
(double) new_min_latency / PA_USEC_PER_MSEC); (double) new_min_latency / PA_USEC_PER_MSEC);
pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency); pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
return;
} }
/* When we reach this we're officially fucked! */ /* When we reach this we're officially fucked! */
} }
static void decrease_watermark(struct userdata *u) {
size_t old_watermark;
pa_usec_t now;
pa_assert(u);
pa_assert(u->use_tsched);
now = pa_rtclock_now();
if (u->watermark_dec_not_before <= 0)
goto restart;
if (u->watermark_dec_not_before > now)
return;
old_watermark = u->tsched_watermark;
if (u->tsched_watermark < u->watermark_dec_step)
u->tsched_watermark = u->tsched_watermark / 2;
else
u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
fix_tsched_watermark(u);
if (old_watermark != u->tsched_watermark)
pa_log_info("Decreasing wakeup watermark to %0.2f ms",
(double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
/* We don't change the latency range */
restart:
u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) { static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
pa_usec_t wm, usec; pa_usec_t wm, usec;
pa_assert(sleep_usec);
pa_assert(process_usec);
pa_assert(u); pa_assert(u);
pa_assert(u->use_tsched);
usec = pa_source_get_requested_latency_within_thread(u->source); usec = pa_source_get_requested_latency_within_thread(u->source);
@ -352,7 +403,7 @@ static int try_recover(struct userdata *u, const char *call, int err) {
return 0; return 0;
} }
static size_t check_left_to_record(struct userdata *u, size_t n_bytes) { static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
size_t left_to_record; size_t left_to_record;
size_t rec_space = u->hwbuf_size - u->hwbuf_unused; size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
@ -361,14 +412,11 @@ static size_t check_left_to_record(struct userdata *u, size_t n_bytes) {
* it is removed from the buffer. This is particularly important * it is removed from the buffer. This is particularly important
* when block transfer is used. */ * when block transfer is used. */
if (n_bytes <= rec_space) { if (n_bytes <= rec_space)
left_to_record = rec_space - n_bytes; left_to_record = rec_space - n_bytes;
else {
#ifdef DEBUG_TIMING /* We got a dropout. What a mess! */
pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif
} else {
left_to_record = 0; left_to_record = 0;
#ifdef DEBUG_TIMING #ifdef DEBUG_TIMING
@ -377,15 +425,36 @@ static size_t check_left_to_record(struct userdata *u, size_t n_bytes) {
if (pa_log_ratelimit()) if (pa_log_ratelimit())
pa_log_info("Overrun!"); pa_log_info("Overrun!");
}
if (u->use_tsched) #ifdef DEBUG_TIMING
adjust_after_overrun(u); pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif
if (u->use_tsched) {
pa_bool_t reset_not_before = TRUE;
if (left_to_record < u->watermark_inc_threshold)
increase_watermark(u);
else if (left_to_record > u->watermark_dec_threshold) {
reset_not_before = FALSE;
/* We decrease the watermark only if we have actually been
* woken up by a timeout. If something else woke us up
* it's too easy to fulfill the deadlines... */
if (on_timeout)
decrease_watermark(u);
}
if (reset_not_before)
u->watermark_dec_not_before = 0;
} }
return left_to_record; return left_to_record;
} }
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) { static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
pa_bool_t work_done = FALSE; pa_bool_t work_done = FALSE;
pa_usec_t max_sleep_usec = 0, process_usec = 0; pa_usec_t max_sleep_usec = 0, process_usec = 0;
size_t left_to_record; size_t left_to_record;
@ -417,7 +486,8 @@ static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled
pa_log_debug("avail: %lu", (unsigned long) n_bytes); pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif #endif
left_to_record = check_left_to_record(u, n_bytes); left_to_record = check_left_to_record(u, n_bytes, on_timeout);
on_timeout = FALSE;
if (u->use_tsched) if (u->use_tsched)
if (!polled && if (!polled &&
@ -543,7 +613,7 @@ static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled
return work_done ? 1 : 0; return work_done ? 1 : 0;
} }
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) { static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
int work_done = FALSE; int work_done = FALSE;
pa_usec_t max_sleep_usec = 0, process_usec = 0; pa_usec_t max_sleep_usec = 0, process_usec = 0;
size_t left_to_record; size_t left_to_record;
@ -570,7 +640,8 @@ static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled
} }
n_bytes = (size_t) n * u->frame_size; n_bytes = (size_t) n * u->frame_size;
left_to_record = check_left_to_record(u, n_bytes); left_to_record = check_left_to_record(u, n_bytes, on_timeout);
on_timeout = FALSE;
if (u->use_tsched) if (u->use_tsched)
if (!polled && if (!polled &&
@ -1158,11 +1229,12 @@ static void thread_func(void *userdata) {
if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) { if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
int work_done; int work_done;
pa_usec_t sleep_usec = 0; pa_usec_t sleep_usec = 0;
pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
if (u->use_mmap) if (u->use_mmap)
work_done = mmap_read(u, &sleep_usec, revents & POLLIN); work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
else else
work_done = unix_read(u, &sleep_usec, revents & POLLIN); work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
if (work_done < 0) if (work_done < 0)
goto fail; goto fail;
@ -1632,7 +1704,6 @@ pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, p
u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size); u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
u->nfragments = nfrags; u->nfragments = nfrags;
u->hwbuf_size = u->fragment_size * nfrags; u->hwbuf_size = u->fragment_size * nfrags;
u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels); pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms", pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
@ -1640,7 +1711,13 @@ pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, p
(double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC); (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
if (u->use_tsched) { if (u->use_tsched) {
u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->source->sample_spec); u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
fix_min_sleep_wakeup(u); fix_min_sleep_wakeup(u);
fix_tsched_watermark(u); fix_tsched_watermark(u);

View file

@ -63,6 +63,7 @@ struct pa_rtpoll {
pa_bool_t running:1; pa_bool_t running:1;
pa_bool_t rebuild_needed:1; pa_bool_t rebuild_needed:1;
pa_bool_t quit:1; pa_bool_t quit:1;
pa_bool_t timer_elapsed:1;
#ifdef DEBUG_TIMING #ifdef DEBUG_TIMING
pa_usec_t timestamp; pa_usec_t timestamp;
@ -94,26 +95,14 @@ PA_STATIC_FLIST_DECLARE(items, 0, pa_xfree);
pa_rtpoll *pa_rtpoll_new(void) { pa_rtpoll *pa_rtpoll_new(void) {
pa_rtpoll *p; pa_rtpoll *p;
p = pa_xnew(pa_rtpoll, 1); p = pa_xnew0(pa_rtpoll, 1);
p->n_pollfd_alloc = 32; p->n_pollfd_alloc = 32;
p->pollfd = pa_xnew(struct pollfd, p->n_pollfd_alloc); p->pollfd = pa_xnew(struct pollfd, p->n_pollfd_alloc);
p->pollfd2 = pa_xnew(struct pollfd, p->n_pollfd_alloc); p->pollfd2 = pa_xnew(struct pollfd, p->n_pollfd_alloc);
p->n_pollfd_used = 0;
pa_zero(p->next_elapse);
p->timer_enabled = FALSE;
p->running = FALSE;
p->scan_for_dead = FALSE;
p->rebuild_needed = FALSE;
p->quit = FALSE;
PA_LLIST_HEAD_INIT(pa_rtpoll_item, p->items);
#ifdef DEBUG_TIMING #ifdef DEBUG_TIMING
p->timestamp = pa_rtclock_now(); p->timestamp = pa_rtclock_now();
p->slept = p->awake = 0;
#endif #endif
return p; return p;
@ -229,6 +218,7 @@ int pa_rtpoll_run(pa_rtpoll *p, pa_bool_t wait_op) {
pa_assert(!p->running); pa_assert(!p->running);
p->running = TRUE; p->running = TRUE;
p->timer_elapsed = FALSE;
/* First, let's do some work */ /* First, let's do some work */
for (i = p->items; i && i->priority < PA_RTPOLL_NEVER; i = i->next) { for (i = p->items; i && i->priority < PA_RTPOLL_NEVER; i = i->next) {
@ -286,7 +276,7 @@ int pa_rtpoll_run(pa_rtpoll *p, pa_bool_t wait_op) {
if (p->rebuild_needed) if (p->rebuild_needed)
rtpoll_rebuild(p); rtpoll_rebuild(p);
memset(&timeout, 0, sizeof(timeout)); pa_zero(timeout);
/* Calculate timeout */ /* Calculate timeout */
if (wait_op && !p->quit && p->timer_enabled) { if (wait_op && !p->quit && p->timer_enabled) {
@ -317,6 +307,8 @@ int pa_rtpoll_run(pa_rtpoll *p, pa_bool_t wait_op) {
r = poll(p->pollfd, p->n_pollfd_used, (!wait_op || p->quit || p->timer_enabled) ? (int) ((timeout.tv_sec*1000) + (timeout.tv_usec / 1000)) : -1); r = poll(p->pollfd, p->n_pollfd_used, (!wait_op || p->quit || p->timer_enabled) ? (int) ((timeout.tv_sec*1000) + (timeout.tv_usec / 1000)) : -1);
#endif #endif
p->timer_elapsed = r == 0;
#ifdef DEBUG_TIMING #ifdef DEBUG_TIMING
{ {
pa_usec_t now = pa_rtclock_now(); pa_usec_t now = pa_rtclock_now();
@ -628,3 +620,9 @@ void pa_rtpoll_quit(pa_rtpoll *p) {
p->quit = TRUE; p->quit = TRUE;
} }
pa_bool_t pa_rtpoll_timer_elapsed(pa_rtpoll *p) {
pa_assert(p);
return p->timer_elapsed;
}

View file

@ -73,6 +73,10 @@ void pa_rtpoll_set_timer_absolute(pa_rtpoll *p, pa_usec_t usec);
void pa_rtpoll_set_timer_relative(pa_rtpoll *p, pa_usec_t usec); void pa_rtpoll_set_timer_relative(pa_rtpoll *p, pa_usec_t usec);
void pa_rtpoll_set_timer_disabled(pa_rtpoll *p); void pa_rtpoll_set_timer_disabled(pa_rtpoll *p);
/* Return TRUE when the elapsed timer was the reason for
* the last pa_rtpoll_run() invocation to finish */
pa_bool_t pa_rtpoll_timer_elapsed(pa_rtpoll *p);
/* A new fd wakeup item for pa_rtpoll */ /* A new fd wakeup item for pa_rtpoll */
pa_rtpoll_item *pa_rtpoll_item_new(pa_rtpoll *p, pa_rtpoll_priority_t prio, unsigned n_fds); pa_rtpoll_item *pa_rtpoll_item_new(pa_rtpoll *p, pa_rtpoll_priority_t prio, unsigned n_fds);
void pa_rtpoll_item_free(pa_rtpoll_item *i); void pa_rtpoll_item_free(pa_rtpoll_item *i);