alsa: automatically decrease watermark after a time of stability

Lennart Poettering 2009-08-24 03:26:56 +02:00
parent 80c6937303
commit 050a3a99e1
4 changed files with 241 additions and 84 deletions
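
In outline (as read from the diff below), the added logic is symmetric but deliberately cautious in both directions: on an underrun the watermark grows by at most one increment step and never more than doubles, and it is shrunk again only after the fill level has stayed above a generous threshold for a whole verification window, and only when the thread was genuinely woken by its timer, by at most one decrement step and never more than halved. The standalone C sketch below mirrors that behaviour with simplified, hypothetical names and constants (plain microsecond integers instead of pa_usec_t/byte conversions, and without the fallback of raising the sink's minimal latency); it illustrates the idea and is not the module code from the diff itself.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the module's constants, expressed directly in
 * microseconds instead of going through pa_usec_to_bytes()/the sample spec. */
#define INC_STEP_USEC        10000     /* 10 ms  -- grow by at most this per underrun   */
#define DEC_STEP_USEC         5000     /* 5 ms   -- shrink by at most this when stable  */
#define INC_THRESHOLD_USEC    1000     /* 1 ms   -- fill level below this => underrun   */
#define DEC_THRESHOLD_USEC  100000     /* 100 ms -- fill level above this => all good   */
#define VERIFY_AFTER_USEC 20000000     /* 20 s   -- stability window before shrinking   */

struct watermark {
    uint64_t value;                    /* current wakeup watermark (usec, simplified)   */
    uint64_t dec_not_before;           /* 0 = stability timer not armed yet             */
};

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

/* On underrun: at most double the watermark, and never add more than one step.
 * (The real code additionally falls back to raising the sink's minimal latency
 * once the watermark cannot grow any further.) */
static void increase_watermark(struct watermark *w) {
    w->value = min_u64(w->value * 2, w->value + INC_STEP_USEC);
}

/* After a full verification window without trouble: at most halve the
 * watermark, and never remove more than one step; then restart the window. */
static void decrease_watermark(struct watermark *w, uint64_t now) {
    if (w->dec_not_before == 0)        /* timer not armed yet: arm it, don't shrink */
        goto restart;
    if (w->dec_not_before > now)       /* still inside the verification window */
        return;

    if (w->value < DEC_STEP_USEC)
        w->value /= 2;
    else
        w->value = max_u64(w->value / 2, w->value - DEC_STEP_USEC);

restart:
    w->dec_not_before = now + VERIFY_AFTER_USEC;
}

/* Called once per wakeup with the amount of audio still left in the buffer. */
static void adjust(struct watermark *w, uint64_t left_to_play,
                   int woken_by_timer, uint64_t now) {
    if (left_to_play < INC_THRESHOLD_USEC) {
        increase_watermark(w);
        w->dec_not_before = 0;         /* a drop-out invalidates the stability window */
    } else if (left_to_play > DEC_THRESHOLD_USEC) {
        if (woken_by_timer)            /* only trust fill levels seen on a real timer wakeup */
            decrease_watermark(w, now);
    } else {
        w->dec_not_before = 0;         /* middling level: start measuring stability again */
    }
}

int main(void) {
    struct watermark w = { 20000, 0 }; /* start at the 20 ms default */
    uint64_t now = 0;

    adjust(&w, 500, 1, now);           /* near-underrun -> watermark grows to 30 ms */
    now += 25000000;
    adjust(&w, 150000, 1, now);        /* plenty left, timer wakeup -> arms the 20 s window */
    now += 25000000;
    adjust(&w, 150000, 1, now);        /* still fine after 20 s -> shrinks back to 25 ms */

    printf("watermark is now %.1f ms\n", (double) w.value / 1000.0);
    return 0;
}

Compiled and run, the three simulated wakeups print "watermark is now 25.0 ms": one underrun pushes the watermark from 20 ms to 30 ms, and a single undisturbed 20-second window brings it back down by one 5 ms step.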

@@ -62,14 +62,21 @@
/* #define DEBUG_TIMING */
#define DEFAULT_DEVICE "default"
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
#define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (1*PA_USEC_PER_MSEC) /* 1ms -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
#define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
@@ -99,7 +106,12 @@ struct userdata {
hwbuf_unused,
min_sleep,
min_wakeup,
watermark_step;
watermark_inc_step,
watermark_dec_step,
watermark_inc_threshold,
watermark_dec_threshold;
pa_usec_t watermark_dec_not_before;
unsigned nfragments;
pa_memchunk memchunk;
@@ -248,6 +260,7 @@ static void fix_min_sleep_wakeup(struct userdata *u) {
size_t max_use, max_use_2;
pa_assert(u);
pa_assert(u->use_tsched);
max_use = u->hwbuf_size - u->hwbuf_unused;
max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
@@ -262,6 +275,7 @@ static void fix_min_sleep_wakeup(struct userdata *u) {
static void fix_tsched_watermark(struct userdata *u) {
size_t max_use;
pa_assert(u);
pa_assert(u->use_tsched);
max_use = u->hwbuf_size - u->hwbuf_unused;
@@ -272,7 +286,7 @@ static void fix_tsched_watermark(struct userdata *u) {
u->tsched_watermark = u->min_wakeup;
}
static void adjust_after_underrun(struct userdata *u) {
static void increase_watermark(struct userdata *u) {
size_t old_watermark;
pa_usec_t old_min_latency, new_min_latency;
@@ -281,31 +295,64 @@ static void adjust_after_underrun(struct userdata *u) {
/* First, just try to increase the watermark */
old_watermark = u->tsched_watermark;
u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
fix_tsched_watermark(u);
if (old_watermark != u->tsched_watermark) {
pa_log_notice("Increasing wakeup watermark to %0.2f ms",
(double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
pa_log_info("Increasing wakeup watermark to %0.2f ms",
(double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
return;
}
/* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
old_min_latency = u->sink->thread_info.min_latency;
new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
if (old_min_latency != new_min_latency) {
pa_log_notice("Increasing minimal latency to %0.2f ms",
(double) new_min_latency / PA_USEC_PER_MSEC);
pa_log_info("Increasing minimal latency to %0.2f ms",
(double) new_min_latency / PA_USEC_PER_MSEC);
pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
return;
}
/* When we reach this we're officially fucked! */
}
static void decrease_watermark(struct userdata *u) {
size_t old_watermark;
pa_usec_t now;
pa_assert(u);
pa_assert(u->use_tsched);
now = pa_rtclock_now();
if (u->watermark_dec_not_before <= 0)
goto restart;
if (u->watermark_dec_not_before > now)
return;
old_watermark = u->tsched_watermark;
if (u->tsched_watermark < u->watermark_dec_step)
u->tsched_watermark = u->tsched_watermark / 2;
else
u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
fix_tsched_watermark(u);
if (old_watermark != u->tsched_watermark)
pa_log_info("Decreasing wakeup watermark to %0.2f ms",
(double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
/* We don't change the latency range */
restart:
u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
pa_usec_t usec, wm;
@@ -313,6 +360,7 @@ static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*p
pa_assert(process_usec);
pa_assert(u);
pa_assert(u->use_tsched);
usec = pa_sink_get_requested_latency_within_thread(u->sink);
@@ -360,7 +408,7 @@ static int try_recover(struct userdata *u, const char *call, int err) {
return 0;
}
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
size_t left_to_play;
/* We use <= instead of < for this check here because an underrun
@@ -368,34 +416,55 @@ static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
* it is removed from the buffer. This is particularly important
* when block transfer is used. */
if (n_bytes <= u->hwbuf_size) {
if (n_bytes <= u->hwbuf_size)
left_to_play = u->hwbuf_size - n_bytes;
else {
#ifdef DEBUG_TIMING
pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif
} else {
/* We got a dropout. What a mess! */
left_to_play = 0;
#ifdef DEBUG_TIMING
PA_DEBUG_TRAP;
#endif
if (!u->first && !u->after_rewind) {
if (!u->first && !u->after_rewind)
if (pa_log_ratelimit())
pa_log_info("Underrun!");
}
if (u->use_tsched)
adjust_after_underrun(u);
#ifdef DEBUG_TIMING
pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
(double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
(double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
(double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif
if (u->use_tsched) {
pa_bool_t reset_not_before = TRUE;
if (!u->first && !u->after_rewind) {
if (left_to_play < u->watermark_inc_threshold)
increase_watermark(u);
else if (left_to_play > u->watermark_dec_threshold) {
reset_not_before = FALSE;
/* We decrease the watermark only if we have actually
* been woken up by a timeout. If something else woke
* us up it's too easy to fulfill the deadlines... */
if (on_timeout)
decrease_watermark(u);
}
}
if (reset_not_before)
u->watermark_dec_not_before = 0;
}
return left_to_play;
}
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
pa_bool_t work_done = TRUE;
pa_usec_t max_sleep_usec = 0, process_usec = 0;
size_t left_to_play;
@@ -430,7 +499,8 @@ static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polle
pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif
left_to_play = check_left_to_play(u, n_bytes);
left_to_play = check_left_to_play(u, n_bytes, on_timeout);
on_timeout = FALSE;
if (u->use_tsched)
@@ -565,7 +635,7 @@ static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polle
return work_done ? 1 : 0;
}
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
pa_bool_t work_done = FALSE;
pa_usec_t max_sleep_usec = 0, process_usec = 0;
size_t left_to_play;
@@ -591,7 +661,8 @@ static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polle
}
n_bytes = (size_t) n * u->frame_size;
left_to_play = check_left_to_play(u, n_bytes);
left_to_play = check_left_to_play(u, n_bytes, on_timeout);
on_timeout = FALSE;
if (u->use_tsched)
@@ -1278,15 +1349,16 @@ static void thread_func(void *userdata) {
if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
int work_done;
pa_usec_t sleep_usec = 0;
pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
if (process_rewind(u) < 0)
goto fail;
if (u->use_mmap)
work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
else
work_done = unix_write(u, &sleep_usec, revents & POLLOUT);
work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
if (work_done < 0)
goto fail;
@@ -1787,7 +1859,6 @@ pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_ca
u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
u->nfragments = nfrags;
u->hwbuf_size = u->fragment_size * nfrags;
u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
@@ -1798,7 +1869,13 @@ pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_ca
pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
if (u->use_tsched) {
u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);
u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
fix_min_sleep_wakeup(u);
fix_tsched_watermark(u);
@@ -1812,6 +1889,7 @@ pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_ca
} else
pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
reserve_update(u);
if (update_sw_params(u) < 0)