bluez5: iso-io: more accurate resync after overrun

Take active rate correction properly into account when dropping data on
overrun resync.

Drop data only for the currently processed stream, after data has been
consumed from it. Make sure the rate correction factor is updated after
this for the next cycle of the stream.

Also fix buffer fill level calculation: the fill level interpolation
should use node rate corr, not clock rate diff, since the calculations
are done in system clock domain. Fix same issue in fractional delay
calculation, and take the absence of resampler prefill into account.

Later, we may need some more resampler APIs to avoid such details
leaking in.

Previously, stream could have its old rate correction locked in, and its
fill level would then end up off the target on the next cycle.
This commit is contained in:
Pauli Virtanen 2026-01-01 16:17:40 +02:00 committed by Wim Taymans
parent b535534611
commit 11389d101a
3 changed files with 86 additions and 43 deletions

View file

@ -84,10 +84,10 @@ struct spa_bt_decode_buffer
int64_t duration_ns;
int64_t next_nsec;
double rate_diff;
int32_t delay;
int32_t delay_frac;
uint32_t prev_samples;
double prev_match_rate;
double level;
@ -97,6 +97,7 @@ struct spa_bt_decode_buffer
} rx;
uint8_t buffering:1;
uint8_t first_cycle:1;
};
static inline int spa_bt_decode_buffer_init(struct spa_bt_decode_buffer *this, struct spa_log *log,
@ -110,8 +111,10 @@ static inline int spa_bt_decode_buffer_init(struct spa_bt_decode_buffer *this, s
this->buffer_size = this->frame_size * quantum_limit * 2;
this->buffer_size += this->buffer_reserve;
this->corr = 1.0;
this->prev_match_rate = 1.0;
this->target = 0;
this->buffering = true;
this->first_cycle = true;
this->max_extra = INT32_MAX;
this->avg_period = BUFFERING_SHORT_MSEC * SPA_NSEC_PER_MSEC;
this->rate_diff_max = BUFFERING_RATE_DIFF_MAX;
@ -213,11 +216,25 @@ static inline void spa_bt_decode_buffer_write_packet(struct spa_bt_decode_buffer
uint32_t avail = spa_bt_decode_buffer_get_size(this) / this->frame_size;
int64_t dt = this->next_nsec - this->rx.nsec;
this->level = dt * this->rate_diff * this->rate / SPA_NSEC_PER_SEC
this->level = dt * this->corr * this->rate / SPA_NSEC_PER_SEC
+ avail + this->delay + this->delay_frac/1e9 - this->rx.position;
spa_log_trace_fp(this->log,
"%p level:%f avail:%u dt:%f delay:%f rx-pos:%"PRIu64" rdiff:%f",
this, this->level, avail,
dt * this->corr * this->rate / SPA_NSEC_PER_SEC,
this->delay + this->delay_frac/1e9,
this->rx.position,
this->corr);
} else {
this->level = spa_bt_decode_buffer_get_size(this) / this->frame_size
+ this->delay + this->delay_frac/1e9;
uint32_t avail = spa_bt_decode_buffer_get_size(this) / this->frame_size;
this->level = avail + this->delay + this->delay_frac/1e9;
spa_log_trace_fp(this->log,
"%p level:%f avail:%u delay:%f",
this, this->level, avail,
this->delay + this->delay_frac/1e9);
}
}
@ -257,7 +274,6 @@ static inline void spa_bt_decode_buffer_recover(struct spa_bt_decode_buffer *thi
{
int32_t target = spa_bt_decode_buffer_get_target_latency(this);
this->rx.nsec = 0;
this->corr = 1.0;
spa_bt_rate_control_init(&this->ctl, target * SPA_NSEC_PER_SEC / this->rate);
@ -352,13 +368,12 @@ static inline void spa_bt_decode_buffer_process(struct spa_bt_decode_buffer *thi
}
}
static inline void spa_bt_decode_buffer_set_next(struct spa_bt_decode_buffer *this, double rate_diff, int64_t next_nsec,
int32_t delay, int32_t delay_frac, bool delay_at_start)
static inline void spa_bt_decode_buffer_set_next(struct spa_bt_decode_buffer *this, int64_t next_nsec,
int32_t delay, int32_t delay_frac, double match_rate, bool delay_at_start)
{
/* Called after spa_bt_decode_buffer_process() on the same cycle to update
* next_nsec & rate_diff values.
* next_nsec values.
*/
this->rate_diff = rate_diff;
this->next_nsec = next_nsec;
this->delay = delay;
this->delay_frac = delay_frac;
@ -366,8 +381,15 @@ static inline void spa_bt_decode_buffer_set_next(struct spa_bt_decode_buffer *th
/* If fractional delay is given at the start of current cycle, make it relative to
* next_nsec used for the level calculations.
*/
if (delay_at_start)
this->delay_frac += (int32_t)(1e9 * this->prev_samples - this->duration_ns * this->rate * this->rate_diff);
if (delay_at_start) {
/* Adjust for no resampler prefill */
int32_t off = this->first_cycle ? -delay : 0;
this->delay_frac += (int32_t)(1e9 * (this->prev_samples + off) - this->duration_ns * this->rate / this->prev_match_rate);
}
this->prev_match_rate = match_rate > 0 ? match_rate : 1.0;
this->first_cycle = false;
/* Recalculate this->level */
spa_bt_decode_buffer_write_packet(this, 0, 0);

View file

@ -21,12 +21,12 @@
#include "media-codecs.h"
#include "defs.h"
#include "decode-buffer.h"
SPA_LOG_TOPIC_DEFINE_STATIC(log_topic, "spa.bluez5.iso");
#undef SPA_LOG_TOPIC_DEFAULT
#define SPA_LOG_TOPIC_DEFAULT &log_topic
#include "decode-buffer.h"
#include "bt-latency.h"
#define IDLE_TIME (500 * SPA_NSEC_PER_MSEC)
@ -684,6 +684,7 @@ void spa_bt_iso_io_set_source_buffer(struct spa_bt_iso_io *this, struct spa_bt_d
buffer->no_overrun_drop = true;
buffer->avg_period = ISO_BUFFERING_AVG_PERIOD;
buffer->rate_diff_max = ISO_BUFFERING_RATE_DIFF_MAX;
stream->this.need_resync = true;
}
}
@ -827,8 +828,38 @@ void spa_bt_iso_io_check_rx_sync(struct spa_bt_iso_io *this, uint64_t position)
if (!stream->source_buf)
return;
/* Act on pending resync */
target = stream->source_buf->target;
if (stream->source_buf && stream->this.need_resync) {
int32_t level;
stream->this.need_resync = false;
/* Resync level */;
spa_bt_decode_buffer_recover(stream->source_buf);
level = (int32_t)round(stream->source_buf->level +
(double)stream->source_buf->duration_ns * stream->source_buf->rate / SPA_NSEC_PER_SEC);
if (level > target) {
uint32_t drop = (level - target) * stream->source_buf->frame_size;
uint32_t avail = spa_bt_decode_buffer_get_size(stream->source_buf);
drop = SPA_MIN(drop, avail);
spa_log_debug(group->log, "%p: ISO overrun group:%u fd:%d level:%f target:%d drop:%u",
group, group->id, stream->fd,
stream->source_buf->level + stream->source_buf->prev_samples,
target,
drop/stream->source_buf->frame_size);
spa_bt_decode_buffer_read(stream->source_buf, drop);
}
}
/* Check sync after all input streams have completed process() on same cycle */
stream->position = position;
spa_list_for_each(s, &group->streams, link) {
if (!s->source_buf)
continue;
@ -836,8 +867,6 @@ void spa_bt_iso_io_check_rx_sync(struct spa_bt_iso_io *this, uint64_t position)
return;
}
target = stream->source_buf->target;
/* Rate match ISO clock */
corr = spa_bt_rate_control_update(&sync->dll, sync->avg_err, 0,
group->duration_rx, CLOCK_SYNC_AVG_PERIOD, CLOCK_SYNC_RATE_DIFF_MAX);
@ -856,7 +885,7 @@ void spa_bt_iso_io_check_rx_sync(struct spa_bt_iso_io *this, uint64_t position)
sync->avg_err = 0;
sync->avg_num = 0;
/* Handle overrun (e.g. resyncs streams after initial buffering) */
/* Detect overrun */
spa_list_for_each(s, &group->streams, link) {
if (s->source_buf) {
double level = s->source_buf->level;
@ -867,30 +896,10 @@ void spa_bt_iso_io_check_rx_sync(struct spa_bt_iso_io *this, uint64_t position)
}
}
if (!overrun)
return;
spa_list_for_each(s, &group->streams, link) {
if (!s->source_buf)
continue;
int32_t level = (int32_t)s->source_buf->level;
if (level > target) {
uint32_t drop = (level - target) * s->source_buf->frame_size;
uint32_t avail = spa_bt_decode_buffer_get_size(s->source_buf);
drop = SPA_MIN(drop, avail);
spa_log_debug(group->log, "%p: ISO overrun group:%u fd:%d level:%f target:%d drop:%u",
group, group->id, s->fd,
s->source_buf->level,
target,
drop/s->source_buf->frame_size);
spa_bt_decode_buffer_read(s->source_buf, drop);
if (overrun) {
spa_list_for_each(s, &group->streams, link) {
if (s->source_buf)
s->this.need_resync = true;
}
spa_bt_decode_buffer_recover(s->source_buf);
}
}

View file

@ -888,6 +888,15 @@ static void media_on_timeout(struct spa_source *source)
this->clock->next_nsec = this->next_time;
}
/* Set next position also here in case impl_node_process() fails to be scheduled */
if (this->transport_started)
spa_bt_decode_buffer_set_next(&port->buffer,
this->position ? this->position->clock.next_nsec : 0,
this->resampling ? this->port.rate_match->delay : 0,
this->resampling ? this->port.rate_match->delay_frac : 0,
this->resampling && this->matching ? port->rate_match->rate : 1.0,
true);
spa_node_call_ready(&this->callbacks, SPA_STATUS_HAVE_DATA);
set_timeout(this, this->next_time);
@ -1831,8 +1840,6 @@ static void process_buffering(struct impl *this)
spa_bt_decode_buffer_recover(&port->buffer);
}
setup_matching(this);
/* copy data to buffers */
if (!spa_list_is_empty(&port->free)) {
struct buffer *buffer;
@ -1883,8 +1890,13 @@ static void process_buffering(struct impl *this)
if (this->transport->iso_io && this->position && !this->initial_buffering)
spa_bt_iso_io_check_rx_sync(this->transport->iso_io, this->position->clock.position);
if (!port->buffer.buffering)
setup_matching(this);
if (!port->buffer.buffering) {
if (this->initial_buffering && this->transport->iso_io)
this->transport->iso_io->need_resync = true;
this->initial_buffering = false;
}
if (this->update_delay_event) {
int32_t target = spa_bt_decode_buffer_get_target_latency(&port->buffer);
@ -1987,10 +1999,10 @@ static int impl_node_process(void *object)
/* Update decode buffer vs. next wakeup timing */
spa_bt_decode_buffer_set_next(&port->buffer,
this->position ? this->position->clock.rate_diff : 1.0,
this->position ? this->position->clock.next_nsec : 0,
this->resampling ? this->port.rate_match->delay : 0,
this->resampling ? this->port.rate_match->delay_frac : 0,
this->resampling && this->matching ? port->rate_match->rate : 1.0,
this->following);
return ret;