mirror of
https://gitlab.freedesktop.org/pulseaudio/pulseaudio.git
synced 2026-02-27 01:40:34 -05:00
echo-cancel: Plug in WebRTC drift compensation
This adds the ability for echo cancellers to provide their own drift compensation, and hooks in the appropriate bits to implement this in the WebRTC canceller. We do this by introducing an alternative model for the canceller. So far, the core engine just provided a run() method which was given blocksize-sized chunks of playback and record samples. The new model has the engine provide play() and record() methods that can (in theory) be called by the playback and capture threads. The latter would actually do the processing required. In addition to this a set_drift() method may be provided by the implementation. PA will provide periodic samples of the drift to the engine. These values need to be aggregated and processed over some time, since the point values vary quite a bit (but generally fit a linear regression reasonably accurately). At some point of time, we might move the actual drift calculation into PA and change the semantics of this function. NOTE: This needs further testing before being deemed ready for wider use.
This commit is contained in:
parent
8c0cca7905
commit
23ce9a4f79
3 changed files with 250 additions and 68 deletions
|
|
@ -47,6 +47,7 @@ PA_C_DECL_END
|
|||
#define DEFAULT_MOBILE FALSE
|
||||
#define DEFAULT_ROUTING_MODE "speakerphone"
|
||||
#define DEFAULT_COMFORT_NOISE TRUE
|
||||
#define DEFAULT_DRIFT_COMPENSATION FALSE
|
||||
|
||||
static const char* const valid_modargs[] = {
|
||||
"high_pass_filter",
|
||||
|
|
@ -56,6 +57,7 @@ static const char* const valid_modargs[] = {
|
|||
"mobile",
|
||||
"routing_mode",
|
||||
"comfort_noise",
|
||||
"drift_compensation",
|
||||
NULL
|
||||
};
|
||||
|
||||
|
|
@ -125,7 +127,18 @@ pa_bool_t pa_webrtc_ec_init(pa_core *c, pa_echo_canceller *ec,
|
|||
goto fail;
|
||||
}
|
||||
|
||||
ec->params.drift_compensation = DEFAULT_DRIFT_COMPENSATION;
|
||||
if (pa_modargs_get_value_boolean(ma, "drift_compensation", &ec->params.drift_compensation) < 0) {
|
||||
pa_log("Failed to parse drift_compensation value");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (mobile) {
|
||||
if (ec->params.drift_compensation) {
|
||||
pa_log("Can't use drift_compensation in mobile mode");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((rm = routing_mode_from_string(pa_modargs_get_value(ma, "routing_mode", DEFAULT_ROUTING_MODE))) < 0) {
|
||||
pa_log("Failed to parse routing_mode value");
|
||||
goto fail;
|
||||
|
|
@ -160,7 +173,13 @@ pa_bool_t pa_webrtc_ec_init(pa_core *c, pa_echo_canceller *ec,
|
|||
apm->high_pass_filter()->Enable(true);
|
||||
|
||||
if (!mobile) {
|
||||
apm->echo_cancellation()->enable_drift_compensation(false);
|
||||
if (ec->params.drift_compensation) {
|
||||
apm->echo_cancellation()->set_device_sample_rate_hz(source_ss->rate);
|
||||
apm->echo_cancellation()->enable_drift_compensation(true);
|
||||
} else {
|
||||
apm->echo_cancellation()->enable_drift_compensation(false);
|
||||
}
|
||||
|
||||
apm->echo_cancellation()->Enable(true);
|
||||
} else {
|
||||
apm->echo_control_mobile()->set_routing_mode(static_cast<webrtc::EchoControlMobile::RoutingMode>(rm));
|
||||
|
|
@ -204,9 +223,9 @@ fail:
|
|||
return FALSE;
|
||||
}
|
||||
|
||||
void pa_webrtc_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out) {
|
||||
void pa_webrtc_ec_play(pa_echo_canceller *ec, const uint8_t *play) {
|
||||
webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.priv.webrtc.apm;
|
||||
webrtc::AudioFrame play_frame, out_frame;
|
||||
webrtc::AudioFrame play_frame;
|
||||
const pa_sample_spec *ss = &ec->params.priv.webrtc.sample_spec;
|
||||
|
||||
play_frame._audioChannel = ss->channels;
|
||||
|
|
@ -214,18 +233,37 @@ void pa_webrtc_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *
|
|||
play_frame._payloadDataLengthInSamples = ec->params.priv.webrtc.blocksize / pa_frame_size(ss);
|
||||
memcpy(play_frame._payloadData, play, ec->params.priv.webrtc.blocksize);
|
||||
|
||||
apm->AnalyzeReverseStream(&play_frame);
|
||||
}
|
||||
|
||||
/* Process one blocksize-sized chunk of capture audio through the WebRTC
 * AudioProcessing engine and write the echo-cancelled result to 'out'.
 *
 * rec: blocksize bytes of raw capture samples (format given by
 *      ec->params.priv.webrtc.sample_spec).
 * out: receives exactly blocksize bytes of processed samples.
 *
 * Reverse-stream (playback) analysis is done separately in
 * pa_webrtc_ec_play(); this function must not touch the reverse stream.
 * NOTE(review): the diff as rendered contained a stray
 * "apm->AnalyzeReverseStream(&play_frame);" here referencing an
 * undeclared 'play_frame' — residue of the removed run() body; dropped. */
void pa_webrtc_ec_record(pa_echo_canceller *ec, const uint8_t *rec, uint8_t *out) {
    webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.priv.webrtc.apm;
    webrtc::AudioFrame out_frame;
    const pa_sample_spec *ss = &ec->params.priv.webrtc.sample_spec;

    /* Describe the capture chunk to the engine: channel count, rate, and
     * length in frames (blocksize is in bytes, hence the frame-size divide). */
    out_frame._audioChannel = ss->channels;
    out_frame._frequencyInHz = ss->rate;
    out_frame._payloadDataLengthInSamples = ec->params.priv.webrtc.blocksize / pa_frame_size(ss);
    memcpy(out_frame._payloadData, rec, ec->params.priv.webrtc.blocksize);

    /* Delay between reverse and near-end streams is reported as 0; with
     * drift compensation enabled the engine handles clock skew itself. */
    apm->set_stream_delay_ms(0);
    apm->ProcessStream(&out_frame);

    /* ProcessStream() works in place on the frame payload. */
    memcpy(out, out_frame._payloadData, ec->params.priv.webrtc.blocksize);
}
|
||||
|
||||
/* Feed a drift sample into the WebRTC echo canceller's drift compensation.
 *
 * drift is expressed as a fraction of a block; it is converted here to a
 * sample count (blocksize is in bytes, so divide by the frame size) before
 * being handed to the engine. Only meaningful when drift_compensation was
 * enabled at init time. */
void pa_webrtc_ec_set_drift(pa_echo_canceller *ec, float drift) {
    webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.priv.webrtc.apm;
    const pa_sample_spec *ss = &ec->params.priv.webrtc.sample_spec;

    /* fractional blocks -> samples */
    float drift_samples = drift * ec->params.priv.webrtc.blocksize / pa_frame_size(ss);
    apm->echo_cancellation()->set_stream_drift_samples(drift_samples);
}
|
||||
|
||||
/* Compatibility entry point for the original single-call model: analyze
 * the playback chunk, then process the matching capture chunk. Kept so
 * callers that do not use the split play()/record() API keep working.
 * The reverse stream must be analyzed before the capture stream is
 * processed, so the call order here is significant. */
void pa_webrtc_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out) {
    pa_webrtc_ec_play(ec, play);
    pa_webrtc_ec_record(ec, rec, out);
}
|
||||
|
||||
void pa_webrtc_ec_done(pa_echo_canceller *ec) {
|
||||
if (ec->params.priv.webrtc.apm) {
|
||||
webrtc::AudioProcessing::Destroy((webrtc::AudioProcessing*)ec->params.priv.webrtc.apm);
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue