audioconvert: avoid even more precision loss in S32 to F32 conversion

There's really no point in doing that s25_32 intermediate step;
to be honest, I don't have a clue why the original implementation
did that ¯\_(ツ)_/¯.

Both `S25_SCALE` and `S32_SCALE` are powers of two,
and thus are both exactly representable as floats,
and the reciprocal of a power of two is also exactly representable,
so that rescaling does not introduce any precision loss.

This additionally avoids the right shift, and is thus even faster.
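
In scalar terms, the change amounts to the following (a minimal illustrative
sketch, not the actual PipeWire code; the scale values 2^24 and 2^31 are
assumptions consistent with the 7-bit shift removed in the diff below):

    #include <stdint.h>
    #include <stdio.h>

    #define S25_SCALE 16777216.0f    /* 2^24, assumed full scale of a signed 25-bit sample */
    #define S32_SCALE 2147483648.0f  /* 2^31, assumed full scale of a signed 32-bit sample */

    /* Old path: truncate the low 7 bits, then rescale from the 25-bit range. */
    static float s32_to_f32_via_s25(int32_t s)
    {
        return (s >> 7) * (1.0f / S25_SCALE);
    }

    /* New path: convert the full 32-bit value and rescale once;
     * the int -> float conversion rounds to nearest instead of truncating. */
    static float s32_to_f32_direct(int32_t s)
    {
        return s * (1.0f / S32_SCALE);
    }

    int main(void)
    {
        int32_t s = 0x12345679; /* non-zero low bits are truncated by the old path */
        printf("via s25: %.9g\ndirect:  %.9g\n",
               s32_to_f32_via_s25(s), s32_to_f32_direct(s));
        return 0;
    }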

As `test_lossless_s32_lossless_subset` shows,
if the integer is in the form of an s25 value plus a shift,
the maximum absolute error becomes even lower,
but not zero, because F32 -> S32 still goes through an S25 intermediate.
I think we could theoretically do better,
but then the clamping becomes pretty finicky,
so I don't feel like touching that here.
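
For what it's worth, here is a quick self-contained check (again an
illustrative sketch with an assumed S32_SCALE of 2^31, not the project's
actual test) that the new S32 -> F32 direction is exact for samples of the
s25-plus-shift form, which is consistent with the remaining round-trip error
coming from the F32 -> S32 side as described above:

    #include <assert.h>
    #include <stdint.h>

    #define S32_SCALE 2147483648.0f  /* 2^31, assumed */

    int main(void)
    {
        int32_t s25;
        for (s25 = -(1 << 24); s25 < (1 << 24); s25 += 12345) {
            int32_t s32 = s25 * 128;             /* an "s25 plus shift" sample */
            float f = s32 * (1.0f / S32_SCALE);  /* new conversion path */
            /* Every |s25| <= 2^24 fits in float's 24-bit significand and
             * 1/2^31 is an exact power of two, so scaling back recovers
             * the sample bit-exactly. */
            assert((int32_t)(f * S32_SCALE) == s32);
        }
        return 0;
    }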
Roman Lebedev 2024-06-14 06:05:18 +03:00
parent c517865864
commit f4c89b1b40
4 changed files with 26 additions and 37 deletions


@@ -316,7 +316,7 @@ conv_s32_to_f32d_4s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 	float *d0 = dst[0], *d1 = dst[1], *d2 = dst[2], *d3 = dst[3];
 	uint32_t n, unrolled;
 	__m256i in[4];
-	__m256 out[4], factor = _mm256_set1_ps(1.0f / S25_SCALE);
+	__m256 out[4], factor = _mm256_set1_ps(1.0f / S32_SCALE);
 	__m256i mask1 = _mm256_setr_epi32(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels,
 			4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);
@@ -334,11 +334,6 @@ conv_s32_to_f32d_4s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 		in[2] = _mm256_i32gather_epi32((int*)&s[2], mask1, 4);
 		in[3] = _mm256_i32gather_epi32((int*)&s[3], mask1, 4);
-		in[0] = _mm256_srai_epi32(in[0], 7);
-		in[1] = _mm256_srai_epi32(in[1], 7);
-		in[2] = _mm256_srai_epi32(in[2], 7);
-		in[3] = _mm256_srai_epi32(in[3], 7);
 		out[0] = _mm256_cvtepi32_ps(in[0]);
 		out[1] = _mm256_cvtepi32_ps(in[1]);
 		out[2] = _mm256_cvtepi32_ps(in[2]);
@@ -357,11 +352,11 @@ conv_s32_to_f32d_4s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 		s += 8*n_channels;
 	}
 	for(; n < n_samples; n++) {
-		__m128 out[4], factor = _mm_set1_ps(1.0f / S25_SCALE);
-		out[0] = _mm_cvtsi32_ss(factor, s[0] >> 7);
-		out[1] = _mm_cvtsi32_ss(factor, s[1] >> 7);
-		out[2] = _mm_cvtsi32_ss(factor, s[2] >> 7);
-		out[3] = _mm_cvtsi32_ss(factor, s[3] >> 7);
+		__m128 out[4], factor = _mm_set1_ps(1.0f / S32_SCALE);
+		out[0] = _mm_cvtsi32_ss(factor, s[0]);
+		out[1] = _mm_cvtsi32_ss(factor, s[1]);
+		out[2] = _mm_cvtsi32_ss(factor, s[2]);
+		out[3] = _mm_cvtsi32_ss(factor, s[3]);
 		out[0] = _mm_mul_ss(out[0], factor);
 		out[1] = _mm_mul_ss(out[1], factor);
 		out[2] = _mm_mul_ss(out[2], factor);
@@ -382,7 +377,7 @@ conv_s32_to_f32d_2s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 	float *d0 = dst[0], *d1 = dst[1];
 	uint32_t n, unrolled;
 	__m256i in[4];
-	__m256 out[4], factor = _mm256_set1_ps(1.0f / S25_SCALE);
+	__m256 out[4], factor = _mm256_set1_ps(1.0f / S32_SCALE);
 	__m256i mask1 = _mm256_setr_epi32(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels,
 			4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);
@@ -396,9 +391,6 @@ conv_s32_to_f32d_2s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 		in[0] = _mm256_i32gather_epi32((int*)&s[0], mask1, 4);
 		in[1] = _mm256_i32gather_epi32((int*)&s[1], mask1, 4);
-		in[0] = _mm256_srai_epi32(in[0], 7);
-		in[1] = _mm256_srai_epi32(in[1], 7);
 		out[0] = _mm256_cvtepi32_ps(in[0]);
 		out[1] = _mm256_cvtepi32_ps(in[1]);
@@ -411,9 +403,9 @@ conv_s32_to_f32d_2s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 		s += 8*n_channels;
 	}
 	for(; n < n_samples; n++) {
-		__m128 out[2], factor = _mm_set1_ps(1.0f / S25_SCALE);
-		out[0] = _mm_cvtsi32_ss(factor, s[0] >> 7);
-		out[1] = _mm_cvtsi32_ss(factor, s[1] >> 7);
+		__m128 out[2], factor = _mm_set1_ps(1.0f / S32_SCALE);
+		out[0] = _mm_cvtsi32_ss(factor, s[0]);
+		out[1] = _mm_cvtsi32_ss(factor, s[1]);
 		out[0] = _mm_mul_ss(out[0], factor);
 		out[1] = _mm_mul_ss(out[1], factor);
 		_mm_store_ss(&d0[n], out[0]);
@@ -430,7 +422,7 @@ conv_s32_to_f32d_1s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 	float *d0 = dst[0];
 	uint32_t n, unrolled;
 	__m256i in[2];
-	__m256 out[2], factor = _mm256_set1_ps(1.0f / S25_SCALE);
+	__m256 out[2], factor = _mm256_set1_ps(1.0f / S32_SCALE);
 	__m256i mask1 = _mm256_setr_epi32(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels,
 			4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);
@@ -443,9 +435,6 @@ conv_s32_to_f32d_1s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 		in[0] = _mm256_i32gather_epi32(&s[0*n_channels], mask1, 4);
 		in[1] = _mm256_i32gather_epi32(&s[8*n_channels], mask1, 4);
-		in[0] = _mm256_srai_epi32(in[0], 7);
-		in[1] = _mm256_srai_epi32(in[1], 7);
 		out[0] = _mm256_cvtepi32_ps(in[0]);
 		out[1] = _mm256_cvtepi32_ps(in[1]);
@@ -458,8 +447,8 @@ conv_s32_to_f32d_1s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 		s += 16*n_channels;
 	}
 	for(; n < n_samples; n++) {
-		__m128 out, factor = _mm_set1_ps(1.0f / S25_SCALE);
-		out = _mm_cvtsi32_ss(factor, s[0] >> 7);
+		__m128 out, factor = _mm_set1_ps(1.0f / S32_SCALE);
+		out = _mm_cvtsi32_ss(factor, s[0]);
 		out = _mm_mul_ss(out, factor);
 		_mm_store_ss(&d0[n], out);
 		s += n_channels;