audioconvert: tweak conversion constants

Tweak the conversion constants so that they handle the
extreme ranges better.
Align the C and vector implementations so they produce the same results.
Reactivate the unit-test asserts that fire when a conversion fails.
Wim Taymans 2022-07-05 12:20:02 +02:00
parent e3951cc1f1
commit 22317da685
4 changed files with 237 additions and 227 deletions
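For the f32 -> s16 paths below, the change splits the single S16_MAX_F constant into a separate scale factor (S16_SCALE) plus explicit clamp bounds (S16_MIN, S16_MAX), and switches the vector conversions from rounding (_mm_cvtps_epi32) to truncation (_mm_cvttps_epi32). A minimal scalar sketch of the resulting scheme, assuming the constants take their usual fmt-ops.h values (S16_SCALE = 32768.0f, S16_MAX = 32767, S16_MIN = -32768):

	#include <stdint.h>

	/* Assumed values, matching what fmt-ops.h is expected to define. */
	#define S16_SCALE 32768.0f
	#define S16_MAX   32767
	#define S16_MIN   (-32768)

	/* Scalar sketch of the new f32 -> s16 conversion: scale by the full
	 * range, clamp to the representable extremes, then truncate toward
	 * zero (the same behavior as _mm_cvttps_epi32 and a plain C cast). */
	static inline int16_t f32_to_s16(float v)
	{
		float s = v * S16_SCALE;  /* maps [-1.0, 1.0) onto [-32768.0, 32768.0) */
		if (s < S16_MIN)
			s = S16_MIN;
		if (s > S16_MAX)
			s = S16_MAX;      /* +1.0f would otherwise overflow int16_t */
		return (int16_t)s;        /* truncates, like the SSE2 cvtt path */
	}

Scaling by the full range (32768) instead of S16_MAX (32767) keeps small signals bit-exact, which is why the clamp at the positive extreme becomes necessary.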

@@ -338,7 +338,7 @@ conv_s32_to_f32d_1s_sse2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 	float *d0 = dst[0];
 	uint32_t n, unrolled;
 	__m128i in;
-	__m128 out, factor = _mm_set1_ps(1.0f / S24_SCALE);
+	__m128 out, factor = _mm_set1_ps(1.0f / S32_SCALE);
 	if (SPA_IS_ALIGNED(d0, 16))
 		unrolled = n_samples & ~3;
@@ -350,14 +350,13 @@ conv_s32_to_f32d_1s_sse2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 				s[1*n_channels],
 				s[2*n_channels],
 				s[3*n_channels]);
-		in = _mm_srai_epi32(in, 8);
 		out = _mm_cvtepi32_ps(in);
 		out = _mm_mul_ps(out, factor);
 		_mm_store_ps(&d0[n], out);
 		s += 4*n_channels;
 	}
 	for(; n < n_samples; n++) {
-		out = _mm_cvtsi32_ss(factor, s[0]>>8);
+		out = _mm_cvtsi32_ss(factor, s[0]);
 		out = _mm_mul_ss(out, factor);
 		_mm_store_ss(&d0[n], out);
 		s += n_channels;
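For the s32 -> f32 direction just above, the old code shifted the sample right by 8 and divided by S24_SCALE; the new code divides the full 32-bit value by S32_SCALE, keeping the low 8 bits instead of discarding them. A scalar sketch of the two variants, assuming S24_SCALE = 8388608.0f (2^23) and S32_SCALE = 2147483648.0f (2^31):

	#include <stdint.h>

	/* Assumed values for illustration (2^23 and 2^31). */
	#define S24_SCALE 8388608.0f
	#define S32_SCALE 2147483648.0f

	/* Old: discard the low 8 bits, then normalize as a 24-bit sample. */
	static inline float s32_to_f32_old(int32_t v)
	{
		return (v >> 8) * (1.0f / S24_SCALE);
	}

	/* New: normalize the full 32-bit sample directly; the overall scale
	 * is the same ((v >> 8) / 2^23 ~= v / 2^31) but no precision is
	 * truncated away before the conversion. */
	static inline float s32_to_f32_new(int32_t v)
	{
		return v * (1.0f / S32_SCALE);
	}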
@@ -395,7 +394,7 @@ conv_f32d_to_s32_1s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 	for(n = 0; n < unrolled; n += 4) {
 		in[0] = _mm_mul_ps(_mm_load_ps(&s0[n]), scale);
 		in[0] = _mm_min_ps(in[0], int_max);
-		out[0] = _mm_cvtps_epi32(in[0]);
+		out[0] = _mm_cvttps_epi32(in[0]);
 		out[1] = _mm_shuffle_epi32(out[0], _MM_SHUFFLE(0, 3, 2, 1));
 		out[2] = _mm_shuffle_epi32(out[0], _MM_SHUFFLE(1, 0, 3, 2));
 		out[3] = _mm_shuffle_epi32(out[0], _MM_SHUFFLE(2, 1, 0, 3));
@@ -440,8 +439,8 @@ conv_f32d_to_s32_2s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 		in[0] = _mm_min_ps(in[0], int_max);
 		in[1] = _mm_min_ps(in[1], int_max);
-		out[0] = _mm_cvtps_epi32(in[0]);
-		out[1] = _mm_cvtps_epi32(in[1]);
+		out[0] = _mm_cvttps_epi32(in[0]);
+		out[1] = _mm_cvttps_epi32(in[1]);
 		t[0] = _mm_unpacklo_epi32(out[0], out[1]);
 		t[1] = _mm_unpackhi_epi32(out[0], out[1]);
@@ -460,7 +459,7 @@ conv_f32d_to_s32_2s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 		in[0] = _mm_mul_ps(in[0], scale);
 		in[0] = _mm_min_ps(in[0], int_max);
-		out[0] = _mm_cvtps_epi32(in[0]);
+		out[0] = _mm_cvttps_epi32(in[0]);
 		_mm_storel_epi64((__m128i*)d, out[0]);
 		d += n_channels;
 	}
@@ -499,10 +498,10 @@ conv_f32d_to_s32_4s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 		_MM_TRANSPOSE4_PS(in[0], in[1], in[2], in[3]);
-		out[0] = _mm_cvtps_epi32(in[0]);
-		out[1] = _mm_cvtps_epi32(in[1]);
-		out[2] = _mm_cvtps_epi32(in[2]);
-		out[3] = _mm_cvtps_epi32(in[3]);
+		out[0] = _mm_cvttps_epi32(in[0]);
+		out[1] = _mm_cvttps_epi32(in[1]);
+		out[2] = _mm_cvttps_epi32(in[2]);
+		out[3] = _mm_cvttps_epi32(in[3]);
 		_mm_storeu_si128((__m128i*)(d + 0*n_channels), out[0]);
 		_mm_storeu_si128((__m128i*)(d + 1*n_channels), out[1]);
@@ -522,7 +521,7 @@ conv_f32d_to_s32_4s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 		in[0] = _mm_mul_ps(in[0], scale);
 		in[0] = _mm_min_ps(in[0], int_max);
-		out[0] = _mm_cvtps_epi32(in[0]);
+		out[0] = _mm_cvttps_epi32(in[0]);
 		_mm_storeu_si128((__m128i*)d, out[0]);
 		d += n_channels;
 	}
@@ -590,7 +589,7 @@ conv_f32d_to_s32_1s_dither_sse2(struct convert *conv, void * SPA_RESTRICT dst, c
 		in[0] = _mm_mul_ps(_mm_load_ps(&s[n]), scale);
 		in[0] = _mm_add_ps(in[0], _mm_load_ps(&dither[n]));
 		in[0] = _mm_min_ps(in[0], int_max);
-		out[0] = _mm_cvtps_epi32(in[0]);
+		out[0] = _mm_cvttps_epi32(in[0]);
 		out[1] = _mm_shuffle_epi32(out[0], _MM_SHUFFLE(0, 3, 2, 1));
 		out[2] = _mm_shuffle_epi32(out[0], _MM_SHUFFLE(1, 0, 3, 2));
 		out[3] = _mm_shuffle_epi32(out[0], _MM_SHUFFLE(2, 1, 0, 3));
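All of the f32 -> s32 hunks above make the same substitution: _mm_cvtps_epi32 (round to nearest under the default MXCSR rounding mode) becomes _mm_cvttps_epi32 (truncate toward zero), which is what a plain C cast does. A small standalone sketch of the difference this aligns away:

	#include <emmintrin.h>
	#include <stdio.h>

	int main(void)
	{
		__m128 x = _mm_set_ss(1.7f);

		/* _mm_cvtss_si32 rounds to nearest in the default rounding
		 * mode; _mm_cvttss_si32 truncates toward zero, matching the
		 * C cast (int)1.7f used by the scalar fallback code. */
		printf("%d\n", _mm_cvtss_si32(x));   /* prints 2 */
		printf("%d\n", _mm_cvttss_si32(x));  /* prints 1 */
		return 0;
	}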
@@ -986,8 +985,9 @@ conv_f32_to_s16_1_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_RES
 	uint32_t n, unrolled;
 	__m128 in[2];
 	__m128i out[2];
-	__m128 int_max = _mm_set1_ps(S16_MAX_F);
-	__m128 int_min = _mm_sub_ps(_mm_setzero_ps(), int_max);
+	__m128 int_scale = _mm_set1_ps(S16_SCALE);
+	__m128 int_max = _mm_set1_ps(S16_MAX);
+	__m128 int_min = _mm_set1_ps(S16_MIN);
 	if (SPA_IS_ALIGNED(s, 16))
 		unrolled = n_samples & ~7;
@@ -995,16 +995,16 @@ conv_f32_to_s16_1_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_RES
 		unrolled = 0;
 	for(n = 0; n < unrolled; n += 8) {
-		in[0] = _mm_mul_ps(_mm_load_ps(&s[n]), int_max);
-		in[1] = _mm_mul_ps(_mm_load_ps(&s[n+4]), int_max);
-		out[0] = _mm_cvtps_epi32(in[0]);
-		out[1] = _mm_cvtps_epi32(in[1]);
+		in[0] = _mm_mul_ps(_mm_load_ps(&s[n]), int_scale);
+		in[1] = _mm_mul_ps(_mm_load_ps(&s[n+4]), int_scale);
+		out[0] = _mm_cvttps_epi32(in[0]);
+		out[1] = _mm_cvttps_epi32(in[1]);
 		out[0] = _mm_packs_epi32(out[0], out[1]);
 		_mm_storeu_si128((__m128i*)(d+0), out[0]);
 		d += 8;
 	}
 	for(; n < n_samples; n++) {
-		in[0] = _mm_mul_ss(_mm_load_ss(&s[n]), int_max);
+		in[0] = _mm_mul_ss(_mm_load_ss(&s[n]), int_scale);
 		in[0] = _mm_min_ss(int_max, _mm_max_ss(in[0], int_min));
 		*d++ = _mm_cvtss_si32(in[0]);
 	}
@@ -1035,8 +1035,9 @@ conv_f32d_to_s16_1s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 	uint32_t n, unrolled;
 	__m128 in[2];
 	__m128i out[2];
-	__m128 int_max = _mm_set1_ps(S16_MAX_F);
-	__m128 int_min = _mm_sub_ps(_mm_setzero_ps(), int_max);
+	__m128 int_scale = _mm_set1_ps(S16_SCALE);
+	__m128 int_max = _mm_set1_ps(S16_MAX);
+	__m128 int_min = _mm_set1_ps(S16_MIN);
 	if (SPA_IS_ALIGNED(s0, 16))
 		unrolled = n_samples & ~7;
@@ -1044,10 +1045,10 @@ conv_f32d_to_s16_1s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 		unrolled = 0;
 	for(n = 0; n < unrolled; n += 8) {
-		in[0] = _mm_mul_ps(_mm_load_ps(&s0[n]), int_max);
-		in[1] = _mm_mul_ps(_mm_load_ps(&s0[n+4]), int_max);
-		out[0] = _mm_cvtps_epi32(in[0]);
-		out[1] = _mm_cvtps_epi32(in[1]);
+		in[0] = _mm_mul_ps(_mm_load_ps(&s0[n]), int_scale);
+		in[1] = _mm_mul_ps(_mm_load_ps(&s0[n+4]), int_scale);
+		out[0] = _mm_cvttps_epi32(in[0]);
+		out[1] = _mm_cvttps_epi32(in[1]);
 		out[0] = _mm_packs_epi32(out[0], out[1]);
 		d[0*n_channels] = _mm_extract_epi16(out[0], 0);
@@ -1061,9 +1062,9 @@ conv_f32d_to_s16_1s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 		d += 8*n_channels;
 	}
 	for(; n < n_samples; n++) {
-		in[0] = _mm_mul_ss(_mm_load_ss(&s0[n]), int_max);
+		in[0] = _mm_mul_ss(_mm_load_ss(&s0[n]), int_scale);
 		in[0] = _mm_min_ss(int_max, _mm_max_ss(in[0], int_min));
-		*d = _mm_cvtss_si32(in[0]);
+		*d = _mm_cvttss_si32(in[0]);
 		d += n_channels;
 	}
 }
@@ -1077,8 +1078,9 @@ conv_f32d_to_s16_2s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 	uint32_t n, unrolled;
 	__m128 in[2];
 	__m128i out[4], t[2];
-	__m128 int_max = _mm_set1_ps(S16_MAX_F);
-	__m128 int_min = _mm_sub_ps(_mm_setzero_ps(), int_max);
+	__m128 int_scale = _mm_set1_ps(S16_SCALE);
+	__m128 int_max = _mm_set1_ps(S16_MAX);
+	__m128 int_min = _mm_set1_ps(S16_MIN);
 	if (SPA_IS_ALIGNED(s0, 16) &&
 	    SPA_IS_ALIGNED(s1, 16))
@@ -1087,11 +1089,11 @@ conv_f32d_to_s16_2s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 		unrolled = 0;
 	for(n = 0; n < unrolled; n += 4) {
-		in[0] = _mm_mul_ps(_mm_load_ps(&s0[n]), int_max);
-		in[1] = _mm_mul_ps(_mm_load_ps(&s1[n]), int_max);
+		in[0] = _mm_mul_ps(_mm_load_ps(&s0[n]), int_scale);
+		in[1] = _mm_mul_ps(_mm_load_ps(&s1[n]), int_scale);
-		t[0] = _mm_cvtps_epi32(in[0]);
-		t[1] = _mm_cvtps_epi32(in[1]);
+		t[0] = _mm_cvttps_epi32(in[0]);
+		t[1] = _mm_cvttps_epi32(in[1]);
 		t[0] = _mm_packs_epi32(t[0], t[0]);
 		t[1] = _mm_packs_epi32(t[1], t[1]);
@@ -1108,8 +1110,8 @@ conv_f32d_to_s16_2s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 		d += 4*n_channels;
 	}
 	for(; n < n_samples; n++) {
-		in[0] = _mm_mul_ss(_mm_load_ss(&s0[n]), int_max);
-		in[1] = _mm_mul_ss(_mm_load_ss(&s1[n]), int_max);
+		in[0] = _mm_mul_ss(_mm_load_ss(&s0[n]), int_scale);
+		in[1] = _mm_mul_ss(_mm_load_ss(&s1[n]), int_scale);
 		in[0] = _mm_min_ss(int_max, _mm_max_ss(in[0], int_min));
 		in[1] = _mm_min_ss(int_max, _mm_max_ss(in[1], int_min));
 		d[0] = _mm_cvtss_si32(in[0]);
@@ -1127,8 +1129,9 @@ conv_f32d_to_s16_4s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 	uint32_t n, unrolled;
 	__m128 in[4];
 	__m128i out[4], t[4];
-	__m128 int_max = _mm_set1_ps(S16_MAX_F);
-	__m128 int_min = _mm_sub_ps(_mm_setzero_ps(), int_max);
+	__m128 int_scale = _mm_set1_ps(S16_SCALE);
+	__m128 int_max = _mm_set1_ps(S16_MAX);
+	__m128 int_min = _mm_set1_ps(S16_MIN);
 	if (SPA_IS_ALIGNED(s0, 16) &&
 	    SPA_IS_ALIGNED(s1, 16) &&
@@ -1139,15 +1142,15 @@ conv_f32d_to_s16_4s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 		unrolled = 0;
 	for(n = 0; n < unrolled; n += 4) {
-		in[0] = _mm_mul_ps(_mm_load_ps(&s0[n]), int_max);
-		in[1] = _mm_mul_ps(_mm_load_ps(&s1[n]), int_max);
-		in[2] = _mm_mul_ps(_mm_load_ps(&s2[n]), int_max);
-		in[3] = _mm_mul_ps(_mm_load_ps(&s3[n]), int_max);
+		in[0] = _mm_mul_ps(_mm_load_ps(&s0[n]), int_scale);
+		in[1] = _mm_mul_ps(_mm_load_ps(&s1[n]), int_scale);
+		in[2] = _mm_mul_ps(_mm_load_ps(&s2[n]), int_scale);
+		in[3] = _mm_mul_ps(_mm_load_ps(&s3[n]), int_scale);
-		t[0] = _mm_cvtps_epi32(in[0]);
-		t[1] = _mm_cvtps_epi32(in[1]);
-		t[2] = _mm_cvtps_epi32(in[2]);
-		t[3] = _mm_cvtps_epi32(in[3]);
+		t[0] = _mm_cvttps_epi32(in[0]);
+		t[1] = _mm_cvttps_epi32(in[1]);
+		t[2] = _mm_cvttps_epi32(in[2]);
+		t[3] = _mm_cvttps_epi32(in[3]);
 		t[0] = _mm_packs_epi32(t[0], t[2]);
 		t[1] = _mm_packs_epi32(t[1], t[3]);
@@ -1165,10 +1168,10 @@ conv_f32d_to_s16_4s_sse2(void *data, void * SPA_RESTRICT dst, const void * SPA_R
 		d += 4*n_channels;
 	}
 	for(; n < n_samples; n++) {
-		in[0] = _mm_mul_ss(_mm_load_ss(&s0[n]), int_max);
-		in[1] = _mm_mul_ss(_mm_load_ss(&s1[n]), int_max);
-		in[2] = _mm_mul_ss(_mm_load_ss(&s2[n]), int_max);
-		in[3] = _mm_mul_ss(_mm_load_ss(&s3[n]), int_max);
+		in[0] = _mm_mul_ss(_mm_load_ss(&s0[n]), int_scale);
+		in[1] = _mm_mul_ss(_mm_load_ss(&s1[n]), int_scale);
+		in[2] = _mm_mul_ss(_mm_load_ss(&s2[n]), int_scale);
+		in[3] = _mm_mul_ss(_mm_load_ss(&s3[n]), int_scale);
 		in[0] = _mm_min_ss(int_max, _mm_max_ss(in[0], int_min));
 		in[1] = _mm_min_ss(int_max, _mm_max_ss(in[1], int_min));
 		in[2] = _mm_min_ss(int_max, _mm_max_ss(in[2], int_min));
@@ -1205,8 +1208,9 @@ conv_f32d_to_s16_2_sse2(struct convert *conv, void * SPA_RESTRICT dst[], const v
 	uint32_t n, unrolled;
 	__m128 in[4];
 	__m128i out[4];
-	__m128 int_max = _mm_set1_ps(S16_MAX_F);
-	__m128 int_min = _mm_sub_ps(_mm_setzero_ps(), int_max);
+	__m128 int_scale = _mm_set1_ps(S16_SCALE);
+	__m128 int_max = _mm_set1_ps(S16_MAX);
+	__m128 int_min = _mm_set1_ps(S16_MIN);
 	if (SPA_IS_ALIGNED(s0, 16) &&
 	    SPA_IS_ALIGNED(s1, 16))
@@ -1215,15 +1219,15 @@ conv_f32d_to_s16_2_sse2(struct convert *conv, void * SPA_RESTRICT dst[], const v
 		unrolled = 0;
 	for(n = 0; n < unrolled; n += 8) {
-		in[0] = _mm_mul_ps(_mm_load_ps(&s0[n+0]), int_max);
-		in[1] = _mm_mul_ps(_mm_load_ps(&s1[n+0]), int_max);
-		in[2] = _mm_mul_ps(_mm_load_ps(&s0[n+4]), int_max);
-		in[3] = _mm_mul_ps(_mm_load_ps(&s1[n+4]), int_max);
+		in[0] = _mm_mul_ps(_mm_load_ps(&s0[n+0]), int_scale);
+		in[1] = _mm_mul_ps(_mm_load_ps(&s1[n+0]), int_scale);
+		in[2] = _mm_mul_ps(_mm_load_ps(&s0[n+4]), int_scale);
+		in[3] = _mm_mul_ps(_mm_load_ps(&s1[n+4]), int_scale);
-		out[0] = _mm_cvtps_epi32(in[0]);
-		out[1] = _mm_cvtps_epi32(in[1]);
-		out[2] = _mm_cvtps_epi32(in[2]);
-		out[3] = _mm_cvtps_epi32(in[3]);
+		out[0] = _mm_cvttps_epi32(in[0]);
+		out[1] = _mm_cvttps_epi32(in[1]);
+		out[2] = _mm_cvttps_epi32(in[2]);
+		out[3] = _mm_cvttps_epi32(in[3]);
 		out[0] = _mm_packs_epi32(out[0], out[2]);
 		out[1] = _mm_packs_epi32(out[1], out[3]);
@@ -1237,8 +1241,8 @@ conv_f32d_to_s16_2_sse2(struct convert *conv, void * SPA_RESTRICT dst[], const v
 		d += 16;
 	}
 	for(; n < n_samples; n++) {
-		in[0] = _mm_mul_ss(_mm_load_ss(&s0[n]), int_max);
-		in[1] = _mm_mul_ss(_mm_load_ss(&s1[n]), int_max);
+		in[0] = _mm_mul_ss(_mm_load_ss(&s0[n]), int_scale);
+		in[1] = _mm_mul_ss(_mm_load_ss(&s1[n]), int_scale);
 		in[0] = _mm_min_ss(int_max, _mm_max_ss(in[0], int_min));
 		in[1] = _mm_min_ss(int_max, _mm_max_ss(in[1], int_min));
 		d[0] = _mm_cvtss_si32(in[0]);