audiomixer: optimize avx mixer some more

Add avx mixer to test and benchmark
Rework and unroll the avx mixer some more.
The SSE mixer is 10 times faster than the C one and the AVX mixer is 20 times
faster; the SSE2 function is 5 times faster than the C one.
Wim Taymans 2022-07-10 23:13:24 +02:00
parent 23984f8790
commit 8fe83e5304
5 changed files with 77 additions and 58 deletions
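
For context on the speedups quoted in the commit message: the portable C mixer is essentially a per-sample accumulation over all sources. A minimal sketch of that kind of baseline loop (simplified; mix_f32_ref is a hypothetical name for illustration, not the actual upstream mix_f32_c) looks like this:

#include <stdint.h>

/* Hypothetical scalar reference: sum n_src float buffers into dst one sample
 * at a time. A real implementation also handles n_src == 0/1 and channels. */
static void mix_f32_ref(float *dst, const float **src, uint32_t n_src, uint32_t n_samples)
{
	uint32_t i, n;
	for (n = 0; n < n_samples; n++) {
		float t = src[0][n];
		for (i = 1; i < n_src; i++)
			t += src[i][n];
		dst[n] = t;
	}
}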

@@ -86,50 +86,59 @@ static inline void mix_4(float * dst,
static inline void mix_2(float * dst, const float * SPA_RESTRICT src, uint32_t n_samples)
{
	uint32_t n, unrolled;

	/* only take the unrolled path when both buffers are 32-byte aligned,
	 * as required by the aligned AVX loads/stores below */
	if (SPA_IS_ALIGNED(src, 32) &&
	    SPA_IS_ALIGNED(dst, 32))
		unrolled = n_samples & ~15;
	else
		unrolled = 0;

	/* 16 samples per iteration: two 256-bit registers of 8 floats each */
	for (n = 0; n < unrolled; n += 16) {
		__m256 in1[2], in2[2];

		in1[0] = _mm256_load_ps(&dst[n + 0]);
		in1[1] = _mm256_load_ps(&dst[n + 8]);
		in2[0] = _mm256_load_ps(&src[n + 0]);
		in2[1] = _mm256_load_ps(&src[n + 8]);
		in1[0] = _mm256_add_ps(in1[0], in2[0]);
		in1[1] = _mm256_add_ps(in1[1], in2[1]);
		_mm256_store_ps(&dst[n + 0], in1[0]);
		_mm256_store_ps(&dst[n + 8], in1[1]);
	}
	/* scalar tail for the remaining samples */
	for (; n < n_samples; n++) {
		__m128 in1[1], in2[1];
		in1[0] = _mm_load_ss(&dst[n]);
		in2[0] = _mm_load_ss(&src[n]);
		in1[0] = _mm_add_ss(in1[0], in2[0]);
		_mm_store_ss(&dst[n], in1[0]);
	}
}

void
mix_f32_avx(struct mix_ops *ops, void * SPA_RESTRICT dst, const void * SPA_RESTRICT src[],
uint32_t n_src, uint32_t n_samples)
{
	n_samples *= ops->n_channels;

	if (n_src == 0)
		memset(dst, 0, n_samples * sizeof(float));	/* no inputs: write silence */
	else if (n_src == 1) {
		if (dst != src[0])
			spa_memcpy(dst, src[0], n_samples * sizeof(float));	/* single input: plain copy */
	} else {
		uint32_t i, n, unrolled;
		const float **s = (const float **)src;
		float *d = dst;

		/* use the unrolled path only when dst and every source are
		 * 32-byte aligned; n_samples & ~31 rounds down to a multiple
		 * of 32 (e.g. 1000 -> 992), the rest goes to the tail loop */
		if (SPA_LIKELY(SPA_IS_ALIGNED(dst, 32))) {
			unrolled = n_samples & ~31;
			for (i = 0; i < n_src; i++) {
				if (SPA_UNLIKELY(!SPA_IS_ALIGNED(src[i], 32))) {
					unrolled = 0;
					break;
				}
			}
		} else
			unrolled = 0;

		/* 32 samples per iteration: four 256-bit accumulators,
		 * each source is added into them in turn */
		for (n = 0; n < unrolled; n += 32) {
			__m256 in[4];

			in[0] = _mm256_load_ps(&s[0][n + 0]);
			in[1] = _mm256_load_ps(&s[0][n + 8]);
			in[2] = _mm256_load_ps(&s[0][n + 16]);
			in[3] = _mm256_load_ps(&s[0][n + 24]);
			for (i = 1; i < n_src; i++) {
				in[0] = _mm256_add_ps(in[0], _mm256_load_ps(&s[i][n + 0]));
				in[1] = _mm256_add_ps(in[1], _mm256_load_ps(&s[i][n + 8]));
				in[2] = _mm256_add_ps(in[2], _mm256_load_ps(&s[i][n + 16]));
				in[3] = _mm256_add_ps(in[3], _mm256_load_ps(&s[i][n + 24]));
			}
			_mm256_store_ps(&d[n + 0], in[0]);
			_mm256_store_ps(&d[n + 8], in[1]);
			_mm256_store_ps(&d[n + 16], in[2]);
			_mm256_store_ps(&d[n + 24], in[3]);
		}
		/* scalar tail for the remaining samples */
		for (; n < n_samples; n++) {
			__m128 in[1];
			in[0] = _mm_load_ss(&s[0][n]);
			for (i = 1; i < n_src; i++)
				in[0] = _mm_add_ss(in[0], _mm_load_ss(&s[i][n]));
			_mm_store_ss(&d[n], in[0]);
		}
	}
}
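
The unrolled path above is only taken when dst and every src[i] are 32-byte aligned; otherwise everything goes through the per-sample tail loop. A hypothetical caller that keeps the fast path enabled could allocate its buffers with 32-byte alignment, for example (mix_two and the buffer setup are illustrative, not the actual test or benchmark code; only the n_channels field of struct mix_ops, which the function above reads, is assumed):

#include <stdint.h>
#include <stdlib.h>

/* Illustrative only: mix two 32-byte aligned mono buffers into a freshly
 * allocated, 32-byte aligned dst so that the unrolled AVX path is used. */
static float *mix_two(struct mix_ops *ops, const float *a, const float *b, uint32_t n_samples)
{
	float *dst;
	const void *src[2] = { a, b };

	/* POSIX aligned allocation keeps SPA_IS_ALIGNED(dst, 32) true */
	if (posix_memalign((void **)&dst, 32, n_samples * sizeof(float)) != 0)
		return NULL;

	mix_f32_avx(ops, dst, src, 2, n_samples);	/* assumes ops->n_channels == 1 */
	return dst;	/* caller frees */
}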