From 55633ebf9af403408e14c9b7a3a5f36b9bf300e6 Mon Sep 17 00:00:00 2001
From: Wim Taymans
Date: Wed, 18 Mar 2020 10:06:54 +0100
Subject: [PATCH] fmt-ops: move AVX

---
 spa/plugins/audioconvert/fmt-ops-avx2.c | 258 +++++++++++++++++++-----
 1 file changed, 204 insertions(+), 54 deletions(-)

diff --git a/spa/plugins/audioconvert/fmt-ops-avx2.c b/spa/plugins/audioconvert/fmt-ops-avx2.c
index 6366f18ce..4fc7a30c1 100644
--- a/spa/plugins/audioconvert/fmt-ops-avx2.c
+++ b/spa/plugins/audioconvert/fmt-ops-avx2.c
@@ -33,26 +33,32 @@ conv_s16_to_f32d_1s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 	const int16_t *s = src;
 	float *d0 = dst[0];
 	uint32_t n, unrolled;
-	__m128i in;
-	__m128 out, factor = _mm_set1_ps(1.0f / S16_SCALE);
+	__m256i in;
+	__m256 out, factor = _mm256_set1_ps(1.0f / S16_SCALE);
 
-	if (SPA_LIKELY(SPA_IS_ALIGNED(d0, 16)))
-		unrolled = n_samples & ~3;
+	if (SPA_LIKELY(SPA_IS_ALIGNED(d0, 32)))
+		unrolled = n_samples & ~7;
 	else
 		unrolled = 0;
 
-	for(n = 0; n < unrolled; n += 4) {
-		in = _mm_insert_epi16(in, s[0*n_channels], 1);
-		in = _mm_insert_epi16(in, s[1*n_channels], 3);
-		in = _mm_insert_epi16(in, s[2*n_channels], 5);
-		in = _mm_insert_epi16(in, s[3*n_channels], 7);
-		in = _mm_srai_epi32(in, 16);
-		out = _mm_cvtepi32_ps(in);
-		out = _mm_mul_ps(out, factor);
-		_mm_store_ps(&d0[n], out);
-		s += 4*n_channels;
+	for(n = 0; n < unrolled; n += 8) {
+		in = _mm256_insert_epi16(in, s[0*n_channels], 1);
+		in = _mm256_insert_epi16(in, s[1*n_channels], 3);
+		in = _mm256_insert_epi16(in, s[2*n_channels], 5);
+		in = _mm256_insert_epi16(in, s[3*n_channels], 7);
+		in = _mm256_insert_epi16(in, s[4*n_channels], 9);
+		in = _mm256_insert_epi16(in, s[5*n_channels], 11);
+		in = _mm256_insert_epi16(in, s[6*n_channels], 13);
+		in = _mm256_insert_epi16(in, s[7*n_channels], 15);
+
+		in = _mm256_srai_epi32(in, 16);
+		out = _mm256_cvtepi32_ps(in);
+		out = _mm256_mul_ps(out, factor);
+		_mm256_store_ps(&d0[n], out);
+		s += 8*n_channels;
 	}
 	for(; n < n_samples; n++) {
+		__m128 out, factor = _mm_set1_ps(1.0f / S16_SCALE);
 		out = _mm_cvtsi32_ss(out, s[0]);
 		out = _mm_mul_ss(out, factor);
 		_mm_store_ss(&d0[n], out);
@@ -78,46 +84,47 @@ conv_s16_to_f32d_2_avx2(struct convert *conv, void * SPA_RESTRICT dst[], const v
 	const int16_t *s = src[0];
 	float *d0 = dst[0], *d1 = dst[1];
 	uint32_t n, unrolled;
-	__m128i in[2], t[4];
-	__m128 out[4], factor = _mm_set1_ps(1.0f / S16_SCALE);
+	__m256i in[2], t[4];
+	__m256 out[4], factor = _mm256_set1_ps(1.0f / S16_SCALE);
 
-	if (SPA_IS_ALIGNED(s, 16) &&
-	    SPA_IS_ALIGNED(d0, 16) &&
-	    SPA_IS_ALIGNED(d1, 16))
-		unrolled = n_samples & ~7;
+	if (SPA_IS_ALIGNED(s, 32) &&
+	    SPA_IS_ALIGNED(d0, 32) &&
+	    SPA_IS_ALIGNED(d1, 32))
+		unrolled = n_samples & ~15;
 	else
 		unrolled = 0;
 
-	for(n = 0; n < unrolled; n += 8) {
-		in[0] = _mm_load_si128((__m128i*)(s + 0));
-		in[1] = _mm_load_si128((__m128i*)(s + 8));
+	for(n = 0; n < unrolled; n += 16) {
+		in[0] = _mm256_load_si256((__m256i*)(s + 0));
+		in[1] = _mm256_load_si256((__m256i*)(s + 16));
 
-		t[0] = _mm_slli_epi32(in[0], 16);
-		t[0] = _mm_srai_epi32(t[0], 16);
-		out[0] = _mm_cvtepi32_ps(t[0]);
-		out[0] = _mm_mul_ps(out[0], factor);
+		t[0] = _mm256_slli_epi32(in[0], 16);
+		t[0] = _mm256_srai_epi32(t[0], 16);
+		out[0] = _mm256_cvtepi32_ps(t[0]);
+		out[0] = _mm256_mul_ps(out[0], factor);
 
-		t[1] = _mm_srai_epi32(in[0], 16);
-		out[1] = _mm_cvtepi32_ps(t[1]);
-		out[1] = _mm_mul_ps(out[1], factor);
+		t[1] = _mm256_srai_epi32(in[0], 16);
+		out[1] = _mm256_cvtepi32_ps(t[1]);
+		out[1] = _mm256_mul_ps(out[1], factor);
 
-		t[2] = _mm_slli_epi32(in[1], 16);
-		t[2] = _mm_srai_epi32(t[2], 16);
-		out[2] = _mm_cvtepi32_ps(t[2]);
-		out[2] = _mm_mul_ps(out[2], factor);
+		t[2] = _mm256_slli_epi32(in[1], 16);
+		t[2] = _mm256_srai_epi32(t[2], 16);
+		out[2] = _mm256_cvtepi32_ps(t[2]);
+		out[2] = _mm256_mul_ps(out[2], factor);
 
-		t[3] = _mm_srai_epi32(in[1], 16);
-		out[3] = _mm_cvtepi32_ps(t[3]);
-		out[3] = _mm_mul_ps(out[3], factor);
+		t[3] = _mm256_srai_epi32(in[1], 16);
+		out[3] = _mm256_cvtepi32_ps(t[3]);
+		out[3] = _mm256_mul_ps(out[3], factor);
 
-		_mm_store_ps(&d0[n + 0], out[0]);
-		_mm_store_ps(&d1[n + 0], out[1]);
-		_mm_store_ps(&d0[n + 4], out[2]);
-		_mm_store_ps(&d1[n + 4], out[3]);
+		_mm256_store_ps(&d0[n + 0], out[0]);
+		_mm256_store_ps(&d1[n + 0], out[1]);
+		_mm256_store_ps(&d0[n + 8], out[2]);
+		_mm256_store_ps(&d1[n + 8], out[3]);
 
-		s += 16;
+		s += 32;
 	}
 	for(; n < n_samples; n++) {
+		__m128 out[4], factor = _mm_set1_ps(1.0f / S16_SCALE);
 		out[0] = _mm_cvtsi32_ss(out[0], s[0]);
 		out[0] = _mm_mul_ss(out[0], factor);
 		out[1] = _mm_cvtsi32_ss(out[1], s[1]);
@@ -330,6 +337,140 @@ conv_s24_to_f32d_avx2(struct convert *conv, void * SPA_RESTRICT dst[], const voi
 		conv_s24_to_f32d_1s_avx2(conv, &dst[i], &s[3*i], n_channels, n_samples);
 }
 
+void
+conv_s32_to_f32d_4s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA_RESTRICT src,
+		uint32_t n_channels, uint32_t n_samples)
+{
+	const int32_t *s = src;
+	float *d0 = dst[0], *d1 = dst[1], *d2 = dst[2], *d3 = dst[3];
+	uint32_t n, unrolled;
+	__m256i in[4], t[4];
+	__m256 out[4], factor = _mm256_set1_ps(1.0f / S24_SCALE);
+
+	if (SPA_IS_ALIGNED(d0, 32) &&
+	    SPA_IS_ALIGNED(d1, 32) &&
+	    SPA_IS_ALIGNED(d2, 32) &&
+	    SPA_IS_ALIGNED(d3, 32))
+		unrolled = n_samples & ~7;
+	else
+		unrolled = 0;
+
+	for(n = 0; n < unrolled; n += 8) {
+		in[0] = _mm256_loadu2_m128i((__m128i*)&s[4*n_channels], (__m128i*)&s[0*n_channels]); /* a0 b0 c0 d0 a4 b4 c4 d4 */
+		in[1] = _mm256_loadu2_m128i((__m128i*)&s[5*n_channels], (__m128i*)&s[1*n_channels]); /* a1 b1 c1 d1 a5 b5 c5 d5 */
+		in[2] = _mm256_loadu2_m128i((__m128i*)&s[6*n_channels], (__m128i*)&s[2*n_channels]); /* a2 b2 c2 d2 a6 b6 c6 d6 */
+		in[3] = _mm256_loadu2_m128i((__m128i*)&s[7*n_channels], (__m128i*)&s[3*n_channels]); /* a3 b3 c3 d3 a7 b7 c7 d7 */
+
+		in[0] = _mm256_srai_epi32(in[0], 8);
+		in[1] = _mm256_srai_epi32(in[1], 8);
+		in[2] = _mm256_srai_epi32(in[2], 8);
+		in[3] = _mm256_srai_epi32(in[3], 8);
+
+		t[0] = _mm256_unpacklo_epi32(in[0], in[1]);	/* a0 a1 b0 b1 a4 a5 b4 b5 */
+		t[1] = _mm256_unpackhi_epi32(in[0], in[1]);	/* c0 c1 d0 d1 c4 c5 d4 d5 */
+		t[2] = _mm256_unpacklo_epi32(in[2], in[3]);	/* a2 a3 b2 b3 a6 a7 b6 b7 */
+		t[3] = _mm256_unpackhi_epi32(in[2], in[3]);	/* c2 c3 d2 d3 c6 c7 d6 d7 */
+		in[0] = _mm256_unpacklo_epi64(t[0], t[2]);	/* a0 a1 a2 a3 a4 a5 a6 a7 */
+		in[1] = _mm256_unpackhi_epi64(t[0], t[2]);	/* b0 b1 b2 b3 b4 b5 b6 b7 */
+		in[2] = _mm256_unpacklo_epi64(t[1], t[3]);	/* c0 c1 c2 c3 c4 c5 c6 c7 */
+		in[3] = _mm256_unpackhi_epi64(t[1], t[3]);	/* d0 d1 d2 d3 d4 d5 d6 d7 */
+
+		out[0] = _mm256_cvtepi32_ps(in[0]);
+		out[1] = _mm256_cvtepi32_ps(in[1]);
+		out[2] = _mm256_cvtepi32_ps(in[2]);
+		out[3] = _mm256_cvtepi32_ps(in[3]);
+
+		out[0] = _mm256_mul_ps(out[0], factor);
+		out[1] = _mm256_mul_ps(out[1], factor);
+		out[2] = _mm256_mul_ps(out[2], factor);
+		out[3] = _mm256_mul_ps(out[3], factor);
+
+		_mm256_store_ps(&d0[n], out[0]);
+		_mm256_store_ps(&d1[n], out[1]);
+		_mm256_store_ps(&d2[n], out[2]);
+		_mm256_store_ps(&d3[n], out[3]);
+
+		s += 8*n_channels;
+	}
+	for(; n < n_samples; n++) {
+		__m128 out[4], factor = _mm_set1_ps(1.0f / S24_SCALE);
+		out[0] = _mm_cvtsi32_ss(out[0], s[0]>>8);
+		out[1] = _mm_cvtsi32_ss(out[1], s[1]>>8);
+		out[2] = _mm_cvtsi32_ss(out[2], s[2]>>8);
+		out[3] = _mm_cvtsi32_ss(out[3], s[3]>>8);
+		out[0] = _mm_mul_ss(out[0], factor);
+		out[1] = _mm_mul_ss(out[1], factor);
+		out[2] = _mm_mul_ss(out[2], factor);
+		out[3] = _mm_mul_ss(out[3], factor);
+		_mm_store_ss(&d0[n], out[0]);
+		_mm_store_ss(&d1[n], out[1]);
+		_mm_store_ss(&d2[n], out[2]);
+		_mm_store_ss(&d3[n], out[3]);
+		s += n_channels;
+	}
+}
+
+void
+conv_s32_to_f32d_2s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA_RESTRICT src,
+		uint32_t n_channels, uint32_t n_samples)
+{
+	const int32_t *s = src;
+	float *d0 = dst[0], *d1 = dst[1];
+	uint32_t n, unrolled;
+	__m256i in[4], t[4];
+	__m256 out[4], factor = _mm256_set1_ps(1.0f / S24_SCALE);
+	__m256i mask = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
+
+	if (SPA_IS_ALIGNED(d0, 32) &&
+	    SPA_IS_ALIGNED(d1, 32))
+		unrolled = n_samples & ~7;
+	else
+		unrolled = 0;
+
+	for(n = 0; n < unrolled; n += 8) {
+		in[0] = _mm256_setr_epi64x(
+				*(int64_t*)&s[0*n_channels],
+				*(int64_t*)&s[1*n_channels],
+				*(int64_t*)&s[2*n_channels],
+				*(int64_t*)&s[3*n_channels]);
+		in[1] = _mm256_setr_epi64x(
+				*(int64_t*)&s[4*n_channels],
+				*(int64_t*)&s[5*n_channels],
+				*(int64_t*)&s[6*n_channels],
+				*(int64_t*)&s[7*n_channels]);
+
+		in[0] = _mm256_srai_epi32(in[0], 8);
+		in[1] = _mm256_srai_epi32(in[1], 8);
+
+		t[0] = _mm256_permutevar8x32_epi32(in[0], mask);
+		t[1] = _mm256_permutevar8x32_epi32(in[1], mask);
+
+		in[0] = _mm256_permute2x128_si256(t[0], t[1], 0 | (2 << 4));
+		in[1] = _mm256_permute2x128_si256(t[0], t[1], 1 | (3 << 4));
+
+		out[0] = _mm256_cvtepi32_ps(in[0]);
+		out[1] = _mm256_cvtepi32_ps(in[1]);
+
+		out[0] = _mm256_mul_ps(out[0], factor);
+		out[1] = _mm256_mul_ps(out[1], factor);
+
+		_mm256_store_ps(&d0[n], out[0]);
+		_mm256_store_ps(&d1[n], out[1]);
+
+		s += 8*n_channels;
+	}
+	for(; n < n_samples; n++) {
+		__m128 out[2], factor = _mm_set1_ps(1.0f / S24_SCALE);
+		out[0] = _mm_cvtsi32_ss(out[0], s[0]>>8);
+		out[1] = _mm_cvtsi32_ss(out[1], s[1]>>8);
+		out[0] = _mm_mul_ss(out[0], factor);
+		out[1] = _mm_mul_ss(out[1], factor);
+		_mm_store_ss(&d0[n], out[0]);
+		_mm_store_ss(&d1[n], out[1]);
+		s += n_channels;
+	}
+}
+
 void
 conv_s32_to_f32d_1s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA_RESTRICT src,
 		uint32_t n_channels, uint32_t n_samples)
@@ -337,26 +478,31 @@ conv_s32_to_f32d_1s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 	const int32_t *s = src;
 	float *d0 = dst[0];
 	uint32_t n, unrolled;
-	__m128i in;
-	__m128 out, factor = _mm_set1_ps(1.0f / S24_SCALE);
+	__m256i in;
+	__m256 out, factor = _mm256_set1_ps(1.0f / S24_SCALE);
 
-	if (SPA_IS_ALIGNED(d0, 16))
-		unrolled = n_samples & ~3;
+	if (SPA_IS_ALIGNED(d0, 32))
+		unrolled = n_samples & ~7;
 	else
 		unrolled = 0;
 
-	for(n = 0; n < unrolled; n += 4) {
-		in = _mm_setr_epi32(s[0*n_channels],
+	for(n = 0; n < unrolled; n += 8) {
+		in = _mm256_setr_epi32(s[0*n_channels],
 			s[1*n_channels],
 			s[2*n_channels],
-			s[3*n_channels]);
-		in = _mm_srai_epi32(in, 8);
-		out = _mm_cvtepi32_ps(in);
-		out = _mm_mul_ps(out, factor);
-		_mm_store_ps(&d0[n], out);
-		s += 4*n_channels;
+			s[3*n_channels],
+			s[4*n_channels],
+			s[5*n_channels],
+			s[6*n_channels],
+			s[7*n_channels]);
+		in = _mm256_srai_epi32(in, 8);
+		out = _mm256_cvtepi32_ps(in);
+		out = _mm256_mul_ps(out, factor);
+		_mm256_store_ps(&d0[n], out);
+		s += 8*n_channels;
 	}
 	for(; n < n_samples; n++) {
+		__m128 out, factor = _mm_set1_ps(1.0f / S24_SCALE);
 		out = _mm_cvtsi32_ss(out, s[0]>>8);
 		out = _mm_mul_ss(out, factor);
 		_mm_store_ss(&d0[n], out);
@@ -371,6 +517,10 @@ conv_s32_to_f32d_avx2(struct convert *conv, void * SPA_RESTRICT dst[], const voi
 	const int32_t *s = src[0];
 	uint32_t i = 0, n_channels = conv->n_channels;
 
+	for(; i + 3 < n_channels; i += 4)
+		conv_s32_to_f32d_4s_avx2(conv, &dst[i], &s[i], n_channels, n_samples);
+	for(; i + 1 < n_channels; i += 2)
+		conv_s32_to_f32d_2s_avx2(conv, &dst[i], &s[i], n_channels, n_samples);
 	for(; i < n_channels; i++)
 		conv_s32_to_f32d_1s_avx2(conv, &dst[i], &s[i], n_channels, n_samples);
 }
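
For reference, every conv_s32_to_f32d_*_avx2 variant in this patch, including its
scalar tail loop, computes the same per-sample result. A minimal scalar sketch,
assuming S24_SCALE is the 2^23 full-scale constant defined in fmt-ops.h; the name
conv_s32_to_f32d_ref is hypothetical and not part of the tree:

	#include <stdint.h>

	/* assumption: matches the S24_SCALE definition in fmt-ops.h */
	#define S24_SCALE 8388608.0f

	/* Hypothetical scalar reference for the AVX2 kernels above:
	 * deinterleave n_channels planes and convert S32 to F32. */
	static void
	conv_s32_to_f32d_ref(float *dst[], const int32_t *src,
			uint32_t n_channels, uint32_t n_samples)
	{
		uint32_t i, j;
		for (j = 0; j < n_samples; j++) {
			for (i = 0; i < n_channels; i++) {
				/* arithmetic shift keeps the sign; (s >> 8) * (1/2^23)
				 * is equivalent to s * (1/2^31) for a full 32-bit sample */
				dst[i][j] = (src[j * n_channels + i] >> 8) * (1.0f / S24_SCALE);
			}
		}
	}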
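The updated conv_s32_to_f32d_avx2 dispatcher peels the channel count greedily:
with n_channels = 6, for example, the 4s kernel deinterleaves channels 0-3, the
2s kernel channels 4-5, and the 1s kernel only runs for an odd remainder. Each
kernel walks the interleaved source with stride n_channels, which is why every
pass starts at &s[i] yet still skips over the channels handled by the others.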