diff --git a/spa/plugins/audioconvert/fmt-ops-avx2.c b/spa/plugins/audioconvert/fmt-ops-avx2.c
index 4fc7a30c1..f7f6da270 100644
--- a/spa/plugins/audioconvert/fmt-ops-avx2.c
+++ b/spa/plugins/audioconvert/fmt-ops-avx2.c
@@ -346,6 +346,10 @@ conv_s32_to_f32d_4s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 	uint32_t n, unrolled;
 	__m256i in[4], t[4];
 	__m256 out[4], factor = _mm256_set1_ps(1.0f / S24_SCALE);
+	__m256i mask1 = _mm256_setr_epi64x(0*n_channels, 0*n_channels+2, 4*n_channels, 4*n_channels+2);
+	__m256i mask2 = _mm256_setr_epi64x(1*n_channels, 1*n_channels+2, 5*n_channels, 5*n_channels+2);
+	__m256i mask3 = _mm256_setr_epi64x(2*n_channels, 2*n_channels+2, 6*n_channels, 6*n_channels+2);
+	__m256i mask4 = _mm256_setr_epi64x(3*n_channels, 3*n_channels+2, 7*n_channels, 7*n_channels+2);
 
 	if (SPA_IS_ALIGNED(d0, 32) &&
 	    SPA_IS_ALIGNED(d1, 32) &&
@@ -356,15 +360,15 @@ conv_s32_to_f32d_4s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 		unrolled = 0;
 
 	for(n = 0; n < unrolled; n += 8) {
-		in[0] = _mm256_loadu2_m128i((__m128i*)&s[4*n_channels], (__m128i*)&s[0*n_channels]);	/* a0 b0 c0 d0 a4 b4 c4 d4 */
-		in[1] = _mm256_loadu2_m128i((__m128i*)&s[5*n_channels], (__m128i*)&s[1*n_channels]);	/* a1 b1 c1 d1 a5 b5 c5 d5 */
-		in[2] = _mm256_loadu2_m128i((__m128i*)&s[6*n_channels], (__m128i*)&s[2*n_channels]);	/* a2 b2 c2 d2 a6 b6 c6 d6 */
-		in[3] = _mm256_loadu2_m128i((__m128i*)&s[7*n_channels], (__m128i*)&s[3*n_channels]);	/* a3 b3 c3 d3 a7 b7 c7 d7 */
+		in[0] = _mm256_i64gather_epi64((long long int *)&s[0*n_channels], mask1, 4);
+		in[1] = _mm256_i64gather_epi64((long long int *)&s[0*n_channels], mask2, 4);
+		in[2] = _mm256_i64gather_epi64((long long int *)&s[0*n_channels], mask3, 4);
+		in[3] = _mm256_i64gather_epi64((long long int *)&s[0*n_channels], mask4, 4);
 
-		in[0] = _mm256_srai_epi32(in[0], 8);
-		in[1] = _mm256_srai_epi32(in[1], 8);
-		in[2] = _mm256_srai_epi32(in[2], 8);
-		in[3] = _mm256_srai_epi32(in[3], 8);
+		in[0] = _mm256_srai_epi32(in[0], 8);	/* a0 b0 c0 d0 a4 b4 c4 d4 */
+		in[1] = _mm256_srai_epi32(in[1], 8);	/* a1 b1 c1 d1 a5 b5 c5 d5 */
+		in[2] = _mm256_srai_epi32(in[2], 8);	/* a2 b2 c2 d2 a6 b6 c6 d6 */
+		in[3] = _mm256_srai_epi32(in[3], 8);	/* a3 b3 c3 d3 a7 b7 c7 d7 */
 
 		t[0] = _mm256_unpacklo_epi32(in[0], in[1]);	/* a0 a1 b0 b1 a4 a5 b4 b5 */
 		t[1] = _mm256_unpackhi_epi32(in[0], in[1]);	/* c0 c1 d0 d1 c4 c5 d4 d5 */
@@ -419,7 +423,9 @@ conv_s32_to_f32d_2s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 	uint32_t n, unrolled;
 	__m256i in[4], t[4];
 	__m256 out[4], factor = _mm256_set1_ps(1.0f / S24_SCALE);
-	__m256i mask = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
+	__m256i perm = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
+	__m256i mask1 = _mm256_setr_epi64x(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels);
+	__m256i mask2 = _mm256_setr_epi64x(4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);
 
 	if (SPA_IS_ALIGNED(d0, 32) &&
 	    SPA_IS_ALIGNED(d1, 32))
@@ -428,22 +434,14 @@ conv_s32_to_f32d_2s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 		unrolled = 0;
 
 	for(n = 0; n < unrolled; n += 8) {
-		in[0] = _mm256_setr_epi64x(
-			*(int64_t*)&s[0*n_channels],
-			*(int64_t*)&s[1*n_channels],
-			*(int64_t*)&s[2*n_channels],
-			*(int64_t*)&s[3*n_channels]);
-		in[1] = _mm256_setr_epi64x(
-			*(int64_t*)&s[4*n_channels],
-			*(int64_t*)&s[5*n_channels],
-			*(int64_t*)&s[6*n_channels],
-			*(int64_t*)&s[7*n_channels]);
+		in[0] = _mm256_i64gather_epi64((long long int *)s, mask1, 4);
+		in[1] = _mm256_i64gather_epi64((long long int *)s, mask2, 4);
 
 		in[0] = _mm256_srai_epi32(in[0], 8);
 		in[1] = _mm256_srai_epi32(in[1], 8);
 
-		t[0] = _mm256_permutevar8x32_epi32(in[0], mask);
-		t[1] = _mm256_permutevar8x32_epi32(in[1], mask);
+		t[0] = _mm256_permutevar8x32_epi32(in[0], perm);
+		t[1] = _mm256_permutevar8x32_epi32(in[1], perm);
 
 		in[0] = _mm256_permute2x128_si256(t[0], t[1], 0 | (2 << 4));
 		in[1] = _mm256_permute2x128_si256(t[0], t[1], 1 | (3 << 4));
@@ -478,28 +476,37 @@ conv_s32_to_f32d_1s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 	const int32_t *s = src;
 	float *d0 = dst[0];
 	uint32_t n, unrolled;
-	__m256i in;
-	__m256 out, factor = _mm256_set1_ps(1.0f / S24_SCALE);
+	__m256i in[2];
+	__m256 out[2], factor = _mm256_set1_ps(1.0f / S24_SCALE);
+	__m256i mask1 = _mm256_setr_epi64x(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels);
+	__m256i mask2 = _mm256_setr_epi64x(4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);
 
 	if (SPA_IS_ALIGNED(d0, 32))
-		unrolled = n_samples & ~7;
+		unrolled = n_samples & ~15;
 	else
 		unrolled = 0;
 
-	for(n = 0; n < unrolled; n += 8) {
-		in = _mm256_setr_epi32(s[0*n_channels],
-			s[1*n_channels],
-			s[2*n_channels],
-			s[3*n_channels],
-			s[4*n_channels],
-			s[5*n_channels],
-			s[6*n_channels],
-			s[7*n_channels]);
-		in = _mm256_srai_epi32(in, 8);
-		out = _mm256_cvtepi32_ps(in);
-		out = _mm256_mul_ps(out, factor);
-		_mm256_store_ps(&d0[n], out);
-		s += 8*n_channels;
+	for(n = 0; n < unrolled; n += 16) {
+		in[0] = _mm256_setr_m128i(
+			_mm256_i64gather_epi32(&s[ 0*n_channels], mask1, 4),
+			_mm256_i64gather_epi32(&s[ 0*n_channels], mask2, 4));
+		in[1] = _mm256_setr_m128i(
+			_mm256_i64gather_epi32(&s[ 8*n_channels], mask1, 4),
+			_mm256_i64gather_epi32(&s[ 8*n_channels], mask2, 4));
+
+		in[0] = _mm256_srai_epi32(in[0], 8);
+		in[1] = _mm256_srai_epi32(in[1], 8);
+
+		out[0] = _mm256_cvtepi32_ps(in[0]);
+		out[1] = _mm256_cvtepi32_ps(in[1]);
+
+		out[0] = _mm256_mul_ps(out[0], factor);
+		out[1] = _mm256_mul_ps(out[1], factor);
+
+		_mm256_store_ps(&d0[n+0], out[0]);
+		_mm256_store_ps(&d0[n+8], out[1]);
+
+		s += 16*n_channels;
 	}
 	for(; n < n_samples; n++) {
 		__m128 out, factor = _mm_set1_ps(1.0f / S24_SCALE);
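
For reviewers, a small standalone sketch (not part of the patch) of the gather-based deinterleave that the 2s path above switches to. The file name gather_sketch.c, main() and the sample values are made up for illustration, and the s24 shift/scale step of the real converter is omitted; it only shows how _mm256_i64gather_epi64 with a 4-byte scale pulls one interleaved L/R int32 pair per 64-bit lane before the permutes split the channels. The permute steps are the same ones the existing 2s code already uses; only the loads differ.

/*
 * gather_sketch.c - standalone illustration, not part of the patch.
 * Build: gcc -mavx2 -o gather_sketch gather_sketch.c
 */
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 8 interleaved stereo frames: L0 R0 L1 R1 ... L7 R7 */
	int32_t s[16];
	for (int i = 0; i < 8; i++) {
		s[2*i+0] = 1000 + i;	/* left sample of frame i */
		s[2*i+1] = 2000 + i;	/* right sample of frame i */
	}
	const uint32_t n_channels = 2;

	/* element indices of frames 0..3 and 4..7; scale 4 turns them into byte offsets */
	__m256i mask1 = _mm256_setr_epi64x(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels);
	__m256i mask2 = _mm256_setr_epi64x(4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);

	/* each 64-bit gather lane picks up one L/R pair */
	__m256i in0 = _mm256_i64gather_epi64((const long long int *)s, mask1, 4);	/* L0 R0 L1 R1 L2 R2 L3 R3 */
	__m256i in1 = _mm256_i64gather_epi64((const long long int *)s, mask2, 4);	/* L4 R4 L5 R5 L6 R6 L7 R7 */

	/* move even (L) lanes to the low half and odd (R) lanes to the high half */
	__m256i perm = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
	__m256i t0 = _mm256_permutevar8x32_epi32(in0, perm);	/* L0 L1 L2 L3 R0 R1 R2 R3 */
	__m256i t1 = _mm256_permutevar8x32_epi32(in1, perm);	/* L4 L5 L6 L7 R4 R5 R6 R7 */

	/* recombine the 128-bit halves into one full vector per channel */
	__m256i left  = _mm256_permute2x128_si256(t0, t1, 0 | (2 << 4));	/* L0..L7 */
	__m256i right = _mm256_permute2x128_si256(t0, t1, 1 | (3 << 4));	/* R0..R7 */

	int32_t l[8], r[8];
	_mm256_storeu_si256((__m256i *)l, left);
	_mm256_storeu_si256((__m256i *)r, right);
	for (int i = 0; i < 8; i++)
		printf("frame %d: L=%d R=%d\n", i, (int)l[i], (int)r[i]);
	return 0;
}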