audioconvert: improve some more AVX2 code

Wim Taymans 2022-12-05 09:37:29 +01:00
parent d6101d73e7
commit 177479dfd1

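The converters touched below unpack interleaved 24-bit-in-32-bit samples into planar float. The old loops fetched pairs of samples with _mm256_i64gather_epi64 and then restored per-channel order with a network of unpack and permute instructions; since AVX2 also has a 32-bit gather, each channel can be loaded directly in its final lane order with a single _mm256_i32gather_epi32, driven by one shared index vector that steps through the stream in strides of n_channels. That removes the t[] temporaries, the extra index vectors, and all of the shuffle fixups.

As a scalar sketch of what one such gather computes (illustrative, not part of the commit):

#include <stdint.h>

/* Scalar equivalent of one 8-wide channel gather, where mask1[k] holds
 * k * n_channels and the gather scale 4 is sizeof(int32_t). */
static void gather_channel_ref(int32_t out[8], const int32_t *s,
			       uint32_t c, uint32_t n_channels)
{
	for (uint32_t k = 0; k < 8; k++)
		out[k] = s[c + k * n_channels];	/* frame k, channel c */
}
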
@@ -336,12 +336,10 @@ conv_s32_to_f32d_4s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 	const int32_t *s = src;
 	float *d0 = dst[0], *d1 = dst[1], *d2 = dst[2], *d3 = dst[3];
 	uint32_t n, unrolled;
-	__m256i in[4], t[4];
+	__m256i in[4];
 	__m256 out[4], factor = _mm256_set1_ps(1.0f / S24_SCALE);
-	__m256i mask1 = _mm256_setr_epi64x(0*n_channels, 0*n_channels+2, 4*n_channels, 4*n_channels+2);
-	__m256i mask2 = _mm256_setr_epi64x(1*n_channels, 1*n_channels+2, 5*n_channels, 5*n_channels+2);
-	__m256i mask3 = _mm256_setr_epi64x(2*n_channels, 2*n_channels+2, 6*n_channels, 6*n_channels+2);
-	__m256i mask4 = _mm256_setr_epi64x(3*n_channels, 3*n_channels+2, 7*n_channels, 7*n_channels+2);
+	__m256i mask1 = _mm256_setr_epi32(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels,
+			4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);
 
 	if (SPA_IS_ALIGNED(d0, 32) &&
 	    SPA_IS_ALIGNED(d1, 32) &&
@@ -352,19 +350,10 @@ conv_s32_to_f32d_4s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 		unrolled = 0;
 
 	for(n = 0; n < unrolled; n += 8) {
-		in[0] = _mm256_i64gather_epi64((long long int *)&s[0*n_channels], mask1, 4);
-		in[1] = _mm256_i64gather_epi64((long long int *)&s[0*n_channels], mask2, 4);
-		in[2] = _mm256_i64gather_epi64((long long int *)&s[0*n_channels], mask3, 4);
-		in[3] = _mm256_i64gather_epi64((long long int *)&s[0*n_channels], mask4, 4);
-		t[0] = _mm256_unpacklo_epi32(in[0], in[1]);	/* a0 a1 b0 b1 a4 a5 b4 b5 */
-		t[1] = _mm256_unpackhi_epi32(in[0], in[1]);	/* c0 c1 d0 d1 c4 c5 d4 d5 */
-		t[2] = _mm256_unpacklo_epi32(in[2], in[3]);	/* a2 a3 b2 b3 a6 a7 b6 b7 */
-		t[3] = _mm256_unpackhi_epi32(in[2], in[3]);	/* c2 c3 d2 d3 c6 c7 d6 d7 */
-		in[0] = _mm256_unpacklo_epi64(t[0], t[2]);	/* a0 a1 a2 a3 a4 a5 a6 a7 */
-		in[1] = _mm256_unpackhi_epi64(t[0], t[2]);	/* b0 b1 b2 b3 b4 b5 b6 b7 */
-		in[2] = _mm256_unpacklo_epi64(t[1], t[3]);	/* c0 c1 c2 c3 c4 c5 c6 c7 */
-		in[3] = _mm256_unpackhi_epi64(t[1], t[3]);	/* d0 d1 d2 d3 d4 d5 d6 d7 */
+		in[0] = _mm256_i32gather_epi32((int*)&s[0], mask1, 4);
+		in[1] = _mm256_i32gather_epi32((int*)&s[1], mask1, 4);
+		in[2] = _mm256_i32gather_epi32((int*)&s[2], mask1, 4);
+		in[3] = _mm256_i32gather_epi32((int*)&s[3], mask1, 4);
 
 		in[0] = _mm256_srai_epi32(in[0], 8);
 		in[1] = _mm256_srai_epi32(in[1], 8);
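
One unrolled iteration of the 4-channel loop above is equivalent to the following scalar sketch (not from the commit; it assumes S24_SCALE is 2^23 = 8388608.0f as defined elsewhere in audioconvert, and that s and the destination pointers are advanced by 8 frames per iteration outside this hunk):

#include <stdint.h>

#define S24_SCALE 8388608.0f	/* assumed value of the audioconvert constant */

/* Shift each s24-in-s32 sample down to 24 bits, then scale into [-1.0, 1.0). */
static void s32_to_f32d_4s_ref(float *d0, float *d1, float *d2, float *d3,
			       const int32_t *s, uint32_t n_channels)
{
	for (uint32_t k = 0; k < 8; k++) {
		d0[k] = (s[k * n_channels + 0] >> 8) * (1.0f / S24_SCALE);
		d1[k] = (s[k * n_channels + 1] >> 8) * (1.0f / S24_SCALE);
		d2[k] = (s[k * n_channels + 2] >> 8) * (1.0f / S24_SCALE);
		d3[k] = (s[k * n_channels + 3] >> 8) * (1.0f / S24_SCALE);
	}
}
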
@@ -413,11 +402,10 @@ conv_s32_to_f32d_2s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 	const int32_t *s = src;
 	float *d0 = dst[0], *d1 = dst[1];
 	uint32_t n, unrolled;
-	__m256i in[4], t[4];
+	__m256i in[4];
 	__m256 out[4], factor = _mm256_set1_ps(1.0f / S24_SCALE);
-	__m256i perm = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
-	__m256i mask1 = _mm256_setr_epi64x(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels);
-	__m256i mask2 = _mm256_setr_epi64x(4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);
+	__m256i mask1 = _mm256_setr_epi32(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels,
+			4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);
 
 	if (SPA_IS_ALIGNED(d0, 32) &&
 	    SPA_IS_ALIGNED(d1, 32))
@@ -426,14 +414,8 @@ conv_s32_to_f32d_2s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 		unrolled = 0;
 
 	for(n = 0; n < unrolled; n += 8) {
-		in[0] = _mm256_i64gather_epi64((long long int *)s, mask1, 4);
-		in[1] = _mm256_i64gather_epi64((long long int *)s, mask2, 4);
-		t[0] = _mm256_permutevar8x32_epi32(in[0], perm);
-		t[1] = _mm256_permutevar8x32_epi32(in[1], perm);
-		in[0] = _mm256_permute2x128_si256(t[0], t[1], 0 | (2 << 4));
-		in[1] = _mm256_permute2x128_si256(t[0], t[1], 1 | (3 << 4));
+		in[0] = _mm256_i32gather_epi32((int*)&s[0], mask1, 4);
+		in[1] = _mm256_i32gather_epi32((int*)&s[1], mask1, 4);
 
 		in[0] = _mm256_srai_epi32(in[0], 8);
 		in[1] = _mm256_srai_epi32(in[1], 8);
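
In the 2-channel loop the _mm256_permutevar8x32_epi32 and _mm256_permute2x128_si256 fixups are gone for the same reason: the 32-bit gather already lands each sample in its final lane. The whole pattern fits in one helper (a sketch; gather_channel is not a name from the source, and AVX2 is required, so compile with -mavx2 on GCC or Clang):

#include <immintrin.h>
#include <stdint.h>

/* Load 8 samples of channel c from an interleaved s32 stream; mask lane k
 * must hold k * n_channels, and the scale 4 is sizeof(int32_t). */
static inline __m256i gather_channel(const int32_t *s, uint32_t c, __m256i mask)
{
	return _mm256_i32gather_epi32((int *)&s[c], mask, 4);
}
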
@@ -470,8 +452,8 @@ conv_s32_to_f32d_1s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 	uint32_t n, unrolled;
 	__m256i in[2];
 	__m256 out[2], factor = _mm256_set1_ps(1.0f / S24_SCALE);
-	__m256i mask1 = _mm256_setr_epi64x(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels);
-	__m256i mask2 = _mm256_setr_epi64x(4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);
+	__m256i mask1 = _mm256_setr_epi32(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels,
+			4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);
 
 	if (SPA_IS_ALIGNED(d0, 32))
 		unrolled = n_samples & ~15;
@@ -479,12 +461,8 @@ conv_s32_to_f32d_1s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
 		unrolled = 0;
 
 	for(n = 0; n < unrolled; n += 16) {
-		in[0] = _mm256_setr_m128i(
-				_mm256_i64gather_epi32(&s[ 0*n_channels], mask1, 4),
-				_mm256_i64gather_epi32(&s[ 0*n_channels], mask2, 4));
-		in[1] = _mm256_setr_m128i(
-				_mm256_i64gather_epi32(&s[ 8*n_channels], mask1, 4),
-				_mm256_i64gather_epi32(&s[ 8*n_channels], mask2, 4));
+		in[0] = _mm256_i32gather_epi32(&s[0*n_channels], mask1, 4);
+		in[1] = _mm256_i32gather_epi32(&s[8*n_channels], mask1, 4);
 
 		in[0] = _mm256_srai_epi32(in[0], 8);
 		in[1] = _mm256_srai_epi32(in[1], 8);
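
A quick way to sanity-check the shared index vector is to compare one gather against its scalar definition (an illustrative test, not part of the commit; n_channels = 3 is an arbitrary choice, and the file must be built with -mavx2):

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t n_channels = 3;
	int32_t s[8 * 3], got[8];

	for (int i = 0; i < 8 * 3; i++)
		s[i] = (i + 1) << 8;	/* distinct s24-in-s32 samples */

	__m256i mask1 = _mm256_setr_epi32(0*n_channels, 1*n_channels,
			2*n_channels, 3*n_channels, 4*n_channels,
			5*n_channels, 6*n_channels, 7*n_channels);
	__m256i in = _mm256_i32gather_epi32((int *)s, mask1, 4);
	_mm256_storeu_si256((__m256i *)got, in);

	for (int k = 0; k < 8; k++)	/* scalar definition of lane k */
		printf("%d: %08x %08x %s\n", k, s[k * n_channels], got[k],
				s[k * n_channels] == got[k] ? "ok" : "MISMATCH");
	return 0;
}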