audioconvert: fix rare unaligned load exceptions

The suspected causes are described in the issue. Also improve float semantics by loading samples with unaligned float intrinsics instead of casted integer loads.

Fixes #3790
Dmitry Sharshakov 2024-01-16 14:30:09 +03:00 committed by Wim Taymans
parent a769a014e0
commit e2844e4421
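
For context: the patch swaps the integer loads (_mm_lddqu_si128 / _mm256_lddqu_si256 plus casts) for the float loads _mm_loadu_ps / _mm256_loadu_ps on the sample pointer s, which may not be 32-byte aligned, while the filter taps keep the aligned _mm256_load_ps. The commit message defers the root cause to the linked issue; the minimal standalone sketch below (not part of the commit, all names invented for illustration) only demonstrates the alignment contract of the two float load intrinsics involved.

/* sketch.c - aligned vs. unaligned AVX float loads.
 * Build with: cc -mavx2 -mfma sketch.c
 * All buffer names and the main() harness are invented for this example. */
#include <immintrin.h>
#include <stdio.h>

int main(void)
{
	/* taps stands in for the 32-byte-aligned filter coefficients. */
	float taps[8] __attribute__((aligned(32))) = { 1, 2, 3, 4, 5, 6, 7, 8 };
	float samples[9] __attribute__((aligned(32))) = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
	float out[8] __attribute__((aligned(32)));

	/* Guaranteed 4 bytes off 32-byte alignment. */
	const float *s = samples + 1;

	/* _mm256_loadu_ps is safe at any alignment, so it is used for s;
	 * _mm256_load_ps requires 32-byte alignment and faults otherwise,
	 * which is fine for taps. */
	__m256 ty = _mm256_loadu_ps(s);
	__m256 acc = _mm256_fmadd_ps(ty, _mm256_load_ps(taps), _mm256_setzero_ps());

	_mm256_store_ps(out, acc);
	printf("%f\n", out[0]);	/* 1.0 * 1.0 + 0.0 = 1.0 */
	return 0;
}

The t0/t1/taps buffers in the resampler are presumably allocated with 32-byte alignment, which would be why the _mm256_load_ps calls on them are left untouched by the patch.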


@@ -16,18 +16,18 @@ static inline void inner_product_avx(float *d, const float * SPA_RESTRICT s,
 	uint32_t n_taps4 = n_taps & ~0xf;
 	for (; i < n_taps4; i += 16) {
-		ty = (__m256)_mm256_lddqu_si256((__m256i*)(s + i + 0));
+		ty = (__m256)_mm256_loadu_ps(s + i + 0);
 		sy[0] = _mm256_fmadd_ps(ty, _mm256_load_ps(taps + i + 0), sy[0]);
-		ty = (__m256)_mm256_lddqu_si256((__m256i*)(s + i + 8));
+		ty = (__m256)_mm256_loadu_ps(s + i + 8);
 		sy[1] = _mm256_fmadd_ps(ty, _mm256_load_ps(taps + i + 8), sy[1]);
 	}
 	sy[0] = _mm256_add_ps(sy[1], sy[0]);
 	sx[1] = _mm256_extractf128_ps(sy[0], 1);
 	sx[0] = _mm256_extractf128_ps(sy[0], 0);
 	for (; i < n_taps; i += 8) {
-		tx = (__m128)_mm_lddqu_si128((__m128i*)(s + i + 0));
+		tx = (__m128)_mm_loadu_ps(s + i + 0);
 		sx[0] = _mm_fmadd_ps(tx, _mm_load_ps(taps + i + 0), sx[0]);
-		tx = (__m128)_mm_lddqu_si128((__m128i*)(s + i + 4));
+		tx = (__m128)_mm_loadu_ps(s + i + 4);
 		sx[1] = _mm_fmadd_ps(tx, _mm_load_ps(taps + i + 4), sx[1]);
 	}
 	sx[0] = _mm_add_ps(sx[0], sx[1]);
@@ -45,10 +45,10 @@ static inline void inner_product_ip_avx(float *d, const float * SPA_RESTRICT s,
 	uint32_t i, n_taps4 = n_taps & ~0xf;
 	for (i = 0; i < n_taps4; i += 16) {
-		ty = (__m256)_mm256_lddqu_si256((__m256i*)(s + i + 0));
+		ty = (__m256)_mm256_loadu_ps(s + i + 0);
 		sy[0] = _mm256_fmadd_ps(ty, _mm256_load_ps(t0 + i + 0), sy[0]);
 		sy[1] = _mm256_fmadd_ps(ty, _mm256_load_ps(t1 + i + 0), sy[1]);
-		ty = (__m256)_mm256_lddqu_si256((__m256i*)(s + i + 8));
+		ty = (__m256)_mm256_loadu_ps(s + i + 8);
 		sy[0] = _mm256_fmadd_ps(ty, _mm256_load_ps(t0 + i + 8), sy[0]);
 		sy[1] = _mm256_fmadd_ps(ty, _mm256_load_ps(t1 + i + 8), sy[1]);
 	}
@@ -56,10 +56,10 @@ static inline void inner_product_ip_avx(float *d, const float * SPA_RESTRICT s,
 	sx[1] = _mm_add_ps(_mm256_extractf128_ps(sy[1], 0), _mm256_extractf128_ps(sy[1], 1));
 	for (; i < n_taps; i += 8) {
-		tx = (__m128)_mm_lddqu_si128((__m128i*)(s + i + 0));
+		tx = (__m128)_mm_loadu_ps(s + i + 0);
 		sx[0] = _mm_fmadd_ps(tx, _mm_load_ps(t0 + i + 0), sx[0]);
 		sx[1] = _mm_fmadd_ps(tx, _mm_load_ps(t1 + i + 0), sx[1]);
-		tx = (__m128)_mm_lddqu_si128((__m128i*)(s + i + 4));
+		tx = (__m128)_mm_loadu_ps(s + i + 4);
 		sx[0] = _mm_fmadd_ps(tx, _mm_load_ps(t0 + i + 4), sx[0]);
 		sx[1] = _mm_fmadd_ps(tx, _mm_load_ps(t1 + i + 4), sx[1]);
 	}