/* Spa
 *
 * Copyright © 2018 Wim Taymans
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "channelmix-ops.h"

#include <xmmintrin.h>
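
/*
 * SSE variants of the channel-mixing operations.
 *
 * All functions work on planar float audio: dst[] and src[] hold one
 * float buffer per channel. Conceptually, each output channel is a
 * weighted sum of the input channels,
 *
 *	d[i][n] = sum_j mix->matrix[i][j] * s[j][n]
 *
 * as implemented literally in channelmix_f32_n_m_sse() below; the other
 * functions are specializations for common channel layouts.
 */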

/* Zero n_samples samples in d. */
static inline void clear_sse(float *d, uint32_t n_samples)
{
	memset(d, 0, n_samples * sizeof(float));
}

/* Copy n_samples samples from s to d. */
static inline void copy_sse(float *d, const float *s, uint32_t n_samples)
{
	spa_memcpy(d, s, n_samples * sizeof(float));
}

/* Apply a volume factor: d[n] = s[n] * vol. The 0.0 and 1.0 cases are
 * dispatched to clear/copy; the generic case multiplies with SSE,
 * 16 samples per iteration when both buffers are 16-byte aligned. */
static inline void vol_sse(float *d, const float *s, float vol, uint32_t n_samples)
{
	uint32_t n, unrolled;

	if (vol == 0.0f) {
		clear_sse(d, n_samples);
	} else if (vol == 1.0f) {
		copy_sse(d, s, n_samples);
	} else {
		__m128 t[4];
		const __m128 v = _mm_set1_ps(vol);

		if (SPA_IS_ALIGNED(d, 16) &&
		    SPA_IS_ALIGNED(s, 16))
			unrolled = n_samples & ~15;
		else
			unrolled = 0;

		for (n = 0; n < unrolled; n += 16) {
			t[0] = _mm_load_ps(&s[n]);
			t[1] = _mm_load_ps(&s[n+4]);
			t[2] = _mm_load_ps(&s[n+8]);
			t[3] = _mm_load_ps(&s[n+12]);
			_mm_store_ps(&d[n], _mm_mul_ps(t[0], v));
			_mm_store_ps(&d[n+4], _mm_mul_ps(t[1], v));
			_mm_store_ps(&d[n+8], _mm_mul_ps(t[2], v));
			_mm_store_ps(&d[n+12], _mm_mul_ps(t[3], v));
		}
		/* Scalar tail for the remaining samples. */
		for (; n < n_samples; n++)
			_mm_store_ss(&d[n], _mm_mul_ss(_mm_load_ss(&s[n]), v));
	}
}
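
/*
 * Scalar reference for vol_sse(), for readers unfamiliar with the
 * intrinsics above (equivalent behavior, not part of the build):
 *
 *	for (n = 0; n < n_samples; n++)
 *		d[n] = s[n] * vol;
 */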

/* Each output channel is the matching input channel scaled by the
 * diagonal matrix coefficient. */
void channelmix_copy_sse(struct channelmix *mix, void * SPA_RESTRICT dst[],
		const void * SPA_RESTRICT src[], uint32_t n_samples)
{
	uint32_t i, n_dst = mix->dst_chan;
	float **d = (float **)dst;
	const float **s = (const float **)src;
	for (i = 0; i < n_dst; i++)
		vol_sse(d[i], s[i], mix->matrix[i][i], n_samples);
}

/* Generic n_src -> n_dst mix: every output channel is the weighted sum
 * of all input channels, processing 8 samples per iteration when all
 * buffers are 16-byte aligned. */
void
channelmix_f32_n_m_sse(struct channelmix *mix, void * SPA_RESTRICT dst[],
		const void * SPA_RESTRICT src[], uint32_t n_samples)
{
	float **d = (float **) dst;
	const float **s = (const float **) src;
	uint32_t n, unrolled;
	uint32_t i, j, n_dst = mix->dst_chan, n_src = mix->src_chan;
	__m128 mi[n_src], sum[2];
	bool aligned = true;

	for (j = 0; j < n_src; j++)
		aligned &= SPA_IS_ALIGNED(s[j], 16);

	for (i = 0; i < n_dst; i++) {
		float *di = d[i];

		/* Broadcast the matrix row for this output channel. */
		for (j = 0; j < n_src; j++)
			mi[j] = _mm_set1_ps(mix->matrix[i][j]);

		if (aligned && SPA_IS_ALIGNED(d[i], 16))
			unrolled = n_samples & ~7;
		else
			unrolled = 0;

		for (n = 0; n < unrolled; n += 8) {
			sum[0] = sum[1] = _mm_setzero_ps();
			for (j = 0; j < n_src; j++) {
				sum[0] = _mm_add_ps(sum[0], _mm_mul_ps(_mm_load_ps(&s[j][n + 0]), mi[j]));
				sum[1] = _mm_add_ps(sum[1], _mm_mul_ps(_mm_load_ps(&s[j][n + 4]), mi[j]));
			}
			_mm_store_ps(&di[n + 0], sum[0]);
			_mm_store_ps(&di[n + 4], sum[1]);
		}
		/* Scalar tail. */
		for (; n < n_samples; n++) {
			sum[0] = _mm_setzero_ps();
			for (j = 0; j < n_src; j++)
				sum[0] = _mm_add_ss(sum[0], _mm_mul_ss(_mm_load_ss(&s[j][n]), mi[j]));
			_mm_store_ss(&di[n], sum[0]);
		}
		lr4_process(&mix->lr4[i], d[i], d[i], 1.0f, n_samples);
	}
}
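
/*
 * Scalar equivalent of the mixing loop above, per output channel i:
 *
 *	for (n = 0; n < n_samples; n++) {
 *		float sum = 0.0f;
 *		for (j = 0; j < n_src; j++)
 *			sum += mix->matrix[i][j] * s[j][n];
 *		d[i][n] = sum;
 *	}
 *
 * lr4_process() then applies the per-channel filter state kept in
 * mix->lr4 to the mixed result in place, with unit volume (see
 * channelmix-ops.h for the filter definition).
 */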

/* FL+FR+FC+LFE -> FL+FR */
void
channelmix_f32_3p1_2_sse(struct channelmix *mix, void * SPA_RESTRICT dst[],
		const void * SPA_RESTRICT src[], uint32_t n_samples)
{
	float **d = (float **) dst;
	const float **s = (const float **) src;
	const float m0 = mix->matrix[0][0];
	const float m1 = mix->matrix[1][1];
	const float m2 = (mix->matrix[0][2] + mix->matrix[1][2]) * 0.5f;
	const float m3 = (mix->matrix[0][3] + mix->matrix[1][3]) * 0.5f;

	if (m0 == 0.0f && m1 == 0.0f && m2 == 0.0f && m3 == 0.0f) {
		clear_sse(d[0], n_samples);
		clear_sse(d[1], n_samples);
	}
	else {
		uint32_t n, unrolled;
		const __m128 v0 = _mm_set1_ps(m0);
		const __m128 v1 = _mm_set1_ps(m1);
		const __m128 clev = _mm_set1_ps(m2);
		const __m128 llev = _mm_set1_ps(m3);
		__m128 ctr;

		if (SPA_IS_ALIGNED(s[0], 16) &&
		    SPA_IS_ALIGNED(s[1], 16) &&
		    SPA_IS_ALIGNED(s[2], 16) &&
		    SPA_IS_ALIGNED(s[3], 16) &&
		    SPA_IS_ALIGNED(d[0], 16) &&
		    SPA_IS_ALIGNED(d[1], 16))
			unrolled = n_samples & ~3;
		else
			unrolled = 0;

		for (n = 0; n < unrolled; n += 4) {
			/* Shared center/LFE contribution, added to both outputs. */
			ctr = _mm_add_ps(
					_mm_mul_ps(_mm_load_ps(&s[2][n]), clev),
					_mm_mul_ps(_mm_load_ps(&s[3][n]), llev));
			_mm_store_ps(&d[0][n], _mm_add_ps(_mm_mul_ps(_mm_load_ps(&s[0][n]), v0), ctr));
			_mm_store_ps(&d[1][n], _mm_add_ps(_mm_mul_ps(_mm_load_ps(&s[1][n]), v1), ctr));
		}
		for (; n < n_samples; n++) {
			ctr = _mm_add_ss(_mm_mul_ss(_mm_load_ss(&s[2][n]), clev),
					_mm_mul_ss(_mm_load_ss(&s[3][n]), llev));
			_mm_store_ss(&d[0][n], _mm_add_ss(_mm_mul_ss(_mm_load_ss(&s[0][n]), v0), ctr));
			_mm_store_ss(&d[1][n], _mm_add_ss(_mm_mul_ss(_mm_load_ss(&s[1][n]), v1), ctr));
		}
	}
}
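
/*
 * Scalar form of the 3.1 -> stereo downmix above:
 *
 *	L[n] = m0*FL[n] + m2*FC[n] + m3*LFE[n]
 *	R[n] = m1*FR[n] + m2*FC[n] + m3*LFE[n]
 *
 * Averaging the FC and LFE coefficients of both matrix rows into m2 and
 * m3 lets the shared term ctr be computed once per iteration and added
 * to both outputs, halving the center/LFE work.
 */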

/* FL+FR+FC+LFE+SL+SR -> FL+FR */
void
channelmix_f32_5p1_2_sse(struct channelmix *mix, void * SPA_RESTRICT dst[],
		const void * SPA_RESTRICT src[], uint32_t n_samples)
{
	uint32_t n, unrolled;
	float **d = (float **) dst;
	const float **s = (const float **) src;
	const float m00 = mix->matrix[0][0];
	const float m11 = mix->matrix[1][1];
	const __m128 clev = _mm_set1_ps((mix->matrix[0][2] + mix->matrix[1][2]) * 0.5f);
	const __m128 llev = _mm_set1_ps((mix->matrix[0][3] + mix->matrix[1][3]) * 0.5f);
	const __m128 slev0 = _mm_set1_ps(mix->matrix[0][4]);
	const __m128 slev1 = _mm_set1_ps(mix->matrix[1][5]);
	__m128 in, ctr;

	if (SPA_IS_ALIGNED(s[0], 16) &&
	    SPA_IS_ALIGNED(s[1], 16) &&
	    SPA_IS_ALIGNED(s[2], 16) &&
	    SPA_IS_ALIGNED(s[3], 16) &&
	    SPA_IS_ALIGNED(s[4], 16) &&
	    SPA_IS_ALIGNED(s[5], 16) &&
	    SPA_IS_ALIGNED(d[0], 16) &&
	    SPA_IS_ALIGNED(d[1], 16))
		unrolled = n_samples & ~3;
	else
		unrolled = 0;

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		clear_sse(d[0], n_samples);
		clear_sse(d[1], n_samples);
	}
	else {
		const __m128 v0 = _mm_set1_ps(m00);
		const __m128 v1 = _mm_set1_ps(m11);
		for (n = 0; n < unrolled; n += 4) {
			/* Shared center/LFE contribution. */
			ctr = _mm_add_ps(_mm_mul_ps(_mm_load_ps(&s[2][n]), clev),
					_mm_mul_ps(_mm_load_ps(&s[3][n]), llev));
			in = _mm_mul_ps(_mm_load_ps(&s[4][n]), slev0);
			in = _mm_add_ps(in, ctr);
			in = _mm_add_ps(in, _mm_mul_ps(_mm_load_ps(&s[0][n]), v0));
			_mm_store_ps(&d[0][n], in);
			in = _mm_mul_ps(_mm_load_ps(&s[5][n]), slev1);
			in = _mm_add_ps(in, ctr);
			in = _mm_add_ps(in, _mm_mul_ps(_mm_load_ps(&s[1][n]), v1));
			_mm_store_ps(&d[1][n], in);
		}
		for (; n < n_samples; n++) {
			ctr = _mm_mul_ss(_mm_load_ss(&s[2][n]), clev);
			ctr = _mm_add_ss(ctr, _mm_mul_ss(_mm_load_ss(&s[3][n]), llev));
			in = _mm_mul_ss(_mm_load_ss(&s[4][n]), slev0);
			in = _mm_add_ss(in, ctr);
			in = _mm_add_ss(in, _mm_mul_ss(_mm_load_ss(&s[0][n]), v0));
			_mm_store_ss(&d[0][n], in);
			in = _mm_mul_ss(_mm_load_ss(&s[5][n]), slev1);
			in = _mm_add_ss(in, ctr);
			in = _mm_add_ss(in, _mm_mul_ss(_mm_load_ss(&s[1][n]), v1));
			_mm_store_ss(&d[1][n], in);
		}
	}
}

/* FL+FR+FC+LFE+SL+SR -> FL+FR+FC+LFE
 *
 * The side channels are folded into the front pair; FC and LFE pass
 * through with their diagonal gains. */
void
channelmix_f32_5p1_3p1_sse(struct channelmix *mix, void * SPA_RESTRICT dst[],
		const void * SPA_RESTRICT src[], uint32_t n_samples)
{
	uint32_t i, n, unrolled, n_dst = mix->dst_chan;
	float **d = (float **) dst;
	const float **s = (const float **) src;

	if (SPA_IS_ALIGNED(s[0], 16) &&
	    SPA_IS_ALIGNED(s[1], 16) &&
	    SPA_IS_ALIGNED(s[2], 16) &&
	    SPA_IS_ALIGNED(s[3], 16) &&
	    SPA_IS_ALIGNED(s[4], 16) &&
	    SPA_IS_ALIGNED(s[5], 16) &&
	    SPA_IS_ALIGNED(d[0], 16) &&
	    SPA_IS_ALIGNED(d[1], 16) &&
	    SPA_IS_ALIGNED(d[2], 16) &&
	    SPA_IS_ALIGNED(d[3], 16))
		unrolled = n_samples & ~3;
	else
		unrolled = 0;

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		for (i = 0; i < n_dst; i++)
			clear_sse(d[i], n_samples);
	}
	else {
		const __m128 v0 = _mm_set1_ps(mix->matrix[0][0]);
		const __m128 v1 = _mm_set1_ps(mix->matrix[1][1]);
		const __m128 slev0 = _mm_set1_ps(mix->matrix[0][4]);
		const __m128 slev1 = _mm_set1_ps(mix->matrix[1][5]);

		for (n = 0; n < unrolled; n += 4) {
			_mm_store_ps(&d[0][n], _mm_add_ps(
					_mm_mul_ps(_mm_load_ps(&s[0][n]), v0),
					_mm_mul_ps(_mm_load_ps(&s[4][n]), slev0)));

			_mm_store_ps(&d[1][n], _mm_add_ps(
					_mm_mul_ps(_mm_load_ps(&s[1][n]), v1),
					_mm_mul_ps(_mm_load_ps(&s[5][n]), slev1)));
		}
		for (; n < n_samples; n++) {
			_mm_store_ss(&d[0][n], _mm_add_ss(
					_mm_mul_ss(_mm_load_ss(&s[0][n]), v0),
					_mm_mul_ss(_mm_load_ss(&s[4][n]), slev0)));

			_mm_store_ss(&d[1][n], _mm_add_ss(
					_mm_mul_ss(_mm_load_ss(&s[1][n]), v1),
					_mm_mul_ss(_mm_load_ss(&s[5][n]), slev1)));
		}
		vol_sse(d[2], s[2], mix->matrix[2][2], n_samples);
		vol_sse(d[3], s[3], mix->matrix[3][3], n_samples);
	}
}

/* FL+FR+FC+LFE+SL+SR -> FL+FR+RL+RR
 *
 * The front pair reuses the 3.1 downmix; SL/SR become RL/RR with
 * straight gains. */
void
channelmix_f32_5p1_4_sse(struct channelmix *mix, void * SPA_RESTRICT dst[],
		const void * SPA_RESTRICT src[], uint32_t n_samples)
{
	uint32_t i, n_dst = mix->dst_chan;
	float **d = (float **) dst;
	const float **s = (const float **) src;
	const float v4 = mix->matrix[2][4];
	const float v5 = mix->matrix[3][5];

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		for (i = 0; i < n_dst; i++)
			clear_sse(d[i], n_samples);
	}
	else {
		channelmix_f32_3p1_2_sse(mix, dst, src, n_samples);

		vol_sse(d[2], s[4], v4, n_samples);
		vol_sse(d[3], s[5], v5, n_samples);
	}
}
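
/*
 * Hypothetical usage sketch (not part of this file): after a struct
 * channelmix has been set up with dst_chan/src_chan, the mixing matrix
 * and flags, a caller would invoke one of the kernels above with planar,
 * ideally 16-byte aligned, buffers, e.g.
 *
 *	float *dst[2], *src[6];
 *	// ... point dst/src at per-channel float buffers ...
 *	channelmix_f32_5p1_2_sse(&mix, (void **)dst,
 *			(const void **)src, n_samples);
 *
 * Selecting the kernel that matches the channel layouts is assumed to
 * happen in the caller; this is only an illustration.
 */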