channelmix: more improvements

work with the default channel layout
remap to default channel layout in fmconvert
Pass channel positions in resample format
Wim Taymans 2018-09-17 09:41:41 +02:00
parent fef616615e
commit 78b7da608f
5 changed files with 201 additions and 143 deletions
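The three message lines above describe one idea: the converter plugins agree on a single default channel order (FL FR FC LFE SL SR, as the new source indices below imply) and remap whatever order the caller negotiates onto it before mixing, with the channel positions carried in the format. A minimal sketch of such a remap table follows; the enum, the default_order table and make_remap() are hypothetical names for illustration only, not part of this commit (the real code works with SPA channel position values negotiated in the format):

/* Hypothetical sketch: build an index table that maps a caller's channel
 * order onto a fixed default order so the mix kernels can assume one layout. */
enum chan { CH_FL, CH_FR, CH_FC, CH_LFE, CH_SL, CH_SR, CH_LAST };

static const enum chan default_order[CH_LAST] = {
	CH_FL, CH_FR, CH_FC, CH_LFE, CH_SL, CH_SR
};

/* remap[i] becomes the source index that carries default_order[i], or -1. */
static void make_remap(const enum chan *src_order, int n_src, int remap[CH_LAST])
{
	for (int i = 0; i < CH_LAST; i++) {
		remap[i] = -1;
		for (int j = 0; j < n_src; j++)
			if (src_order[j] == default_order[i])
				remap[i] = j;
	}
}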

@@ -63,58 +63,58 @@ channelmix_f32_2_4_sse(void *data, int n_dst, void *dst[n_dst],
float v = m[0];
__m128 vol = _mm_set1_ps(v);
__m128 in;
+ float *dFL = d[0], *dFR = d[1], *dRL = d[2], *dRR = d[3];
+ float *sFL = s[0], *sFR = s[1];
if (v <= VOLUME_MIN) {
for (i = 0; i < n_dst; i++)
memset(d[i], 0, n_bytes);
}
else if (v == VOLUME_NORM) {
- float *d0 = d[0], *d1 = d[1], *d2 = d[2], *d3 = d[3], *s0 = s[0], *s1 = s[1];
unrolled = n_samples / 4;
remain = n_samples & 3;
for(n = 0; unrolled--; n += 4) {
- in = _mm_loadu_ps(&s0[n]);
- _mm_storeu_ps(&d0[n], in);
- _mm_storeu_ps(&d2[n], in);
- in = _mm_loadu_ps(&s1[n]);
- _mm_storeu_ps(&d1[n], in);
- _mm_storeu_ps(&d3[n], in);
+ in = _mm_loadu_ps(&sFL[n]);
+ _mm_storeu_ps(&dFL[n], in);
+ _mm_storeu_ps(&dRL[n], in);
+ in = _mm_loadu_ps(&sFR[n]);
+ _mm_storeu_ps(&dFR[n], in);
+ _mm_storeu_ps(&dRR[n], in);
}
for(; remain--; n++) {
- in = _mm_load_ss(&s0[n]);
- _mm_store_ss(&d0[n], in);
- _mm_store_ss(&d2[n], in);
- in = _mm_load_ss(&s1[n]);
- _mm_store_ss(&d1[n], in);
- _mm_store_ss(&d3[n], in);
+ in = _mm_load_ss(&sFL[n]);
+ _mm_store_ss(&dFL[n], in);
+ _mm_store_ss(&dRL[n], in);
+ in = _mm_load_ss(&sFR[n]);
+ _mm_store_ss(&dFR[n], in);
+ _mm_store_ss(&dRR[n], in);
}
}
else {
- float *d0 = d[0], *d1 = d[1], *d2 = d[2], *d3 = d[3], *s0 = s[0], *s1 = s[1];
unrolled = n_samples / 4;
remain = n_samples & 3;
for(n = 0; unrolled--; n += 4) {
- in = _mm_mul_ps(_mm_loadu_ps(&s0[n]), vol);
- _mm_storeu_ps(&d0[n], in);
- _mm_storeu_ps(&d2[n], in);
- in = _mm_mul_ps(_mm_loadu_ps(&s1[n]), vol);
- _mm_storeu_ps(&d1[n], in);
- _mm_storeu_ps(&d3[n], in);
+ in = _mm_mul_ps(_mm_loadu_ps(&sFL[n]), vol);
+ _mm_storeu_ps(&dFL[n], in);
+ _mm_storeu_ps(&dRL[n], in);
+ in = _mm_mul_ps(_mm_loadu_ps(&sFR[n]), vol);
+ _mm_storeu_ps(&dFR[n], in);
+ _mm_storeu_ps(&dRR[n], in);
}
for(; remain--; n++) {
- in = _mm_mul_ss(_mm_load_ss(&s0[n]), vol);
- _mm_store_ss(&d0[n], in);
- _mm_store_ss(&d2[n], in);
- in = _mm_mul_ss(_mm_load_ss(&s1[n]), vol);
- _mm_store_ss(&d1[n], in);
- _mm_store_ss(&d3[n], in);
+ in = _mm_mul_ss(_mm_load_ss(&sFL[n]), vol);
+ _mm_store_ss(&dFL[n], in);
+ _mm_store_ss(&dRL[n], in);
+ in = _mm_mul_ss(_mm_load_ss(&sFR[n]), vol);
+ _mm_store_ss(&dFR[n], in);
+ _mm_store_ss(&dRR[n], in);
}
}
}
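For reference, the 2 -> 4 upmix above is just a copy with the volume applied: FL feeds both FL and RL, FR feeds both FR and RR. A scalar sketch of what each SSE branch computes (a hypothetical helper, not this commit's code nor necessarily the project's C fallback; the SSE paths additionally special-case v == 0 and v == VOLUME_NORM):

/* Scalar sketch of the 2 -> 4 upmix performed by the SSE code above. */
static void upmix_2_to_4(float *dFL, float *dFR, float *dRL, float *dRR,
		const float *sFL, const float *sFR, int n_samples, float v)
{
	for (int n = 0; n < n_samples; n++) {
		dFL[n] = dRL[n] = sFL[n] * v;
		dFR[n] = dRR[n] = sFR[n] * v;
	}
}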
- /* FL+FR+RL+RR+FC+LFE -> FL+FR */
+ /* FL+FR+FC+LFE+SL+SR -> FL+FR */
static void
channelmix_f32_5p1_2_sse(void *data, int n_dst, void *dst[n_dst],
int n_src, const void *src[n_src], void *matrix, int n_bytes)
@@ -128,76 +128,75 @@ channelmix_f32_5p1_2_sse(void *data, int n_dst, void *dst[n_dst],
__m128 slev = _mm_set1_ps(0.7071f);
__m128 vol = _mm_set1_ps(v);
__m128 in, ctr;
+ float *dFL = d[0], *dFR = d[1];
+ float *sFL = s[0], *sFR = s[1], *sFC = s[2], *sSL = s[4], *sSR = s[5];
if (v <= VOLUME_MIN) {
- memset(d[0], 0, n_bytes);
- memset(d[1], 0, n_bytes);
+ memset(dFL, 0, n_bytes);
+ memset(dFR, 0, n_bytes);
}
else if (v == VOLUME_NORM) {
- float *d0 = d[0], *d1 = d[1], *s0 = s[0], *s1 = s[1], *s2 = s[2], *s3 = s[3], *s4 = s[4];
unrolled = n_samples / 4;
remain = n_samples & 3;
for(n = 0; unrolled--; n += 4) {
- ctr = _mm_mul_ps(_mm_loadu_ps(&s4[n]), clev);
- in = _mm_mul_ps(_mm_loadu_ps(&s2[n]), slev);
+ ctr = _mm_mul_ps(_mm_loadu_ps(&sFC[n]), clev);
+ in = _mm_mul_ps(_mm_loadu_ps(&sSL[n]), slev);
in = _mm_add_ps(in, ctr);
- in = _mm_add_ps(in, _mm_loadu_ps(&s0[n]));
- _mm_storeu_ps(&d0[n], in);
- in = _mm_mul_ps(_mm_loadu_ps(&s3[n]), slev);
+ in = _mm_add_ps(in, _mm_loadu_ps(&sFL[n]));
+ _mm_storeu_ps(&dFL[n], in);
+ in = _mm_mul_ps(_mm_loadu_ps(&sSR[n]), slev);
in = _mm_add_ps(in, ctr);
- in = _mm_add_ps(in, _mm_loadu_ps(&s1[n]));
- _mm_storeu_ps(&d1[n], in);
+ in = _mm_add_ps(in, _mm_loadu_ps(&sFR[n]));
+ _mm_storeu_ps(&dFR[n], in);
}
for(; remain--; n++) {
- ctr = _mm_mul_ss(_mm_load_ss(&s4[n]), clev);
- in = _mm_mul_ss(_mm_load_ss(&s2[n]), slev);
+ ctr = _mm_mul_ss(_mm_load_ss(&sFC[n]), clev);
+ in = _mm_mul_ss(_mm_load_ss(&sSL[n]), slev);
in = _mm_add_ss(in, ctr);
- in = _mm_add_ss(in, _mm_load_ss(&s0[n]));
- _mm_store_ss(&d0[n], in);
- in = _mm_mul_ss(_mm_load_ss(&s3[n]), slev);
+ in = _mm_add_ss(in, _mm_load_ss(&sFL[n]));
+ _mm_store_ss(&dFL[n], in);
+ in = _mm_mul_ss(_mm_load_ss(&sSR[n]), slev);
in = _mm_add_ss(in, ctr);
- in = _mm_add_ss(in, _mm_load_ss(&s1[n]));
- _mm_store_ss(&d1[n], in);
+ in = _mm_add_ss(in, _mm_load_ss(&sFR[n]));
+ _mm_store_ss(&dFR[n], in);
}
}
else {
- float *d0 = d[0], *d1 = d[1], *s0 = s[0], *s1 = s[1], *s2 = s[2], *s3 = s[3], *s4 = s[4];
unrolled = n_samples / 4;
remain = n_samples & 3;
for(n = 0; unrolled--; n += 4) {
- ctr = _mm_mul_ps(_mm_loadu_ps(&s4[n]), clev);
- in = _mm_mul_ps(_mm_loadu_ps(&s2[n]), slev);
+ ctr = _mm_mul_ps(_mm_loadu_ps(&sFC[n]), clev);
+ in = _mm_mul_ps(_mm_loadu_ps(&sSL[n]), slev);
in = _mm_add_ps(in, ctr);
- in = _mm_add_ps(in, _mm_loadu_ps(&s0[n]));
+ in = _mm_add_ps(in, _mm_loadu_ps(&sFL[n]));
in = _mm_mul_ps(in, vol);
- _mm_storeu_ps(&d0[n], in);
- in = _mm_mul_ps(_mm_loadu_ps(&s3[n]), slev);
+ _mm_storeu_ps(&dFL[n], in);
+ in = _mm_mul_ps(_mm_loadu_ps(&sSR[n]), slev);
in = _mm_add_ps(in, ctr);
- in = _mm_add_ps(in, _mm_loadu_ps(&s1[n]));
+ in = _mm_add_ps(in, _mm_loadu_ps(&sFR[n]));
in = _mm_mul_ps(in, vol);
- _mm_storeu_ps(&d1[n], in);
+ _mm_storeu_ps(&dFR[n], in);
}
for(; remain--; n++) {
- ctr = _mm_mul_ss(_mm_load_ss(&s4[n]), clev);
- in = _mm_mul_ss(_mm_load_ss(&s2[n]), slev);
+ ctr = _mm_mul_ss(_mm_load_ss(&sFC[n]), clev);
+ in = _mm_mul_ss(_mm_load_ss(&sSL[n]), slev);
in = _mm_add_ss(in, ctr);
- in = _mm_add_ss(in, _mm_load_ss(&s0[n]));
+ in = _mm_add_ss(in, _mm_load_ss(&sFL[n]));
in = _mm_mul_ss(in, vol);
- _mm_store_ss(&d0[n], in);
- in = _mm_mul_ss(_mm_load_ss(&s3[n]), slev);
+ _mm_store_ss(&dFL[n], in);
+ in = _mm_mul_ss(_mm_load_ss(&sSR[n]), slev);
in = _mm_add_ss(in, ctr);
- in = _mm_add_ss(in, _mm_load_ss(&s1[n]));
+ in = _mm_add_ss(in, _mm_load_ss(&sFR[n]));
in = _mm_mul_ss(in, vol);
- _mm_store_ss(&d1[n], in);
+ _mm_store_ss(&dFR[n], in);
}
}
}
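With the default layout the downmix above reduces to simple per-sample arithmetic: the centre is folded into both fronts at clev, each side channel is folded into its front at slev, LFE is dropped, and the result is scaled by the volume. A scalar sketch of that computation (downmix_5p1_to_2 is a hypothetical helper name, not this commit's code; the visible constants suggest clev and slev are both 0.7071 here):

/* Scalar sketch of the 5.1 -> stereo downmix performed by the SSE code above. */
static void downmix_5p1_to_2(float *dFL, float *dFR,
		const float *sFL, const float *sFR, const float *sFC,
		const float *sSL, const float *sSR,
		int n_samples, float v, float clev, float slev)
{
	for (int n = 0; n < n_samples; n++) {
		float ctr = clev * sFC[n];
		dFL[n] = (sFL[n] + ctr + slev * sSL[n]) * v;
		dFR[n] = (sFR[n] + ctr + slev * sSR[n]) * v;
	}
}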
- /* FL+FR+RL+RR+FC+LFE -> FL+FR+RL+RR*/
+ /* FL+FR+FC+LFE+SL+SR -> FL+FR+RL+RR*/
static void
channelmix_f32_5p1_4_sse(void *data, int n_dst, void *dst[n_dst],
int n_src, const void *src[n_src], void *matrix, int n_bytes)
@@ -210,53 +209,49 @@ channelmix_f32_5p1_4_sse(void *data, int n_dst, void *dst[n_dst],
__m128 clev = _mm_set1_ps(0.7071f);
__m128 vol = _mm_set1_ps(v);
__m128 ctr;
+ float *dFL = d[0], *dFR = d[1], *dRL = d[2], *dRR = d[3];
+ float *sFL = s[0], *sFR = s[1], *sFC = s[2], *sSL = s[4], *sSR = s[5];
if (v <= VOLUME_MIN) {
for (i = 0; i < n_dst; i++)
memset(d[i], 0, n_bytes);
}
else if (v == VOLUME_NORM) {
- float *s0 = s[0], *s1 = s[1], *s2 = s[2], *s3 = s[3], *s4 = s[4];
- float *d0 = d[0], *d1 = d[1], *d2 = d[2], *d3 = d[3];
unrolled = n_samples / 4;
remain = n_samples & 3;
for(n = 0; unrolled--; n += 4) {
- ctr = _mm_mul_ps(_mm_loadu_ps(&s4[n]), clev);
- _mm_storeu_ps(&d0[n], _mm_add_ps(_mm_loadu_ps(&s0[n]), ctr));
- _mm_storeu_ps(&d1[n], _mm_add_ps(_mm_loadu_ps(&s1[n]), ctr));
- _mm_storeu_ps(&d2[n], _mm_loadu_ps(&s2[n]));
- _mm_storeu_ps(&d3[n], _mm_loadu_ps(&s3[n]));
+ ctr = _mm_mul_ps(_mm_loadu_ps(&sFC[n]), clev);
+ _mm_storeu_ps(&dFL[n], _mm_add_ps(_mm_loadu_ps(&sFL[n]), ctr));
+ _mm_storeu_ps(&dFR[n], _mm_add_ps(_mm_loadu_ps(&sFR[n]), ctr));
+ _mm_storeu_ps(&dRL[n], _mm_loadu_ps(&sSL[n]));
+ _mm_storeu_ps(&dRR[n], _mm_loadu_ps(&sSR[n]));
}
for(; remain--; n++) {
- ctr = _mm_mul_ss(_mm_load_ss(&s4[n]), clev);
- _mm_store_ss(&d0[n], _mm_add_ss(_mm_load_ss(&s0[n]), ctr));
- _mm_store_ss(&d1[n], _mm_add_ss(_mm_load_ss(&s1[n]), ctr));
- _mm_store_ss(&d2[n], _mm_load_ss(&s2[n]));
- _mm_store_ss(&d3[n], _mm_load_ss(&s3[n]));
+ ctr = _mm_mul_ss(_mm_load_ss(&sFC[n]), clev);
+ _mm_store_ss(&dFL[n], _mm_add_ss(_mm_load_ss(&sFL[n]), ctr));
+ _mm_store_ss(&dFR[n], _mm_add_ss(_mm_load_ss(&sFR[n]), ctr));
+ _mm_store_ss(&dRL[n], _mm_load_ss(&sSL[n]));
+ _mm_store_ss(&dRR[n], _mm_load_ss(&sSR[n]));
}
}
else {
- float *s0 = s[0], *s1 = s[1], *s2 = s[2], *s3 = s[3], *s4 = s[4];
- float *d0 = d[0], *d1 = d[1], *d2 = d[2], *d3 = d[3];
unrolled = n_samples / 4;
remain = n_samples & 3;
for(n = 0; unrolled--; n += 4) {
- ctr = _mm_mul_ps(_mm_loadu_ps(&s4[n]), clev);
- _mm_storeu_ps(&d0[n], _mm_mul_ps(_mm_add_ps(_mm_loadu_ps(&s0[n]), ctr), vol));
- _mm_storeu_ps(&d1[n], _mm_mul_ps(_mm_add_ps(_mm_loadu_ps(&s1[n]), ctr), vol));
- _mm_storeu_ps(&d2[n], _mm_mul_ps(_mm_loadu_ps(&s2[n]), vol));
- _mm_storeu_ps(&d3[n], _mm_mul_ps(_mm_loadu_ps(&s3[n]), vol));
+ ctr = _mm_mul_ps(_mm_loadu_ps(&sFC[n]), clev);
+ _mm_storeu_ps(&dFL[n], _mm_mul_ps(_mm_add_ps(_mm_loadu_ps(&sFL[n]), ctr), vol));
+ _mm_storeu_ps(&dFR[n], _mm_mul_ps(_mm_add_ps(_mm_loadu_ps(&sFR[n]), ctr), vol));
+ _mm_storeu_ps(&dRL[n], _mm_mul_ps(_mm_loadu_ps(&sSL[n]), vol));
+ _mm_storeu_ps(&dRR[n], _mm_mul_ps(_mm_loadu_ps(&sSR[n]), vol));
}
for(; remain--; n++) {
- ctr = _mm_mul_ss(_mm_load_ss(&s4[n]), clev);
- _mm_store_ss(&d0[n], _mm_mul_ss(_mm_add_ss(_mm_load_ss(&s0[n]), ctr), vol));
- _mm_store_ss(&d1[n], _mm_mul_ss(_mm_add_ss(_mm_load_ss(&s1[n]), ctr), vol));
- _mm_store_ss(&d2[n], _mm_mul_ss(_mm_load_ss(&s2[n]), vol));
- _mm_store_ss(&d3[n], _mm_mul_ss(_mm_load_ss(&s3[n]), vol));
+ ctr = _mm_mul_ss(_mm_load_ss(&sFC[n]), clev);
+ _mm_store_ss(&dFL[n], _mm_mul_ss(_mm_add_ss(_mm_load_ss(&sFL[n]), ctr), vol));
+ _mm_store_ss(&dFR[n], _mm_mul_ss(_mm_add_ss(_mm_load_ss(&sFR[n]), ctr), vol));
+ _mm_store_ss(&dRL[n], _mm_mul_ss(_mm_load_ss(&sSL[n]), vol));
+ _mm_store_ss(&dRR[n], _mm_mul_ss(_mm_load_ss(&sSR[n]), vol));
}
}
}
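The 5.1 -> 4.0 path in this last hunk is the same folding without the side-to-front term: the centre goes into both fronts at clev, the sides pass through as the rears, and everything is scaled by the volume. A scalar sketch of the per-sample computation (downmix_5p1_to_4 is a hypothetical helper name, not this commit's code):

/* Scalar sketch of the 5.1 -> 4.0 downmix performed by the SSE code above. */
static void downmix_5p1_to_4(float *dFL, float *dFR, float *dRL, float *dRR,
		const float *sFL, const float *sFR, const float *sFC,
		const float *sSL, const float *sSR,
		int n_samples, float v, float clev)
{
	for (int n = 0; n < n_samples; n++) {
		float ctr = clev * sFC[n];
		dFL[n] = (sFL[n] + ctr) * v;
		dFR[n] = (sFR[n] + ctr) * v;
		dRL[n] = sSL[n] * v;
		dRR[n] = sSR[n] * v;
	}
}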