Mirror of https://gitlab.freedesktop.org/pipewire/pipewire.git (synced 2025-10-31 22:25:38 -04:00)

channelmix: apply channel volumes correctly

parent 449d98910b
commit 87ae7a8011

5 changed files with 270 additions and 253 deletions
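The commit replaces the single global volume factor that the mixing functions used to apply with per-channel volumes that are folded directly into the mixing matrix, together with precomputed flags (zero, norm, equal, identity) that let each function pick a fast path. A minimal sketch of that idea, not part of the commit and with hypothetical names, assuming one volume per source channel:

	/* Illustrative sketch only (not part of the commit, names are hypothetical):
	 * fold per-channel volumes into the mix matrix instead of applying one
	 * global volume after mixing. */
	#include <stdint.h>

	#define MAX_CH 8

	struct mix_sketch {
		float matrix_orig[MAX_CH][MAX_CH];	/* designed mix coefficients */
		float matrix[MAX_CH][MAX_CH];		/* coefficients with volumes folded in */
	};

	/* Scale every source column j by its channel volume. After this the
	 * processing loops only read matrix[i][j]; no separate volume pass is left. */
	static void fold_volumes(struct mix_sketch *m, uint32_t n_dst, uint32_t n_src,
			const float *volumes)
	{
		uint32_t i, j;
		for (i = 0; i < n_dst; i++)
			for (j = 0; j < n_src; j++)
				m->matrix[i][j] = m->matrix_orig[i][j] * volumes[j];
	}

With the volumes baked in, a muted stream simply yields an all-zero matrix, and unity volumes leave the designed coefficients untouched.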
@@ -31,24 +31,20 @@ channelmix_copy_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT ds
 	uint32_t i, n;
 	float **d = (float **)dst;
 	const float **s = (const float **)src;
-	const float *m = mix->matrix;
-	const float v = mix->volume;
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		for (i = 0; i < n_dst; i++)
 			memset(d[i], 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
+	else if (mix->identity) {
 		for (i = 0; i < n_dst; i++)
 			spa_memcpy(d[i], s[i], n_samples * sizeof(float));
 	}
 	else {
 		for (i = 0; i < n_dst; i++) {
-			const float vol = m[i * n_src + i];
 			for (n = 0; n < n_samples; n++)
-				d[i][n] = s[i][n] * vol;
+				d[i][n] = s[i][n] * mix->matrix[i][i];
 		}
-
 	}
 }
@@ -61,13 +57,12 @@ channelmix_f32_n_m_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT
 	uint32_t i, j, n;
 	float **d = (float **) dst;
 	const float **s = (const float **) src;
-	const float *m = mix->matrix;
 
 	for (n = 0; n < n_samples; n++) {
 		for (i = 0; i < n_dst; i++) {
 			float sum = 0.0f;
 			for (j = 0; j < n_src; j++)
-				sum += s[j][n] * m[i * n_src + j];
+				sum += s[j][n] * mix->matrix[i][j];
 			d[i][n] = sum;
 		}
 	}
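The generic n-to-m path above is the reference behaviour: each output sample is the dot product of the input channels with one matrix row, d[i][n] = sum over j of s[j][n] * matrix[i][j], with the channel volumes already contained in matrix[i][j]. The specialized functions that follow unroll this loop for common layouts. A tiny illustrative example (hypothetical values, not from the commit):

	/* Hypothetical example, not from the commit: one stereo frame mixed down to
	 * mono, with the per-channel volumes already folded into the matrix row. */
	static float mix_one_sample(const float row[2], const float frame[2])
	{
		/* d[0][n] = s[0][n] * matrix[0][0] + s[1][n] * matrix[0][1] */
		return frame[0] * row[0] + frame[1] * row[1];
	}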
@@ -83,19 +78,26 @@ channelmix_f32_1_2_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT
 	uint32_t n;
 	float **d = (float **)dst;
 	const float **s = (const float **)src;
-	float v = mix->volume;
+	const float v0 = mix->matrix[0][0];
+	const float v1 = mix->matrix[1][0];
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		memset(d[0], 0, n_samples * sizeof(float));
 		memset(d[1], 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
+	else if (mix->norm) {
 		for (n = 0; n < n_samples; n++)
 			d[0][n] = d[1][n] = s[0][n];
 	}
-	else {
+	else if (mix->equal) {
 		for (n = 0; n < n_samples; n++)
-			d[0][n] = d[1][n] = s[0][n] * v;
+			d[0][n] = d[1][n] = s[0][n] * v0;
 	}
+	else {
+		for (n = 0; n < n_samples; n++) {
+			d[0][n] = s[0][n] * v0;
+			d[1][n] = s[0][n] * v1;
+		}
+	}
 }
@@ -106,15 +108,19 @@ channelmix_f32_2_1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT
 	uint32_t n;
 	float **d = (float **)dst;
 	const float **s = (const float **)src;
-	float v = mix->volume;
+	const float v0 = mix->matrix[0][0];
+	const float v1 = mix->matrix[0][1];
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		memset(d[0], 0, n_samples * sizeof(float));
 	}
-	else {
-		const float f = v * 0.5f;
+	else if (mix->equal) {
 		for (n = 0; n < n_samples; n++)
-			d[0][n] = (s[0][n] + s[1][n]) * f;
+			d[0][n] = (s[0][n] + s[1][n]) * v0;
 	}
+	else {
+		for (n = 0; n < n_samples; n++)
+			d[0][n] = s[0][n] * v0 + s[1][n] * v1;
+	}
 }
@@ -125,15 +131,22 @@ channelmix_f32_4_1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT
 	uint32_t n;
 	float **d = (float **)dst;
 	const float **s = (const float **)src;
-	float v = mix->volume;
+	const float v0 = mix->matrix[0][0];
+	const float v1 = mix->matrix[0][1];
+	const float v2 = mix->matrix[0][2];
+	const float v3 = mix->matrix[0][3];
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		memset(d[0], 0, n_samples * sizeof(float));
 	}
-	else {
-		const float f = v * 0.25f;
+	else if (mix->equal) {
 		for (n = 0; n < n_samples; n++)
-			d[0][n] = (s[0][n] + s[1][n] + s[2][n] + s[3][n]) * f;
+			d[0][n] = (s[0][n] + s[1][n] + s[2][n] + s[3][n]) * v0;
+	}
+	else {
+		for (n = 0; n < n_samples; n++)
+			d[0][n] = s[0][n] * v0 + s[1][n] * v1 +
+				s[2][n] * v2 + s[3][n] * v3;
 	}
 }
@@ -144,15 +157,20 @@ channelmix_f32_3p1_1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRI
 	uint32_t n;
 	float **d = (float **)dst;
 	const float **s = (const float **)src;
-	float v = mix->volume;
+	const float v0 = mix->matrix[0][0];
+	const float v1 = mix->matrix[0][1];
+	const float v2 = mix->matrix[0][2];
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		memset(d[0], 0, n_samples * sizeof(float));
 	}
-	else {
-		const float f = v * 0.5f;
+	else if (mix->equal) {
 		for (n = 0; n < n_samples; n++)
-			d[0][n] = (s[0][n] + s[1][n] + s[2][n]) * f;
+			d[0][n] = (s[0][n] + s[1][n] + s[2][n] + s[3][n]) * v0;
+	}
+	else {
+		for (n = 0; n < n_samples; n++)
+			d[0][n] = s[0][n] * v0 + s[1][n] * v1 + s[2][n] * v2;
 	}
 }
@@ -166,22 +184,33 @@ channelmix_f32_2_4_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT
 	uint32_t i, n;
 	float **d = (float **)dst;
 	const float **s = (const float **)src;
-	float v = mix->volume;
+	const float v0 = mix->matrix[0][0];
+	const float v1 = mix->matrix[1][1];
+	const float v2 = mix->matrix[2][0];
+	const float v3 = mix->matrix[3][1];
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		for (i = 0; i < n_dst; i++)
 			memset(d[i], 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
+	else if (mix->norm) {
 		for (n = 0; n < n_samples; n++) {
 			d[0][n] = d[2][n] = s[0][n];
 			d[1][n] = d[3][n] = s[1][n];
 		}
 	}
+	else if (v0 == v2 && v1 == v3) {
+		for (n = 0; n < n_samples; n++) {
+			d[0][n] = d[2][n] = s[0][n] * v0;
+			d[1][n] = d[3][n] = s[1][n] * v1;
+		}
+	}
 	else {
 		for (n = 0; n < n_samples; n++) {
-			d[0][n] = d[2][n] = s[0][n] * v;
-			d[1][n] = d[3][n] = s[1][n] * v;
+			d[0][n] = s[0][n] * v0;
+			d[1][n] = s[1][n] * v1;
+			d[2][n] = s[0][n] * v2;
+			d[3][n] = s[1][n] * v3;
 		}
 	}
 }
@@ -194,13 +223,14 @@ channelmix_f32_2_3p1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRI
 	uint32_t i, n;
 	float **d = (float **)dst;
 	const float **s = (const float **)src;
-	float v = mix->volume;
+	const float v0 = mix->matrix[0][0];
+	const float v1 = mix->matrix[1][1];
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		for (i = 0; i < n_dst; i++)
 			memset(d[i], 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
+	else if (mix->norm) {
 		for (n = 0; n < n_samples; n++) {
 			d[0][n] = s[0][n];
 			d[1][n] = s[1][n];
@@ -209,11 +239,10 @@ channelmix_f32_2_3p1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRI
 		}
 	}
 	else {
-		const float f = 0.5f * v;
 		for (n = 0; n < n_samples; n++) {
-			d[0][n] = s[0][n] * v;
-			d[1][n] = s[1][n] * v;
-			d[2][n] = (s[0][n] + s[1][n]) * f;
+			d[0][n] = s[0][n] * v0;
+			d[1][n] = s[1][n] * v1;
+			d[2][n] = (d[0][n] + d[1][n]) * 0.5f;
 			d[3][n] = 0.0f;
 		}
 	}
@@ -227,13 +256,16 @@ channelmix_f32_2_5p1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRI
 	uint32_t i, n;
 	float **d = (float **)dst;
 	const float **s = (const float **)src;
-	float v = mix->volume;
+	const float v0 = mix->matrix[0][0];
+	const float v1 = mix->matrix[1][1];
+	const float v4 = mix->matrix[4][0];
+	const float v5 = mix->matrix[5][1];
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		for (i = 0; i < n_dst; i++)
 			memset(d[i], 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
+	else if (mix->norm) {
 		for (n = 0; n < n_samples; n++) {
 			d[0][n] = d[4][n] = s[0][n];
 			d[1][n] = d[5][n] = s[1][n];
@@ -242,12 +274,13 @@ channelmix_f32_2_5p1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRI
 		}
 	}
 	else {
-		const float f = 0.5f * v;
 		for (n = 0; n < n_samples; n++) {
-			d[0][n] = d[4][n] = s[0][n] * v;
-			d[1][n] = d[5][n] = s[1][n] * v;
-			d[2][n] = (s[0][n] + s[1][n]) * f;
+			d[0][n] = s[0][n] * v0;
+			d[1][n] = s[1][n] * v1;
+			d[2][n] = (d[0][n] + d[1][n]) * 0.5f;
 			d[3][n] = 0.0f;
+			d[4][n] = s[0][n] * v4;
+			d[5][n] = s[1][n] * v5;
 		}
 	}
 }
@@ -260,28 +293,22 @@ channelmix_f32_5p1_2_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRI
 	uint32_t n;
 	float **d = (float **) dst;
 	const float **s = (const float **) src;
-	const float *m = mix->matrix;
-	const float clev = m[2];
-	const float llev = m[3];
-	const float slev = m[4];
-	float v = mix->volume;
+	const float v0 = mix->matrix[0][0];
+	const float v1 = mix->matrix[1][1];
+	const float clev = mix->matrix[2][0];
+	const float llev = mix->matrix[3][0];
+	const float slev0 = mix->matrix[4][0];
+	const float slev1 = mix->matrix[4][1];
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		memset(d[0], 0, n_samples * sizeof(float));
 		memset(d[1], 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
-		for (n = 0; n < n_samples; n++) {
-			const float ctr = clev * s[2][n] + llev * s[3][n];
-			d[0][n] = s[0][n] + ctr + (slev * s[4][n]);
-			d[1][n] = s[1][n] + ctr + (slev * s[5][n]);
-		}
-	}
 	else {
 		for (n = 0; n < n_samples; n++) {
 			const float ctr = clev * s[2][n] + llev * s[3][n];
-			d[0][n] = (s[0][n] + ctr + (slev * s[4][n])) * v;
-			d[1][n] = (s[1][n] + ctr + (slev * s[5][n])) * v;
+			d[0][n] = s[0][n] * v0 + ctr + (slev0 * s[4][n]);
+			d[1][n] = s[1][n] * v1 + ctr + (slev1 * s[5][n]);
 		}
 	}
 }
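In the 5.1 to stereo path above, the centre, LFE and surround channels are folded into the two front outputs through clev, llev and slev coefficients taken from the matrix. A worked example for the left output, assuming the default levels from make_matrix (clev = slev = SQRT1_2, llev = 0.5) and a unit front-left volume; purely illustrative, not part of the commit:

	/* Illustrative only: one left-output sample of the 5.1 -> stereo downmix,
	 * with the assumed default levels and a unit front-left volume. */
	static float downmix_left(float fl, float fc, float lfe, float sl)
	{
		const float v0   = 1.0f;	/* front-left volume folded into the matrix */
		const float clev = 0.7071f;	/* ~SQRT1_2 */
		const float llev = 0.5f;
		const float slev = 0.7071f;	/* ~SQRT1_2 */
		return fl * v0 + clev * fc + llev * lfe + slev * sl;
	}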
@@ -294,19 +321,23 @@ channelmix_f32_5p1_3p1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_REST
 	uint32_t i, n;
 	float **d = (float **) dst;
 	const float **s = (const float **) src;
-	float v = mix->volume;
+	const float v0 = mix->matrix[0][0];
+	const float v1 = mix->matrix[1][1];
+	const float v2 = mix->matrix[2][2];
+	const float v3 = mix->matrix[3][3];
+	const float v4 = mix->matrix[0][4];
+	const float v5 = mix->matrix[1][5];
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		for (i = 0; i < n_dst; i++)
 			memset(d[i], 0, n_samples * sizeof(float));
 	}
 	else {
-		const float f1 = 0.5f * v;
 		for (n = 0; n < n_samples; n++) {
-			d[0][n] = (s[0][n] + s[4][n]) * f1;
-			d[1][n] = (s[1][n] + s[5][n]) * f1;
-			d[2][n] = s[2][n] * v;
-			d[3][n] = s[3][n] * v;
+			d[0][n] = s[0][n] * v0 + s[4][n] * v4;
+			d[1][n] = s[1][n] * v1 + s[5][n] * v5;
+			d[2][n] = s[2][n] * v2;
+			d[3][n] = s[3][n] * v3;
 		}
 	}
 }
@@ -319,31 +350,24 @@ channelmix_f32_5p1_4_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRI
 	uint32_t i, n;
 	float **d = (float **) dst;
 	const float **s = (const float **) src;
-	const float *m = mix->matrix;
-	const float clev = m[2];
-	const float llev = m[3];
-	float v = mix->volume;
+	const float clev = mix->matrix[2][0];
+	const float llev = mix->matrix[3][0];
+	const float v0 = mix->matrix[0][0];
+	const float v1 = mix->matrix[1][1];
+	const float v4 = mix->matrix[2][4];
+	const float v5 = mix->matrix[3][5];
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		for (i = 0; i < n_dst; i++)
 			memset(d[i], 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
-		for (n = 0; n < n_samples; n++) {
-			const float ctr = s[2][n] * clev + s[3][n] * llev;
-			d[0][n] = s[0][n] + ctr;
-			d[1][n] = s[1][n] + ctr;
-			d[2][n] = s[4][n];
-			d[3][n] = s[5][n];
-		}
-	}
 	else {
 		for (n = 0; n < n_samples; n++) {
			const float ctr = s[2][n] * clev + s[3][n] * llev;
-			d[0][n] = (s[0][n] + ctr) * v;
-			d[1][n] = (s[1][n] + ctr) * v;
-			d[2][n] = s[4][n] * v;
-			d[3][n] = s[5][n] * v;
+			d[0][n] = s[0][n] * v0 + ctr;
+			d[1][n] = s[1][n] * v1 + ctr;
+			d[2][n] = s[4][n] * v4;
+			d[3][n] = s[5][n] * v5;
 		}
 	}
 }
@@ -358,28 +382,24 @@ channelmix_f32_7p1_2_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRI
 	uint32_t n;
 	float **d = (float **) dst;
 	const float **s = (const float **) src;
-	const float *m = mix->matrix;
-	const float clev = m[2];
-	const float llev = m[3];
-	const float slev = m[4];
-	float v = mix->volume;
+	const float v0 = mix->matrix[0][0];
+	const float v1 = mix->matrix[1][1];
+	const float clev = (mix->matrix[0][2] + mix->matrix[1][2]) * 0.5f;
+	const float llev = (mix->matrix[0][3] + mix->matrix[1][3]) * 0.5f;
+	const float slev0 = mix->matrix[0][4];
+	const float slev1 = mix->matrix[1][5];
+	const float rlev0 = mix->matrix[0][6];
+	const float rlev1 = mix->matrix[1][7];
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		memset(d[0], 0, n_samples * sizeof(float));
 		memset(d[1], 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
-		for (n = 0; n < n_samples; n++) {
-			const float ctr = clev * s[2][n] + llev * s[3][n];
-			d[0][n] = s[0][n] + ctr + (slev * (s[4][n] + s[6][n]));
-			d[1][n] = s[1][n] + ctr + (slev * (s[5][n] + s[7][n]));
-		}
-	}
 	else {
 		for (n = 0; n < n_samples; n++) {
 			const float ctr = clev * s[2][n] + llev * s[3][n];
-			d[0][n] = (s[0][n] + ctr + (slev * (s[4][n] + s[6][n]))) * v;
-			d[1][n] = (s[1][n] + ctr + (slev * (s[5][n] + s[6][n]))) * v;
+			d[0][n] = s[0][n] * v0 + ctr + s[4][n] * slev0 + s[6][n] * rlev0;
+			d[1][n] = s[1][n] * v1 + ctr + s[5][n] * slev1 + s[6][n] * rlev1;
 		}
 	}
 }
@@ -392,19 +412,23 @@ channelmix_f32_7p1_3p1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_REST
 	uint32_t i, n;
 	float **d = (float **) dst;
 	const float **s = (const float **) src;
-	float v = mix->volume;
+	const float v0 = mix->matrix[0][0];
+	const float v1 = mix->matrix[1][1];
+	const float v2 = mix->matrix[2][2];
+	const float v3 = mix->matrix[3][3];
+	const float v4 = (mix->matrix[0][4] + mix->matrix[0][6]) * 0.5f;
+	const float v5 = (mix->matrix[1][5] + mix->matrix[1][6]) * 0.5f;
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		for (i = 0; i < n_dst; i++)
 			memset(d[i], 0, n_samples * sizeof(float));
 	}
 	else {
-		const float f1 = 0.5 * v;
 		for (n = 0; n < n_samples; n++) {
-			d[0][n] = s[0][n] + (s[4][n] + s[6][n]) * f1;
-			d[1][n] = s[1][n] + (s[5][n] + s[7][n]) * f1;
-			d[2][n] = s[2][n] * v;
-			d[3][n] = s[3][n] * v;
+			d[0][n] = s[0][n] * v0 + (s[4][n] + s[6][n]) * v4;
+			d[1][n] = s[1][n] * v1 + (s[5][n] + s[7][n]) * v5;
+			d[2][n] = s[2][n] * v2;
+			d[3][n] = s[3][n] * v3;
 		}
 	}
 }
@@ -417,36 +441,28 @@ channelmix_f32_7p1_4_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRI
 	uint32_t i, n;
 	float **d = (float **) dst;
 	const float **s = (const float **) src;
-	const float *m = mix->matrix;
-	const float clev = m[2];
-	const float llev = m[3];
-	const float slev = m[4];
-	float v = mix->volume;
+	const float v0 = mix->matrix[0][0];
+	const float v1 = mix->matrix[1][1];
+	const float clev = (mix->matrix[0][2] + mix->matrix[1][2]) * 0.5f;
+	const float llev = (mix->matrix[0][3] + mix->matrix[1][3]) * 0.5f;
+	const float slev0 = mix->matrix[0][4];
+	const float slev1 = mix->matrix[1][5];
+	const float rlev0 = mix->matrix[0][6];
+	const float rlev1 = mix->matrix[1][7];
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		for (i = 0; i < n_dst; i++)
 			memset(d[i], 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
-		for (n = 0; n < n_samples; n++) {
-			const float ctr = s[2][n] * clev + s[3][n] * llev;
-			const float sl = s[4][n] * slev;
-			const float sr = s[5][n] * slev;
-			d[0][n] = s[0][n] + ctr + sl;
-			d[1][n] = s[1][n] + ctr + sr;
-			d[2][n] = s[6][n] + sl;
-			d[3][n] = s[7][n] + sr;
-		}
-	}
 	else {
 		for (n = 0; n < n_samples; n++) {
 			const float ctr = s[2][n] * clev + s[3][n] * llev;
-			const float sl = s[4][n] * slev;
-			const float sr = s[5][n] * slev;
-			d[0][n] = (s[0][n] + ctr + sl) * v;
-			d[1][n] = (s[1][n] + ctr + sr) * v;
-			d[2][n] = (s[6][n] + sl) * v;
-			d[3][n] = (s[7][n] + sr) * v;
+			const float sl = s[4][n] * slev0;
+			const float sr = s[5][n] * slev1;
+			d[0][n] = s[0][n] * v0 + ctr + sl;
+			d[1][n] = s[1][n] * v1 + ctr + sr;
+			d[2][n] = s[6][n] * rlev0 + sl;
+			d[3][n] = s[7][n] * rlev1 + sr;
 		}
 	}
 }
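The SSE variants below implement the same per-coefficient scaling, broadcasting each matrix entry into a vector register and processing four samples at a time. A minimal sketch of the pattern, assuming the SSE intrinsics from <xmmintrin.h>; not part of the commit, and using unaligned loads for simplicity where the real code relies on aligned buffers:

	#include <xmmintrin.h>

	/* Sketch only: scale four samples of one channel by one matrix coefficient,
	 * the same pattern the SSE paths below use with aligned loads and stores. */
	static void scale4(float *dst, const float *src, float coeff)
	{
		const __m128 v = _mm_set1_ps(coeff);	/* broadcast the coefficient */
		_mm_storeu_ps(dst, _mm_mul_ps(_mm_loadu_ps(src), v));
	}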
@@ -32,14 +32,12 @@ void channelmix_copy_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_REST
 	uint32_t i, n, unrolled;
 	float **d = (float **)dst;
 	const float **s = (const float **)src;
-	float *m = mix->matrix;
-	float v = mix->volume;
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		for (i = 0; i < n_dst; i++)
 			memset(d[i], 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
+	else if (mix->norm) {
 		for (i = 0; i < n_dst; i++)
 			spa_memcpy(d[i], s[i], n_samples * sizeof(float));
 	}
@@ -48,7 +46,7 @@ void channelmix_copy_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_REST
 		float *di = d[i];
 		const float *si = s[i];
 		__m128 t[4];
-		const __m128 vol = _mm_set1_ps(m[i * n_src + i]);
+		const __m128 vol = _mm_set1_ps(mix->matrix[i][i]);
 
 		if (SPA_IS_ALIGNED(di, 16) &&
 		    SPA_IS_ALIGNED(si, 16))
@@ -79,8 +77,8 @@ channelmix_f32_2_4_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRI
 	uint32_t i, n, unrolled;
 	float **d = (float **)dst;
 	const float **s = (const float **)src;
-	float v = mix->volume;
-	const __m128 vol = _mm_set1_ps(v);
+	const __m128 v0 = _mm_set1_ps(mix->matrix[0][0]);
+	const __m128 v1 = _mm_set1_ps(mix->matrix[1][1]);
 	__m128 in;
 	const float *sFL = s[0], *sFR = s[1];
 	float *dFL = d[0], *dFR = d[1], *dRL = d[2], *dRR = d[3];
@@ -95,11 +93,11 @@ channelmix_f32_2_4_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRI
 	else
 		unrolled = 0;
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		for (i = 0; i < n_dst; i++)
 			memset(d[i], 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
+	else if (mix->norm) {
 		for(n = 0; n < unrolled; n += 4) {
 			in = _mm_load_ps(&sFL[n]);
 			_mm_store_ps(&dFL[n], in);
@@ -119,18 +117,18 @@ channelmix_f32_2_4_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRI
 	}
 	else {
 		for(n = 0; n < unrolled; n += 4) {
-			in = _mm_mul_ps(_mm_load_ps(&sFL[n]), vol);
+			in = _mm_mul_ps(_mm_load_ps(&sFL[n]), v0);
 			_mm_store_ps(&dFL[n], in);
 			_mm_store_ps(&dRL[n], in);
-			in = _mm_mul_ps(_mm_load_ps(&sFR[n]), vol);
+			in = _mm_mul_ps(_mm_load_ps(&sFR[n]), v1);
 			_mm_store_ps(&dFR[n], in);
 			_mm_store_ps(&dRR[n], in);
 		}
 		for(; n < n_samples; n++) {
-			in = _mm_mul_ss(_mm_load_ss(&sFL[n]), vol);
+			in = _mm_mul_ss(_mm_load_ss(&sFL[n]), v0);
 			_mm_store_ss(&dFL[n], in);
 			_mm_store_ss(&dRL[n], in);
-			in = _mm_mul_ss(_mm_load_ss(&sFR[n]), vol);
+			in = _mm_mul_ss(_mm_load_ss(&sFR[n]), v1);
 			_mm_store_ss(&dFR[n], in);
 			_mm_store_ss(&dRR[n], in);
 		}
@@ -145,12 +143,12 @@ channelmix_f32_5p1_2_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_REST
 	uint32_t n, unrolled;
 	float **d = (float **) dst;
 	const float **s = (const float **) src;
-	float v = mix->volume;
-	const float *m = mix->matrix;
-	const __m128 clev = _mm_set1_ps(m[2]);
-	const __m128 llev = _mm_set1_ps(m[3]);
-	const __m128 slev = _mm_set1_ps(m[4]);
-	const __m128 vol = _mm_set1_ps(v);
+	const __m128 v0 = _mm_set1_ps(mix->matrix[0][0]);
+	const __m128 v1 = _mm_set1_ps(mix->matrix[1][1]);
+	const __m128 clev = _mm_set1_ps(mix->matrix[2][0]);
+	const __m128 llev = _mm_set1_ps(mix->matrix[3][0]);
+	const __m128 slev0 = _mm_set1_ps(mix->matrix[4][0]);
+	const __m128 slev1 = _mm_set1_ps(mix->matrix[4][1]);
 	__m128 in, ctr;
 	const float *sFL = s[0], *sFR = s[1], *sFC = s[2], *sLFE = s[3], *sSL = s[4], *sSR = s[5];
 	float *dFL = d[0], *dFR = d[1];
@@ -167,19 +165,19 @@ channelmix_f32_5p1_2_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_REST
 	else
 		unrolled = 0;
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		memset(dFL, 0, n_samples * sizeof(float));
 		memset(dFR, 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
+	else if (mix->norm) {
 		for(n = 0; n < unrolled; n += 4) {
 			ctr = _mm_mul_ps(_mm_load_ps(&sFC[n]), clev);
 			ctr = _mm_add_ps(ctr, _mm_mul_ps(_mm_load_ps(&sLFE[n]), llev));
-			in = _mm_mul_ps(_mm_load_ps(&sSL[n]), slev);
+			in = _mm_mul_ps(_mm_load_ps(&sSL[n]), slev0);
 			in = _mm_add_ps(in, ctr);
 			in = _mm_add_ps(in, _mm_load_ps(&sFL[n]));
 			_mm_store_ps(&dFL[n], in);
-			in = _mm_mul_ps(_mm_load_ps(&sSR[n]), slev);
+			in = _mm_mul_ps(_mm_load_ps(&sSR[n]), slev1);
 			in = _mm_add_ps(in, ctr);
 			in = _mm_add_ps(in, _mm_load_ps(&sFR[n]));
 			_mm_store_ps(&dFR[n], in);
@@ -187,11 +185,11 @@ channelmix_f32_5p1_2_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_REST
 		for(; n < n_samples; n++) {
 			ctr = _mm_mul_ss(_mm_load_ss(&sFC[n]), clev);
 			ctr = _mm_add_ss(ctr, _mm_mul_ss(_mm_load_ss(&sLFE[n]), llev));
-			in = _mm_mul_ss(_mm_load_ss(&sSL[n]), slev);
+			in = _mm_mul_ss(_mm_load_ss(&sSL[n]), slev0);
 			in = _mm_add_ss(in, ctr);
 			in = _mm_add_ss(in, _mm_load_ss(&sFL[n]));
 			_mm_store_ss(&dFL[n], in);
-			in = _mm_mul_ss(_mm_load_ss(&sSR[n]), slev);
+			in = _mm_mul_ss(_mm_load_ss(&sSR[n]), slev1);
 			in = _mm_add_ss(in, ctr);
 			in = _mm_add_ss(in, _mm_load_ss(&sFR[n]));
 			_mm_store_ss(&dFR[n], in);
@@ -201,29 +199,29 @@ channelmix_f32_5p1_2_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_REST
 		for(n = 0; n < unrolled; n += 4) {
 			ctr = _mm_mul_ps(_mm_load_ps(&sFC[n]), clev);
 			ctr = _mm_add_ps(ctr, _mm_mul_ps(_mm_load_ps(&sLFE[n]), llev));
-			in = _mm_mul_ps(_mm_load_ps(&sSL[n]), slev);
+			in = _mm_mul_ps(_mm_load_ps(&sSL[n]), slev0);
 			in = _mm_add_ps(in, ctr);
 			in = _mm_add_ps(in, _mm_load_ps(&sFL[n]));
-			in = _mm_mul_ps(in, vol);
+			in = _mm_mul_ps(in, v0);
 			_mm_store_ps(&dFL[n], in);
-			in = _mm_mul_ps(_mm_load_ps(&sSR[n]), slev);
+			in = _mm_mul_ps(_mm_load_ps(&sSR[n]), slev1);
 			in = _mm_add_ps(in, ctr);
 			in = _mm_add_ps(in, _mm_load_ps(&sFR[n]));
-			in = _mm_mul_ps(in, vol);
+			in = _mm_mul_ps(in, v1);
 			_mm_store_ps(&dFR[n], in);
 		}
 		for(; n < n_samples; n++) {
 			ctr = _mm_mul_ss(_mm_load_ss(&sFC[n]), clev);
 			ctr = _mm_add_ss(ctr, _mm_mul_ss(_mm_load_ss(&sLFE[n]), llev));
-			in = _mm_mul_ss(_mm_load_ss(&sSL[n]), slev);
+			in = _mm_mul_ss(_mm_load_ss(&sSL[n]), slev0);
 			in = _mm_add_ss(in, ctr);
 			in = _mm_add_ss(in, _mm_load_ss(&sFL[n]));
-			in = _mm_mul_ss(in, vol);
+			in = _mm_mul_ss(in, v0);
 			_mm_store_ss(&dFL[n], in);
-			in = _mm_mul_ss(_mm_load_ss(&sSR[n]), slev);
+			in = _mm_mul_ss(_mm_load_ss(&sSR[n]), slev1);
 			in = _mm_add_ss(in, ctr);
 			in = _mm_add_ss(in, _mm_load_ss(&sFR[n]));
-			in = _mm_mul_ss(in, vol);
+			in = _mm_mul_ss(in, v1);
 			_mm_store_ss(&dFR[n], in);
 		}
 	}
@@ -237,9 +235,12 @@ channelmix_f32_5p1_3p1_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_RE
 	uint32_t i, n, unrolled;
 	float **d = (float **) dst;
 	const float **s = (const float **) src;
-	float v = mix->volume;
-	const __m128 slev = _mm_set1_ps(v * 0.5f);
-	const __m128 vol = _mm_set1_ps(v);
+	const __m128 v0 = _mm_set1_ps(mix->matrix[0][0]);
+	const __m128 v1 = _mm_set1_ps(mix->matrix[1][1]);
+	const __m128 slev0 = _mm_set1_ps(mix->matrix[0][4]);
+	const __m128 slev1 = _mm_set1_ps(mix->matrix[1][5]);
+	const __m128 v2 = _mm_set1_ps(mix->matrix[2][2]);
+	const __m128 v3 = _mm_set1_ps(mix->matrix[3][3]);
 	__m128 avg[2];
 	const float *sFL = s[0], *sFR = s[1], *sFC = s[2], *sLFE = s[3], *sSL = s[4], *sSR = s[5];
 	float *dFL = d[0], *dFR = d[1], *dFC = d[2], *dLFE = d[3];
@@ -258,56 +259,48 @@ channelmix_f32_5p1_3p1_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_RE
 	else
 		unrolled = 0;
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		for (i = 0; i < n_dst; i++)
 			memset(d[i], 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
-		for(n = 0; n < unrolled; n += 8) {
-			avg[0] = _mm_add_ps(_mm_load_ps(&sFL[n]), _mm_load_ps(&sSL[n]));
-			avg[1] = _mm_add_ps(_mm_load_ps(&sFL[n+4]), _mm_load_ps(&sSL[n+4]));
-			_mm_store_ps(&dFL[n], _mm_mul_ps(avg[0], slev));
-			_mm_store_ps(&dFL[n+4], _mm_mul_ps(avg[1], slev));
-			avg[0] = _mm_add_ps(_mm_load_ps(&sFR[n]), _mm_load_ps(&sSR[n]));
-			avg[1] = _mm_add_ps(_mm_load_ps(&sFR[n+4]), _mm_load_ps(&sSR[n+4]));
-			_mm_store_ps(&dFR[n], _mm_mul_ps(avg[0], slev));
-			_mm_store_ps(&dFR[n+4], _mm_mul_ps(avg[1], slev));
-			_mm_store_ps(&dFC[n], _mm_load_ps(&sFC[n]));
-			_mm_store_ps(&dFC[n+4], _mm_load_ps(&sFC[n+4]));
-			_mm_store_ps(&dLFE[n], _mm_load_ps(&sLFE[n]));
-			_mm_store_ps(&dLFE[n+4], _mm_load_ps(&sLFE[n+4]));
-		}
-		for(; n < n_samples; n++) {
-			avg[0] = _mm_add_ss(_mm_load_ss(&sFL[n]), _mm_load_ss(&sSL[n]));
-			_mm_store_ss(&dFL[n], _mm_mul_ss(avg[0], slev));
-			avg[0] = _mm_add_ss(_mm_load_ss(&sFR[n]), _mm_load_ss(&sSR[n]));
-			_mm_store_ss(&dFR[n], _mm_mul_ss(avg[0], slev));
-			_mm_store_ss(&dFC[n], _mm_load_ss(&sFC[n]));
-			_mm_store_ss(&dLFE[n], _mm_load_ss(&sLFE[n]));
-		}
-	}
 	else {
 		for(n = 0; n < unrolled; n += 8) {
-			avg[0] = _mm_add_ps(_mm_load_ps(&sFL[n]), _mm_load_ps(&sSL[n]));
-			avg[1] = _mm_add_ps(_mm_load_ps(&sFL[n+4]), _mm_load_ps(&sSL[n+4]));
-			_mm_store_ps(&dFL[n], _mm_mul_ps(avg[0], slev));
-			_mm_store_ps(&dFL[n+4], _mm_mul_ps(avg[1], slev));
-			avg[0] = _mm_add_ps(_mm_load_ps(&sFR[n]), _mm_load_ps(&sSR[n]));
-			avg[1] = _mm_add_ps(_mm_load_ps(&sFR[n+4]), _mm_load_ps(&sSR[n+4]));
-			_mm_store_ps(&dFR[n], _mm_mul_ps(avg[0], slev));
-			_mm_store_ps(&dFR[n+4], _mm_mul_ps(avg[1], slev));
-			_mm_store_ps(&dFC[n], _mm_mul_ps(_mm_load_ps(&sFC[n]), vol));
-			_mm_store_ps(&dFC[n+4], _mm_mul_ps(_mm_load_ps(&sFC[n+4]), vol));
-			_mm_store_ps(&dLFE[n], _mm_mul_ps(_mm_load_ps(&sLFE[n]), vol));
-			_mm_store_ps(&dLFE[n+4], _mm_mul_ps(_mm_load_ps(&sLFE[n+4]), vol));
+			avg[0] = _mm_add_ps(
+					_mm_mul_ps(_mm_load_ps(&sFL[n]), v0),
+					_mm_mul_ps(_mm_load_ps(&sSL[n]), slev0));
+			avg[1] = _mm_add_ps(
+					_mm_mul_ps(_mm_load_ps(&sFL[n+4]), v0),
+					_mm_mul_ps(_mm_load_ps(&sSL[n+4]), slev0));
+			_mm_store_ps(&dFL[n], avg[0]);
+			_mm_store_ps(&dFL[n+4], avg[1]);
+
+			avg[0] = _mm_add_ps(
+					_mm_mul_ps(_mm_load_ps(&sFR[n]), v1),
+					_mm_mul_ps(_mm_load_ps(&sSR[n]), slev1));
+			avg[1] = _mm_add_ps(
+					_mm_mul_ps(_mm_load_ps(&sFR[n+4]), v1),
+					_mm_mul_ps(_mm_load_ps(&sSR[n+4]), slev1));
+			_mm_store_ps(&dFR[n], avg[0]);
+			_mm_store_ps(&dFR[n+4], avg[1]);
+
+			_mm_store_ps(&dFC[n], _mm_mul_ps(_mm_load_ps(&sFC[n]), v2));
+			_mm_store_ps(&dFC[n+4], _mm_mul_ps(_mm_load_ps(&sFC[n+4]), v2));
+			_mm_store_ps(&dLFE[n], _mm_mul_ps(_mm_load_ps(&sLFE[n]), v3));
+			_mm_store_ps(&dLFE[n+4], _mm_mul_ps(_mm_load_ps(&sLFE[n+4]), v3));
 		}
 		for(; n < n_samples; n++) {
-			avg[0] = _mm_add_ss(_mm_load_ss(&sFL[n]), _mm_load_ss(&sSL[n]));
-			_mm_store_ss(&dFL[n], _mm_mul_ss(avg[0], slev));
-			avg[0] = _mm_add_ss(_mm_load_ss(&sFR[n]), _mm_load_ss(&sSR[n]));
-			_mm_store_ss(&dFR[n], _mm_mul_ss(avg[0], slev));
-			_mm_store_ss(&dFC[n], _mm_mul_ss(_mm_load_ss(&sFC[n]), vol));
-			_mm_store_ss(&dLFE[n], _mm_mul_ss(_mm_load_ss(&sLFE[n]), vol));
+			avg[0] = _mm_add_ss(
+					_mm_mul_ss(_mm_load_ss(&sFL[n]), v0),
+					_mm_mul_ss(_mm_load_ss(&sSL[n]), slev0));
+			_mm_store_ss(&dFL[n], avg[0]);
+
+			avg[0] = _mm_add_ss(
+					_mm_mul_ss(_mm_load_ss(&sFR[n]), v1),
+					_mm_mul_ss(_mm_load_ss(&sSR[n]), slev1));
+			_mm_store_ss(&dFR[n], avg[0]);
+
+			_mm_store_ss(&dFC[n], _mm_mul_ss(_mm_load_ss(&sFC[n]), v2));
+			_mm_store_ss(&dLFE[n], _mm_mul_ss(_mm_load_ss(&sLFE[n]), v3));
 		}
 	}
 }
@@ -320,11 +313,10 @@ channelmix_f32_5p1_4_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_REST
 	uint32_t i, n, unrolled;
 	float **d = (float **) dst;
 	const float **s = (const float **) src;
-	const float *m = mix->matrix;
-	float v = mix->volume;
-	const __m128 clev = _mm_set1_ps(m[2]);
-	const __m128 llev = _mm_set1_ps(m[3]);
-	const __m128 vol = _mm_set1_ps(v);
+	const __m128 clev = _mm_set1_ps(mix->matrix[2][2]);
+	const __m128 llev = _mm_set1_ps(mix->matrix[3][3]);
+	const __m128 v0 = _mm_set1_ps(mix->matrix[0][0]);
+	const __m128 v1 = _mm_set1_ps(mix->matrix[1][1]);
 	__m128 ctr;
 	const float *sFL = s[0], *sFR = s[1], *sFC = s[2], *sLFE = s[3], *sSL = s[4], *sSR = s[5];
 	float *dFL = d[0], *dFR = d[1], *dRL = d[2], *dRR = d[3];
@@ -343,11 +335,11 @@ channelmix_f32_5p1_4_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_REST
 	else
 		unrolled = 0;
 
-	if (v <= VOLUME_MIN) {
+	if (mix->zero) {
 		for (i = 0; i < n_dst; i++)
 			memset(d[i], 0, n_samples * sizeof(float));
 	}
-	else if (v == VOLUME_NORM) {
+	else if (mix->norm) {
 		for(n = 0; n < unrolled; n += 4) {
 			ctr = _mm_mul_ps(_mm_load_ps(&sFC[n]), clev);
 			ctr = _mm_add_ps(ctr, _mm_mul_ps(_mm_load_ps(&sLFE[n]), llev));
@@ -369,18 +361,18 @@ channelmix_f32_5p1_4_sse(struct channelmix *mix, uint32_t n_dst, void * SPA_REST
 		for(n = 0; n < unrolled; n += 4) {
 			ctr = _mm_mul_ps(_mm_load_ps(&sFC[n]), clev);
 			ctr = _mm_add_ps(ctr, _mm_mul_ps(_mm_load_ps(&sLFE[n]), llev));
-			_mm_store_ps(&dFL[n], _mm_mul_ps(_mm_add_ps(_mm_load_ps(&sFL[n]), ctr), vol));
-			_mm_store_ps(&dFR[n], _mm_mul_ps(_mm_add_ps(_mm_load_ps(&sFR[n]), ctr), vol));
-			_mm_store_ps(&dRL[n], _mm_mul_ps(_mm_load_ps(&sSL[n]), vol));
-			_mm_store_ps(&dRR[n], _mm_mul_ps(_mm_load_ps(&sSR[n]), vol));
+			_mm_store_ps(&dFL[n], _mm_mul_ps(_mm_add_ps(_mm_load_ps(&sFL[n]), ctr), v0));
+			_mm_store_ps(&dFR[n], _mm_mul_ps(_mm_add_ps(_mm_load_ps(&sFR[n]), ctr), v1));
+			_mm_store_ps(&dRL[n], _mm_mul_ps(_mm_load_ps(&sSL[n]), v0));
+			_mm_store_ps(&dRR[n], _mm_mul_ps(_mm_load_ps(&sSR[n]), v1));
 		}
 		for(; n < n_samples; n++) {
 			ctr = _mm_mul_ss(_mm_load_ss(&sFC[n]), clev);
 			ctr = _mm_add_ss(ctr, _mm_mul_ss(_mm_load_ss(&sLFE[n]), llev));
-			_mm_store_ss(&dFL[n], _mm_mul_ss(_mm_add_ss(_mm_load_ss(&sFL[n]), ctr), vol));
-			_mm_store_ss(&dFR[n], _mm_mul_ss(_mm_add_ss(_mm_load_ss(&sFR[n]), ctr), vol));
-			_mm_store_ss(&dRL[n], _mm_mul_ss(_mm_load_ss(&sSL[n]), vol));
-			_mm_store_ss(&dRR[n], _mm_mul_ss(_mm_load_ss(&sSR[n]), vol));
+			_mm_store_ss(&dFL[n], _mm_mul_ss(_mm_add_ss(_mm_load_ss(&sFL[n]), ctr), v0));
+			_mm_store_ss(&dFR[n], _mm_mul_ss(_mm_add_ss(_mm_load_ss(&sFR[n]), ctr), v1));
+			_mm_store_ss(&dRL[n], _mm_mul_ss(_mm_load_ss(&sSL[n]), v0));
+			_mm_store_ss(&dRR[n], _mm_mul_ss(_mm_load_ss(&sSR[n]), v1));
 		}
 	}
 }
@@ -162,7 +162,7 @@ static int make_matrix(struct channelmix *mix)
 	uint64_t src_mask = mix->src_mask;
 	uint64_t dst_mask = mix->dst_mask;
 	uint64_t unassigned;
-	uint32_t i, j, matrix_encoding = MATRIX_NORMAL, c;
+	uint32_t i, j, ic, jc, matrix_encoding = MATRIX_NORMAL;
 	float clev = SQRT1_2;
 	float slev = SQRT1_2;
 	float llev = 0.5f;
@@ -325,17 +325,17 @@ static int make_matrix(struct channelmix *mix)
 		}
 	}
 
-	c = 0;
-	for (i = 0; i < NUM_CHAN; i++) {
+	for (ic = 0, i = 0; i < NUM_CHAN; i++) {
 		float sum = 0.0f;
 		if ((dst_mask & (1UL << (i + 2))) == 0)
 			continue;
-		for (j = 0; j < NUM_CHAN; j++) {
+		for (jc = 0, j = 0; j < NUM_CHAN; j++) {
 			if ((src_mask & (1UL << (j + 2))) == 0)
 				continue;
-			mix->matrix_orig[c++] = matrix[i][j];
+			mix->matrix_orig[ic][jc++] = matrix[i][j];
 			sum += fabs(matrix[i][j]);
 		}
+		ic++;
 		max = SPA_MAX(max, sum);
 	}
 	return 0;
@@ -345,46 +345,56 @@ static void impl_channelmix_set_volume(struct channelmix *mix, float volume, boo
 		uint32_t n_channel_volumes, float *channel_volumes)
 {
 	float volumes[SPA_AUDIO_MAX_CHANNELS];
-	float vol = mute ? 0.0f : volume, sum;
+	float vol = mute ? 0.0f : volume, sum, t;
 	uint32_t i, j;
 	uint32_t src_chan = mix->src_chan;
 	uint32_t dst_chan = mix->dst_chan;
 
 	/** apply global volume to channels */
 	sum = 0.0;
+	mix->norm = true;
 	for (i = 0; i < n_channel_volumes; i++) {
 		volumes[i] = channel_volumes[i] * vol;
+		if (volumes[i] != 1.0f)
+			mix->norm = false;
 		sum += volumes[i];
 	}
 	mix->volume = sum / n_channel_volumes;
 
 	if (n_channel_volumes == src_chan) {
 		for (i = 0; i < dst_chan; i++) {
 			for (j = 0; j < src_chan; j++) {
-				float v = mix->matrix_orig[i * src_chan + j];
-				mix->matrix[i * src_chan + j] = v * volumes[j];
+				mix->matrix[i][j] = mix->matrix_orig[i][j] * volumes[j];
 			}
 		}
 	} else if (n_channel_volumes == dst_chan) {
 		for (i = 0; i < dst_chan; i++) {
 			for (j = 0; j < src_chan; j++) {
-				float v = mix->matrix_orig[i * src_chan + j];
-				mix->matrix[i * src_chan + j] = v * volumes[i];
+				mix->matrix[i][j] = mix->matrix_orig[i][j] * volumes[i];
 			}
 		}
 	}
 
-	mix->is_identity = dst_chan == src_chan;
+	mix->zero = true;
+	mix->equal = true;
+	mix->identity = dst_chan == src_chan;
+	t = 0.0;
 
 	for (i = 0; i < dst_chan; i++) {
 		for (j = 0; j < src_chan; j++) {
-			float v = mix->matrix[i * src_chan + j];
+			float v = mix->matrix[i][j];
 			spa_log_debug(mix->log, "%d %d: %f", i, j, v);
+			if (i == 0 && j == 0)
+				t = v;
+			else if (t != v)
+				mix->equal = false;
+			if (v != 0.0)
+				mix->zero = false;
 			if ((i == j && v != 1.0f) ||
 			    (i != j && v != 0.0f))
-				mix->is_identity = false;
+				mix->identity = false;
 		}
 	}
-	spa_log_debug(mix->log, "vol:%f, identity:%d", mix->volume, mix->is_identity);
+	spa_log_debug(mix->log, "zero:%d norm:%d identity:%d", mix->zero, mix->norm, mix->identity);
 }
 
 static void impl_channelmix_free(struct channelmix *mix)
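After impl_channelmix_set_volume runs, the flags summarize the resulting matrix: zero (every coefficient is 0.0, e.g. when muted), norm (all folded channel volumes are exactly 1.0), equal (every coefficient shares one value, so a single multiply serves all channels) and identity (unit diagonal, plain copy is enough). A worked example, illustrative only and not taken from the commit:

	/* Illustrative example: stereo in/out, matrix_orig is the identity and the
	 * channel volumes are { 1.0f, 0.5f }. Folding the volumes gives
	 *
	 *     matrix = { { 1.0f, 0.0f },
	 *                { 0.0f, 0.5f } }
	 *
	 * so zero = false, norm = false (one volume differs from 1.0), equal = false
	 * (the entries differ) and identity = false (matrix[1][1] != 1.0f). With both
	 * volumes at 1.0f the matrix stays the identity and the copy and
	 * pass-through fast paths remain available. */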
@@ -49,11 +49,12 @@ struct channelmix {
 
 	struct spa_log *log;
 
-	unsigned int is_identity:1;
 	float volume;
-	float matrix_orig[SPA_AUDIO_MAX_CHANNELS * SPA_AUDIO_MAX_CHANNELS];
-
-	float matrix[SPA_AUDIO_MAX_CHANNELS * SPA_AUDIO_MAX_CHANNELS];
+	unsigned int zero:1;		/* all zero components */
+	unsigned int identity:1;	/* identity matrix */
+	unsigned int norm:1;		/* all normal values */
+	unsigned int equal:1;		/* all values are equal */
+	float matrix_orig[SPA_AUDIO_MAX_CHANNELS][SPA_AUDIO_MAX_CHANNELS];
+	float matrix[SPA_AUDIO_MAX_CHANNELS][SPA_AUDIO_MAX_CHANNELS];
 
 	void (*process) (struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
 			uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples);
@@ -245,11 +245,11 @@ static int setup_convert(struct impl *this,
 
 	emit_params_changed(this);
 
-	spa_log_info(this->log, NAME " %p: got channelmix features %08x:%08x %d",
+	spa_log_info(this->log, NAME " %p: got channelmix features %08x:%08x identity:%d",
 			this, this->cpu_flags, this->mix.cpu_flags,
-			this->mix.is_identity);
+			this->mix.identity);
 
-	this->is_passthrough = this->mix.is_identity;
+	this->is_passthrough = this->mix.identity;
 
 	return 0;
 }
@@ -916,9 +916,7 @@ static int impl_node_process(void *object)
 	void *dst_datas[n_dst_datas];
 	bool is_passthrough;
 
-	is_passthrough = this->is_passthrough &&
-		this->mix.volume == 1.0f &&
-		this->mix.is_identity;
+	is_passthrough = this->is_passthrough && this->mix.identity;
 
 	n_samples = sb->datas[0].chunk->size / inport->stride;
 
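With the channel volumes folded into the matrix, the identity flag already implies that no per-sample scaling is needed, so the node no longer has to check mix.volume == 1.0f separately before going pass-through. A sketch of the resulting condition (helper name is hypothetical, not part of the commit):

	/* Sketch only: pass-through is possible exactly when the final matrix,
	 * volumes included, is the identity. */
	static inline int can_passthrough(const struct channelmix *mix)
	{
		return mix->identity;
	}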