audioconvert: implement passthrough
Add some const and SPA_RESTRICT qualifiers to the methods.

When the input and the output are the same, work in passthrough mode: simply place the input pointer on the output buffer instead of doing a memcpy. Do a memcpy in the resampler when it is not active (input and output rates are equal).
parent 74cf412f47
commit 0505f2dc98
9 changed files with 247 additions and 167 deletions
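The change is easiest to see as a sketch. Below is a minimal, self-contained illustration of the passthrough idea, not the PipeWire code itself; the names process_passthrough and convert_func_t are made up for the example, while is_passthrough, the src/dst data arrays and the convert callback loosely mirror what the diffs below add to the converter nodes.

#include <stdbool.h>

/* Sketch of the passthrough path: when no conversion is needed, hand the
 * input pointers straight to the output instead of converting (or copying)
 * into a separate output buffer. */
typedef void (*convert_func_t)(void *dst[], const void *src[],
			       int n_datas, int n_samples);

static void process_passthrough(bool is_passthrough, convert_func_t convert,
				void *dst[], const void *src[],
				int n_datas, int n_samples)
{
	int i;

	if (is_passthrough) {
		/* reuse the input data pointers directly */
		for (i = 0; i < n_datas; i++)
			dst[i] = (void *) src[i];
	} else {
		/* normal path: run the converter into the output buffers */
		convert(dst, src, n_datas, n_samples);
	}
}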
@@ -25,13 +25,14 @@
 #include <xmmintrin.h>
 
 static void
-channelmix_copy_sse(void *data, int n_dst, void *dst[n_dst],
-		int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
+channelmix_copy_sse(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
+		int n_src, const void * SPA_RESTRICT src[n_src],
+		const void *matrix, float v, int n_samples)
 {
 	int i, n, unrolled;
 	float **d = (float **)dst;
-	float **s = (float **)src;
-	__m128 vol = _mm_set1_ps(v);
+	const float **s = (const float **)src;
+	const __m128 vol = _mm_set1_ps(v);
 
 	if (v <= VOLUME_MIN) {
 		for (i = 0; i < n_dst; i++)
@@ -43,16 +44,26 @@ channelmix_copy_sse(void *data, int n_dst, void *dst[n_dst],
 	}
 	else {
 		for (i = 0; i < n_dst; i++) {
-			float *di = d[i], *si = s[i];
+			float *di = d[i];
+			const float *si = s[i];
+			__m128 t[4];
 
 			if (SPA_IS_ALIGNED(di, 16) &&
 			    SPA_IS_ALIGNED(si, 16))
-				unrolled = n_samples / 4;
+				unrolled = n_samples / 16;
 			else
 				unrolled = 0;
 
-			for(n = 0; unrolled--; n += 4)
-				_mm_store_ps(&di[n], _mm_mul_ps(_mm_load_ps(&si[n]), vol));
+			for(n = 0; unrolled--; n += 16) {
+				t[0] = _mm_load_ps(&si[n]);
+				t[1] = _mm_load_ps(&si[n+4]);
+				t[2] = _mm_load_ps(&si[n+8]);
+				t[3] = _mm_load_ps(&si[n+12]);
+				_mm_store_ps(&di[n], _mm_mul_ps(t[0], vol));
+				_mm_store_ps(&di[n+4], _mm_mul_ps(t[1], vol));
+				_mm_store_ps(&di[n+8], _mm_mul_ps(t[2], vol));
+				_mm_store_ps(&di[n+12], _mm_mul_ps(t[3], vol));
+			}
 			for(; n < n_samples; n++)
 				_mm_store_ss(&di[n], _mm_mul_ss(_mm_load_ss(&si[n]), vol));
 		}
@ -60,15 +71,16 @@ channelmix_copy_sse(void *data, int n_dst, void *dst[n_dst],
|
|||
}
|
||||
|
||||
static void
|
||||
channelmix_f32_2_4_sse(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_2_4_sse(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int i, n, unrolled;
|
||||
float **d = (float **)dst;
|
||||
float **s = (float **)src;
|
||||
__m128 vol = _mm_set1_ps(v);
|
||||
const float **s = (const float **)src;
|
||||
const __m128 vol = _mm_set1_ps(v);
|
||||
__m128 in;
|
||||
float *sFL = s[0], *sFR = s[1];
|
||||
const float *sFL = s[0], *sFR = s[1];
|
||||
float *dFL = d[0], *dFR = d[1], *dRL = d[2], *dRR = d[3];
|
||||
|
||||
if (SPA_IS_ALIGNED(sFL, 16) &&
|
||||
|
|
@ -125,19 +137,20 @@ channelmix_f32_2_4_sse(void *data, int n_dst, void *dst[n_dst],
|
|||
|
||||
/* FL+FR+FC+LFE+SL+SR -> FL+FR */
|
||||
static void
|
||||
channelmix_f32_5p1_2_sse(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_5p1_2_sse(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int n, unrolled;
|
||||
float **d = (float **) dst;
|
||||
float **s = (float **) src;
|
||||
float *m = matrix;
|
||||
__m128 clev = _mm_set1_ps(m[2]);
|
||||
__m128 llev = _mm_set1_ps(m[3]);
|
||||
__m128 slev = _mm_set1_ps(m[4]);
|
||||
__m128 vol = _mm_set1_ps(v);
|
||||
const float **s = (const float **) src;
|
||||
const float *m = matrix;
|
||||
const __m128 clev = _mm_set1_ps(m[2]);
|
||||
const __m128 llev = _mm_set1_ps(m[3]);
|
||||
const __m128 slev = _mm_set1_ps(m[4]);
|
||||
const __m128 vol = _mm_set1_ps(v);
|
||||
__m128 in, ctr;
|
||||
float *sFL = s[0], *sFR = s[1], *sFC = s[2], *sLFE = s[3], *sSL = s[4], *sSR = s[5];
|
||||
const float *sFL = s[0], *sFR = s[1], *sFC = s[2], *sLFE = s[3], *sSL = s[4], *sSR = s[5];
|
||||
float *dFL = d[0], *dFR = d[1];
|
||||
|
||||
if (SPA_IS_ALIGNED(sFL, 16) &&
|
||||
|
|
@ -216,16 +229,17 @@ channelmix_f32_5p1_2_sse(void *data, int n_dst, void *dst[n_dst],
|
|||
|
||||
/* FL+FR+FC+LFE+SL+SR -> FL+FR+FC+LFE*/
|
||||
static void
|
||||
channelmix_f32_5p1_3p1_sse(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_5p1_3p1_sse(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int i, n, unrolled;
|
||||
float **d = (float **) dst;
|
||||
float **s = (float **) src;
|
||||
__m128 mix = _mm_set1_ps(v * 0.5f);
|
||||
__m128 vol = _mm_set1_ps(v);
|
||||
const float **s = (const float **) src;
|
||||
const __m128 mix = _mm_set1_ps(v * 0.5f);
|
||||
const __m128 vol = _mm_set1_ps(v);
|
||||
__m128 avg[2];
|
||||
float *sFL = s[0], *sFR = s[1], *sFC = s[2], *sLFE = s[3], *sSL = s[4], *sSR = s[5];
|
||||
const float *sFL = s[0], *sFR = s[1], *sFC = s[2], *sLFE = s[3], *sSL = s[4], *sSR = s[5];
|
||||
float *dFL = d[0], *dFR = d[1], *dFC = d[2], *dLFE = d[3];
|
||||
|
||||
if (SPA_IS_ALIGNED(sFL, 16) &&
|
||||
|
|
@ -298,18 +312,19 @@ channelmix_f32_5p1_3p1_sse(void *data, int n_dst, void *dst[n_dst],
|
|||
|
||||
/* FL+FR+FC+LFE+SL+SR -> FL+FR+RL+RR*/
|
||||
static void
|
||||
channelmix_f32_5p1_4_sse(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_5p1_4_sse(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int i, n, unrolled;
|
||||
float **d = (float **) dst;
|
||||
float **s = (float **) src;
|
||||
float *m = matrix;
|
||||
__m128 clev = _mm_set1_ps(m[2]);
|
||||
__m128 llev = _mm_set1_ps(m[3]);
|
||||
__m128 vol = _mm_set1_ps(v);
|
||||
const float **s = (const float **) src;
|
||||
const float *m = matrix;
|
||||
const __m128 clev = _mm_set1_ps(m[2]);
|
||||
const __m128 llev = _mm_set1_ps(m[3]);
|
||||
const __m128 vol = _mm_set1_ps(v);
|
||||
__m128 ctr;
|
||||
float *sFL = s[0], *sFR = s[1], *sFC = s[2], *sLFE = s[3], *sSL = s[4], *sSR = s[5];
|
||||
const float *sFL = s[0], *sFR = s[1], *sFC = s[2], *sLFE = s[3], *sSL = s[4], *sSR = s[5];
|
||||
float *dFL = d[0], *dFR = d[1], *dRL = d[2], *dRR = d[3];
|
||||
|
||||
if (SPA_IS_ALIGNED(sFL, 16) &&
|
||||
|
|
|
|||
|
|
@ -36,12 +36,13 @@
|
|||
#endif
|
||||
|
||||
static void
|
||||
channelmix_copy(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_copy(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int i, n;
|
||||
float **d = (float **)dst;
|
||||
float **s = (float **)src;
|
||||
const float **s = (const float **)src;
|
||||
|
||||
if (v <= VOLUME_MIN) {
|
||||
for (i = 0; i < n_dst; i++)
|
||||
|
|
@ -61,13 +62,14 @@ channelmix_copy(void *data, int n_dst, void *dst[n_dst],
|
|||
#define _M(ch) (1UL << SPA_AUDIO_CHANNEL_ ## ch)
|
||||
|
||||
static void
|
||||
channelmix_f32_n_m(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_n_m(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int i, j, n;
|
||||
float **d = (float **) dst;
|
||||
float **s = (float **) src;
|
||||
float *m = matrix;
|
||||
const float **s = (const float **) src;
|
||||
const float *m = matrix;
|
||||
|
||||
for (n = 0; n < n_samples; n++) {
|
||||
for (i = 0; i < n_dst; i++) {
|
||||
|
|
@ -83,12 +85,13 @@ channelmix_f32_n_m(void *data, int n_dst, void *dst[n_dst],
|
|||
#define MASK_STEREO _M(FL)|_M(FR)|_M(UNKNOWN)
|
||||
|
||||
static void
|
||||
channelmix_f32_1_2(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_1_2(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int n;
|
||||
float **d = (float **)dst;
|
||||
float **s = (float **)src;
|
||||
const float **s = (const float **)src;
|
||||
|
||||
if (v <= VOLUME_MIN) {
|
||||
memset(d[0], 0, n_samples * sizeof(float));
|
||||
|
|
@ -105,12 +108,13 @@ channelmix_f32_1_2(void *data, int n_dst, void *dst[n_dst],
|
|||
}
|
||||
|
||||
static void
|
||||
channelmix_f32_2_1(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_2_1(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int n;
|
||||
float **d = (float **)dst;
|
||||
float **s = (float **)src;
|
||||
const float **s = (const float **)src;
|
||||
|
||||
if (v <= VOLUME_MIN) {
|
||||
memset(d[0], 0, n_samples * sizeof(float));
|
||||
|
|
@ -123,12 +127,13 @@ channelmix_f32_2_1(void *data, int n_dst, void *dst[n_dst],
|
|||
}
|
||||
|
||||
static void
|
||||
channelmix_f32_4_1(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_4_1(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int n;
|
||||
float **d = (float **)dst;
|
||||
float **s = (float **)src;
|
||||
const float **s = (const float **)src;
|
||||
|
||||
if (v <= VOLUME_MIN) {
|
||||
memset(d[0], 0, n_samples * sizeof(float));
|
||||
|
|
@ -141,12 +146,13 @@ channelmix_f32_4_1(void *data, int n_dst, void *dst[n_dst],
|
|||
}
|
||||
|
||||
static void
|
||||
channelmix_f32_3p1_1(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_3p1_1(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int n;
|
||||
float **d = (float **)dst;
|
||||
float **s = (float **)src;
|
||||
const float **s = (const float **)src;
|
||||
|
||||
if (v <= VOLUME_MIN) {
|
||||
memset(d[0], 0, n_samples * sizeof(float));
|
||||
|
|
@ -162,12 +168,13 @@ channelmix_f32_3p1_1(void *data, int n_dst, void *dst[n_dst],
|
|||
#define MASK_QUAD _M(FL)|_M(FR)|_M(RL)|_M(RR)|_M(UNKNOWN)
|
||||
|
||||
static void
|
||||
channelmix_f32_2_4(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_2_4(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int i, n;
|
||||
float **d = (float **)dst;
|
||||
float **s = (float **)src;
|
||||
const float **s = (const float **)src;
|
||||
|
||||
if (v <= VOLUME_MIN) {
|
||||
for (i = 0; i < n_dst; i++)
|
||||
|
|
@ -189,12 +196,13 @@ channelmix_f32_2_4(void *data, int n_dst, void *dst[n_dst],
|
|||
|
||||
#define MASK_3_1 _M(FL)|_M(FR)|_M(FC)|_M(LFE)
|
||||
static void
|
||||
channelmix_f32_2_3p1(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_2_3p1(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int i, n;
|
||||
float **d = (float **)dst;
|
||||
float **s = (float **)src;
|
||||
const float **s = (const float **)src;
|
||||
|
||||
if (v <= VOLUME_MIN) {
|
||||
for (i = 0; i < n_dst; i++)
|
||||
|
|
@ -221,12 +229,13 @@ channelmix_f32_2_3p1(void *data, int n_dst, void *dst[n_dst],
|
|||
|
||||
#define MASK_5_1 _M(FL)|_M(FR)|_M(FC)|_M(LFE)|_M(SL)|_M(SR)|_M(RL)|_M(RR)
|
||||
static void
|
||||
channelmix_f32_2_5p1(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_2_5p1(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int i, n;
|
||||
float **d = (float **)dst;
|
||||
float **s = (float **)src;
|
||||
const float **s = (const float **)src;
|
||||
|
||||
if (v <= VOLUME_MIN) {
|
||||
for (i = 0; i < n_dst; i++)
|
||||
|
|
@ -253,13 +262,14 @@ channelmix_f32_2_5p1(void *data, int n_dst, void *dst[n_dst],
|
|||
|
||||
/* FL+FR+FC+LFE+SL+SR -> FL+FR */
|
||||
static void
|
||||
channelmix_f32_5p1_2(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_5p1_2(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int n;
|
||||
float **d = (float **) dst;
|
||||
float **s = (float **) src;
|
||||
float *m = matrix;
|
||||
const float **s = (const float **) src;
|
||||
const float *m = matrix;
|
||||
const float clev = m[2];
|
||||
const float llev = m[3];
|
||||
const float slev = m[4];
|
||||
|
|
@ -286,12 +296,13 @@ channelmix_f32_5p1_2(void *data, int n_dst, void *dst[n_dst],
|
|||
|
||||
/* FL+FR+FC+LFE+SL+SR -> FL+FR+FC+LFE*/
|
||||
static void
|
||||
channelmix_f32_5p1_3p1(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_5p1_3p1(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int i, n;
|
||||
float **d = (float **) dst;
|
||||
float **s = (float **) src;
|
||||
const float **s = (const float **) src;
|
||||
|
||||
if (v <= VOLUME_MIN) {
|
||||
for (i = 0; i < n_dst; i++)
|
||||
|
|
@ -310,13 +321,14 @@ channelmix_f32_5p1_3p1(void *data, int n_dst, void *dst[n_dst],
|
|||
|
||||
/* FL+FR+FC+LFE+SL+SR -> FL+FR+RL+RR*/
|
||||
static void
|
||||
channelmix_f32_5p1_4(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_5p1_4(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int i, n;
|
||||
float **d = (float **) dst;
|
||||
float **s = (float **) src;
|
||||
float *m = matrix;
|
||||
const float **s = (const float **) src;
|
||||
const float *m = matrix;
|
||||
const float clev = m[2];
|
||||
const float llev = m[3];
|
||||
|
||||
|
|
@ -326,7 +338,7 @@ channelmix_f32_5p1_4(void *data, int n_dst, void *dst[n_dst],
|
|||
}
|
||||
else if (v == VOLUME_NORM) {
|
||||
for (n = 0; n < n_samples; n++) {
|
||||
float ctr = s[2][n] * clev + s[3][n] * llev;
|
||||
const float ctr = s[2][n] * clev + s[3][n] * llev;
|
||||
d[0][n] = s[0][n] + ctr;
|
||||
d[1][n] = s[1][n] + ctr;
|
||||
d[2][n] = s[4][n];
|
||||
|
|
@ -335,7 +347,7 @@ channelmix_f32_5p1_4(void *data, int n_dst, void *dst[n_dst],
|
|||
}
|
||||
else {
|
||||
for (n = 0; n < n_samples; n++) {
|
||||
float ctr = s[2][n] * clev + s[3][n] * llev;
|
||||
const float ctr = s[2][n] * clev + s[3][n] * llev;
|
||||
d[0][n] = (s[0][n] + ctr) * v;
|
||||
d[1][n] = (s[1][n] + ctr) * v;
|
||||
d[2][n] = s[4][n] * v;
|
||||
|
|
@ -348,13 +360,14 @@ channelmix_f32_5p1_4(void *data, int n_dst, void *dst[n_dst],
|
|||
|
||||
/* FL+FR+FC+LFE+SL+SR+RL+RR -> FL+FR */
|
||||
static void
|
||||
channelmix_f32_7p1_2(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_7p1_2(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int n;
|
||||
float **d = (float **) dst;
|
||||
float **s = (float **) src;
|
||||
float *m = matrix;
|
||||
const float **s = (const float **) src;
|
||||
const float *m = matrix;
|
||||
const float clev = m[2];
|
||||
const float llev = m[3];
|
||||
const float slev = m[4];
|
||||
|
|
@ -381,12 +394,13 @@ channelmix_f32_7p1_2(void *data, int n_dst, void *dst[n_dst],
|
|||
|
||||
/* FL+FR+FC+LFE+SL+SR+RL+RR -> FL+FR+FC+LFE*/
|
||||
static void
|
||||
channelmix_f32_7p1_3p1(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_7p1_3p1(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int i, n;
|
||||
float **d = (float **) dst;
|
||||
float **s = (float **) src;
|
||||
const float **s = (const float **) src;
|
||||
|
||||
if (v <= VOLUME_MIN) {
|
||||
for (i = 0; i < n_dst; i++)
|
||||
|
|
@ -405,13 +419,14 @@ channelmix_f32_7p1_3p1(void *data, int n_dst, void *dst[n_dst],
|
|||
|
||||
/* FL+FR+FC+LFE+SL+SR+RL+RR -> FL+FR+RL+RR*/
|
||||
static void
|
||||
channelmix_f32_7p1_4(void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src], void *matrix, float v, int n_samples)
|
||||
channelmix_f32_7p1_4(void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void *matrix, float v, int n_samples)
|
||||
{
|
||||
int i, n;
|
||||
float **d = (float **) dst;
|
||||
float **s = (float **) src;
|
||||
float *m = matrix;
|
||||
const float **s = (const float **) src;
|
||||
const float *m = matrix;
|
||||
const float clev = m[2];
|
||||
const float llev = m[3];
|
||||
const float slev = m[4];
|
||||
|
|
@ -422,9 +437,9 @@ channelmix_f32_7p1_4(void *data, int n_dst, void *dst[n_dst],
|
|||
}
|
||||
else if (v == VOLUME_NORM) {
|
||||
for (n = 0; n < n_samples; n++) {
|
||||
float ctr = s[2][n] * clev + s[3][n] * llev;
|
||||
float sl = s[4][n] * slev;
|
||||
float sr = s[5][n] * slev;
|
||||
const float ctr = s[2][n] * clev + s[3][n] * llev;
|
||||
const float sl = s[4][n] * slev;
|
||||
const float sr = s[5][n] * slev;
|
||||
d[0][n] = s[0][n] + ctr + sl;
|
||||
d[1][n] = s[1][n] + ctr + sr;
|
||||
d[2][n] = s[6][n] + sl;
|
||||
|
|
@ -433,9 +448,9 @@ channelmix_f32_7p1_4(void *data, int n_dst, void *dst[n_dst],
|
|||
}
|
||||
else {
|
||||
for (n = 0; n < n_samples; n++) {
|
||||
float ctr = s[2][n] * clev + s[3][n] * llev;
|
||||
float sl = s[4][n] * slev;
|
||||
float sr = s[5][n] * slev;
|
||||
const float ctr = s[2][n] * clev + s[3][n] * llev;
|
||||
const float sl = s[4][n] * slev;
|
||||
const float sr = s[5][n] * slev;
|
||||
d[0][n] = (s[0][n] + ctr + sl) * v;
|
||||
d[1][n] = (s[1][n] + ctr + sr) * v;
|
||||
d[2][n] = (s[6][n] + sl) * v;
|
||||
|
|
@ -444,9 +459,9 @@ channelmix_f32_7p1_4(void *data, int n_dst, void *dst[n_dst],
|
|||
}
|
||||
}
|
||||
|
||||
typedef void (*channelmix_func_t) (void *data, int n_dst, void *dst[n_dst],
|
||||
int n_src, const void *src[n_src],
|
||||
void *matrix, float v, int n_samples);
|
||||
typedef void (*channelmix_func_t) (void *data, int n_dst, void * SPA_RESTRICT dst[n_dst],
|
||||
int n_src, const void * SPA_RESTRICT src[n_src],
|
||||
const void * SPA_RESTRICT matrix, float v, int n_samples);
|
||||
|
||||
|
||||
#define ANY ((uint32_t)-1)
|
||||
|
|
|
|||
|
|
@ -43,7 +43,8 @@
|
|||
#define DEFAULT_RATE 44100
|
||||
#define DEFAULT_CHANNELS 2
|
||||
|
||||
#define MAX_BUFFERS 32
|
||||
#define MAX_BUFFERS 32
|
||||
#define MAX_DATAS 32
|
||||
|
||||
struct impl;
|
||||
|
||||
|
|
@ -68,6 +69,7 @@ struct buffer {
|
|||
struct spa_list link;
|
||||
struct spa_buffer *outbuf;
|
||||
struct spa_meta_header *h;
|
||||
void *datas[MAX_DATAS];
|
||||
};
|
||||
|
||||
struct port {
|
||||
|
|
@ -912,7 +914,7 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
{
|
||||
struct impl *this;
|
||||
struct port *port;
|
||||
uint32_t i, size = SPA_ID_INVALID;
|
||||
uint32_t i, j, size = SPA_ID_INVALID;
|
||||
|
||||
spa_return_val_if_fail(node != NULL, -EINVAL);
|
||||
|
||||
|
|
@ -930,6 +932,7 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
|
||||
for (i = 0; i < n_buffers; i++) {
|
||||
struct buffer *b;
|
||||
uint32_t n_datas = buffers[i]->n_datas;
|
||||
struct spa_data *d = buffers[i]->datas;
|
||||
|
||||
b = &port->buffers[i];
|
||||
|
|
@ -938,18 +941,24 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
b->outbuf = buffers[i];
|
||||
b->h = spa_buffer_find_meta_data(buffers[i], SPA_META_Header, sizeof(*b->h));
|
||||
|
||||
if (size == SPA_ID_INVALID)
|
||||
size = d[0].maxsize;
|
||||
else
|
||||
if (size != d[0].maxsize)
|
||||
for (j = 0; j < n_datas; j++) {
|
||||
if (size == SPA_ID_INVALID)
|
||||
size = d[j].maxsize;
|
||||
else if (size != d[j].maxsize)
|
||||
return -EINVAL;
|
||||
|
||||
if (!((d[0].type == SPA_DATA_MemPtr ||
|
||||
d[0].type == SPA_DATA_MemFd ||
|
||||
d[0].type == SPA_DATA_DmaBuf) && d[0].data != NULL)) {
|
||||
spa_log_error(this->log, NAME " %p: invalid memory on buffer %p", this,
|
||||
buffers[i]);
|
||||
return -EINVAL;
|
||||
if (!((d[j].type == SPA_DATA_MemPtr ||
|
||||
d[j].type == SPA_DATA_MemFd ||
|
||||
d[j].type == SPA_DATA_DmaBuf) && d[j].data != NULL)) {
|
||||
spa_log_error(this->log, NAME " %p: invalid memory on buffer %p", this,
|
||||
buffers[i]);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!SPA_IS_ALIGNED(d[j].data, 16)) {
|
||||
spa_log_warn(this->log, NAME " %p: memory %d on buffer %d not aligned",
|
||||
this, j, i);
|
||||
}
|
||||
b->datas[j] = d[j].data;
|
||||
}
|
||||
if (direction == SPA_DIRECTION_OUTPUT)
|
||||
spa_list_append(&port->queue, &b->link);
|
||||
|
|
@@ -1113,20 +1122,25 @@ static int impl_node_process(struct spa_node *node)
 		uint32_t n_dst_datas = db->n_datas;
 		const void *src_datas[n_src_datas];
 		void *dst_datas[n_dst_datas];
+		bool is_passthrough;
+		float v;
 
+		v = this->props.mute ? 0.0f : this->props.volume;
+		is_passthrough = v == 1.0f;
 		n_samples = sb->datas[0].chunk->size / inport->stride;
 
 		for (i = 0; i < n_src_datas; i++)
 			src_datas[i] = sb->datas[i].data;
 		for (i = 0; i < n_dst_datas; i++) {
-			dst_datas[i] = db->datas[i].data;
+			dst_datas[i] = is_passthrough ? (void*)src_datas[i] : dbuf->datas[i];
+			db->datas[i].data = dst_datas[i];
 			db->datas[i].chunk->size = n_samples * outport->stride;
 		}
 
-		this->convert(this, n_dst_datas, dst_datas,
-				n_src_datas, src_datas,
-				this->matrix, this->props.mute ? 0.0f : this->props.volume,
-				n_samples);
+		if (!is_passthrough)
+			this->convert(this, n_dst_datas, dst_datas,
+					n_src_datas, src_datas,
+					this->matrix, v, n_samples);
 	}
 
 	outio->status = SPA_STATUS_HAVE_BUFFER;
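In the channelmix node above, the passthrough decision is driven by the effective volume; a trivial standalone sketch of just that check (the function name is illustrative, the condition is taken from the diff):

#include <stdbool.h>

/* Channelmix can only pass buffers through when it would not change the
 * samples via its volume: not muted and volume exactly 1.0. */
static bool volume_is_passthrough(bool mute, float volume)
{
	float v = mute ? 0.0f : volume;
	return v == 1.0f;
}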
@ -55,7 +55,6 @@
|
|||
#define S32_TO_F32(v) S24_TO_F32((v) >> 8)
|
||||
#define F32_TO_S32(v) (F32_TO_S24(v) << 8)
|
||||
|
||||
|
||||
static inline int32_t read_s24(const void *src)
|
||||
{
|
||||
const int8_t *s = src;
|
||||
|
|
|
|||
|
|
@ -44,7 +44,8 @@
|
|||
#define DEFAULT_RATE 48000
|
||||
#define DEFAULT_CHANNELS 2
|
||||
|
||||
#define MAX_BUFFERS 64
|
||||
#define MAX_BUFFERS 64
|
||||
#define MAX_DATAS 32
|
||||
#define MAX_PORTS 128
|
||||
|
||||
#define PROP_DEFAULT_TRUNCATE false
|
||||
|
|
@ -70,6 +71,7 @@ struct buffer {
|
|||
struct spa_list link;
|
||||
struct spa_buffer *outbuf;
|
||||
struct spa_meta_header *h;
|
||||
void *datas[MAX_DATAS];
|
||||
};
|
||||
|
||||
struct port {
|
||||
|
|
@ -119,6 +121,7 @@ struct impl {
|
|||
|
||||
uint32_t cpu_flags;
|
||||
convert_func_t convert;
|
||||
int is_passthrough:1;
|
||||
};
|
||||
|
||||
#define CHECK_PORT(this,d,id) (id == 0)
|
||||
|
|
@ -187,6 +190,7 @@ static int setup_convert(struct impl *this)
|
|||
this->cpu_flags, conv->features);
|
||||
|
||||
this->convert = conv->func;
|
||||
this->is_passthrough = src_fmt == dst_fmt;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -664,16 +668,15 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (size == SPA_ID_INVALID)
|
||||
size = d[0].maxsize;
|
||||
else
|
||||
if (size != d[0].maxsize) {
|
||||
spa_log_error(this->log, NAME " %p: expected size %d on buffer %d", this,
|
||||
size, i);
|
||||
for (j = 0; j < n_datas; j++) {
|
||||
if (size == SPA_ID_INVALID)
|
||||
size = d[j].maxsize;
|
||||
else if (size != d[j].maxsize) {
|
||||
spa_log_error(this->log, NAME " %p: expected size %d on buffer %d",
|
||||
this, size, i);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (j = 0; j < n_datas; j++) {
|
||||
if (!((d[j].type == SPA_DATA_MemPtr ||
|
||||
d[j].type == SPA_DATA_MemFd ||
|
||||
d[j].type == SPA_DATA_DmaBuf) && d[j].data != NULL)) {
|
||||
|
|
@ -685,6 +688,7 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
spa_log_warn(this->log, NAME " %p: memory %d on buffer %d not aligned",
|
||||
this, j, i);
|
||||
}
|
||||
b->datas[j] = d[j].data;
|
||||
}
|
||||
|
||||
if (direction == SPA_DIRECTION_OUTPUT)
|
||||
|
|
@ -794,7 +798,7 @@ static int impl_node_process(struct spa_node *node)
|
|||
struct spa_buffer *inb, *outb;
|
||||
const void **src_datas;
|
||||
void **dst_datas;
|
||||
uint32_t i, n_src_datas, n_dst_datas;
|
||||
uint32_t i, n_src_datas, n_dst_datas, n_datas;
|
||||
int res = 0;
|
||||
uint32_t n_samples, size, maxsize, offs;
|
||||
|
||||
|
|
@@ -853,16 +857,22 @@ static int impl_node_process(struct spa_node *node)
 	maxsize = SPA_MIN(outport->ctrl->max_size, maxsize);
 	n_samples = SPA_MIN(n_samples, maxsize / outport->stride);
 
-	spa_log_trace(this->log, NAME " %p: n_src:%d n_dst:%d size:%d maxsize:%d n_samples:%d",
-			this, n_src_datas, n_dst_datas, size, maxsize, n_samples);
-
+	n_datas = SPA_MAX(n_src_datas, n_dst_datas);
+
 	for (i = 0; i < n_dst_datas; i++) {
-		dst_datas[i] = outb->datas[this->remap[i]].data;
+		dst_datas[i] = this->is_passthrough ?
+			(void*)src_datas[i] :
+			outbuf->datas[this->remap[i]];
+		outb->datas[this->remap[i]].data = dst_datas[i];
 		outb->datas[i].chunk->offset = 0;
 		outb->datas[i].chunk->size = n_samples * outport->stride;
 	}
 
-	this->convert(this, dst_datas, src_datas, SPA_MAX(n_src_datas, n_dst_datas), n_samples);
+	spa_log_trace(this->log, NAME " %p: n_src:%d n_dst:%d size:%d maxsize:%d n_samples:%d",
+			this, n_src_datas, n_dst_datas, size, maxsize, n_samples);
+
+	if (!this->is_passthrough)
+		this->convert(this, dst_datas, src_datas, n_datas, n_samples);
 
 	inio->status = SPA_STATUS_NEED_BUFFER;
 	res |= SPA_STATUS_NEED_BUFFER;
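One detail worth noting in the hunk above: even in passthrough mode the output buffer metadata is still filled in, only the data pointer now refers to the input memory. A reduced sketch of that bookkeeping, using simplified stand-ins for struct spa_data and struct spa_chunk (fill_output and the struct names are assumptions for the example):

#include <stdint.h>

/* Simplified stand-ins for the SPA buffer metadata. */
struct chunk { uint32_t offset, size; };
struct data  { void *ptr; struct chunk chunk; };

static void fill_output(struct data *out, uint32_t n_datas,
			void *dst_datas[], uint32_t n_samples, uint32_t stride)
{
	uint32_t i;

	for (i = 0; i < n_datas; i++) {
		/* dst_datas[i] is either the input pointer (passthrough) or
		 * the memory that was attached at use_buffers time */
		out[i].ptr = dst_datas[i];
		out[i].chunk.offset = 0;
		out[i].chunk.size = n_samples * stride;
	}
}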
|
|||
|
|
@ -46,6 +46,7 @@
|
|||
|
||||
#define MAX_SAMPLES 2048
|
||||
#define MAX_BUFFERS 64
|
||||
#define MAX_DATAS 32
|
||||
#define MAX_PORTS 128
|
||||
|
||||
struct buffer {
|
||||
|
|
@ -54,6 +55,7 @@ struct buffer {
|
|||
uint32_t flags;
|
||||
struct spa_list link;
|
||||
struct spa_buffer *buf;
|
||||
void *datas[MAX_DATAS];
|
||||
};
|
||||
|
||||
struct port {
|
||||
|
|
@ -95,16 +97,16 @@ struct impl {
|
|||
struct spa_hook_list hooks;
|
||||
|
||||
uint32_t port_count;
|
||||
uint32_t monitor_count;
|
||||
struct port in_ports[MAX_PORTS];
|
||||
struct port out_ports[MAX_PORTS + 1];
|
||||
uint32_t monitor_count;
|
||||
|
||||
bool started;
|
||||
uint32_t cpu_flags;
|
||||
convert_func_t convert;
|
||||
|
||||
bool monitor;
|
||||
bool have_profile;
|
||||
uint32_t cpu_flags;
|
||||
int is_passthrough:1;
|
||||
int started:1;
|
||||
int monitor:1;
|
||||
int have_profile:1;
|
||||
|
||||
float empty[MAX_SAMPLES + 15];
|
||||
};
|
||||
|
|
@ -563,6 +565,7 @@ static int setup_convert(struct impl *this)
|
|||
this->cpu_flags, conv->features);
|
||||
|
||||
this->convert = conv->func;
|
||||
this->is_passthrough = src_fmt == dst_fmt;
|
||||
return 0;
|
||||
}
|
||||
return -ENOTSUP;
|
||||
|
|
@ -784,9 +787,11 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
this, j, i, d[j].type, d[j].data);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!SPA_IS_ALIGNED(d[j].data, 16))
|
||||
if (!SPA_IS_ALIGNED(d[j].data, 16)) {
|
||||
spa_log_warn(this->log, NAME " %p: memory %d on buffer %d not aligned",
|
||||
this, j, i);
|
||||
}
|
||||
b->datas[j] = d[j].data;
|
||||
}
|
||||
|
||||
if (direction == SPA_DIRECTION_OUTPUT)
|
||||
|
|
@@ -907,7 +912,7 @@ static inline int handle_monitor(struct impl *this, const void *data, int n_samp
 	size = SPA_MIN(dd->maxsize, n_samples * outport->stride);
 	dd->chunk->offset = 0;
 	dd->chunk->size = size;
-	memcpy(dd->data, data, size);
+	dd->data = (void*)data;
 
 	return res;
 }
@ -977,14 +982,15 @@ static int impl_node_process(struct spa_node *node)
|
|||
handle_monitor(this, src_datas[i], n_samples, GET_OUT_PORT(this, i + 1));
|
||||
|
||||
for (i = 0; i < n_dst_datas; i++) {
|
||||
dst_datas[i] = dbuf->buf->datas[i].data;
|
||||
dst_datas[i] = this->is_passthrough ? (void*)src_datas[i] : dbuf->datas[i];
|
||||
dbuf->buf->datas[i].data = dst_datas[i];
|
||||
dbuf->buf->datas[i].chunk->offset = 0;
|
||||
dbuf->buf->datas[i].chunk->size = n_samples * outport->stride;
|
||||
spa_log_trace(this->log, NAME " %p %p %d", this, dst_datas[i],
|
||||
n_samples * outport->stride);
|
||||
}
|
||||
|
||||
this->convert(this, dst_datas, src_datas, SPA_MAX(n_dst_datas, n_src_datas), n_samples);
|
||||
if (!this->is_passthrough)
|
||||
this->convert(this, dst_datas, src_datas, SPA_MAX(n_dst_datas, n_src_datas), n_samples);
|
||||
|
||||
return res | SPA_STATUS_HAVE_BUFFER;
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -40,7 +40,13 @@ static void impl_speex_update_rate(struct resample *r, double rate)
 static void impl_speex_process(struct resample *r, int channel,
 		void *src, uint32_t *in_len, void *dst, uint32_t *out_len)
 {
-	speex_resampler_process_float(r->data, channel, src, in_len, dst, out_len);
+	if (r->i_rate == r->o_rate) {
+		*out_len = *in_len = SPA_MIN(*in_len, *out_len);
+		spa_memcpy(dst, src, *out_len * sizeof(float));
+	}
+	else {
+		speex_resampler_process_float(r->data, channel, src, in_len, dst, out_len);
+	}
 }
 
 static void impl_speex_reset (struct resample *r)
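Read in isolation, the equal-rate shortcut above behaves like the following standalone sketch (resample_or_copy is a hypothetical name, plain memcpy stands in for spa_memcpy, and the real else branch calls speex_resampler_process_float):

#include <stdint.h>
#include <string.h>

/* When input and output rates are equal the resampler is bypassed: consume
 * and produce the same number of frames and just copy them. */
static void resample_or_copy(uint32_t i_rate, uint32_t o_rate,
			     const float *src, uint32_t *in_len,
			     float *dst, uint32_t *out_len)
{
	if (i_rate == o_rate) {
		uint32_t n = *in_len < *out_len ? *in_len : *out_len;
		memcpy(dst, src, n * sizeof(float));
		*in_len = *out_len = n;
	} else {
		/* ... hand off to the real resampler here ... */
	}
}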
|||
|
|
@ -46,7 +46,7 @@
|
|||
#define DEFAULT_RATE 44100
|
||||
#define DEFAULT_CHANNELS 2
|
||||
|
||||
#define MAX_BUFFERS 32
|
||||
#define MAX_BUFFERS 32
|
||||
|
||||
struct impl;
|
||||
|
||||
|
|
@ -562,7 +562,7 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
{
|
||||
struct impl *this;
|
||||
struct port *port;
|
||||
uint32_t i, size = SPA_ID_INVALID;
|
||||
uint32_t i, j, size = SPA_ID_INVALID;
|
||||
|
||||
spa_return_val_if_fail(node != NULL, -EINVAL);
|
||||
|
||||
|
|
@ -588,19 +588,22 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
b->outbuf = buffers[i];
|
||||
b->h = spa_buffer_find_meta_data(buffers[i], SPA_META_Header, sizeof(*b->h));
|
||||
|
||||
if (size == SPA_ID_INVALID)
|
||||
size = d[0].maxsize;
|
||||
else
|
||||
if (size != d[0].maxsize)
|
||||
return -EINVAL;
|
||||
for (j = 0; j < buffers[i]->n_datas; j++) {
|
||||
if (size == SPA_ID_INVALID)
|
||||
size = d[j].maxsize;
|
||||
else
|
||||
if (size != d[j].maxsize)
|
||||
return -EINVAL;
|
||||
|
||||
if (!((d[0].type == SPA_DATA_MemPtr ||
|
||||
d[0].type == SPA_DATA_MemFd ||
|
||||
d[0].type == SPA_DATA_DmaBuf) && d[0].data != NULL)) {
|
||||
spa_log_error(this->log, NAME " %p: invalid memory on buffer %p", this,
|
||||
buffers[i]);
|
||||
return -EINVAL;
|
||||
if (!((d[j].type == SPA_DATA_MemPtr ||
|
||||
d[j].type == SPA_DATA_MemFd ||
|
||||
d[j].type == SPA_DATA_DmaBuf) && d[j].data != NULL)) {
|
||||
spa_log_error(this->log, NAME " %p: invalid memory on buffer %p", this,
|
||||
buffers[i]);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (direction == SPA_DIRECTION_OUTPUT)
|
||||
spa_list_append(&port->queue, &b->link);
|
||||
else
|
||||
|
|
|
|||
|
|
@ -46,6 +46,7 @@
|
|||
|
||||
#define MAX_SAMPLES 2048
|
||||
#define MAX_BUFFERS 64
|
||||
#define MAX_DATAS 32
|
||||
#define MAX_PORTS 128
|
||||
|
||||
struct buffer {
|
||||
|
|
@ -54,6 +55,7 @@ struct buffer {
|
|||
uint32_t flags;
|
||||
struct spa_list link;
|
||||
struct spa_buffer *buf;
|
||||
void *datas[MAX_DATAS];
|
||||
};
|
||||
|
||||
struct port {
|
||||
|
|
@ -101,9 +103,10 @@ struct impl {
|
|||
struct port out_ports[MAX_PORTS];
|
||||
uint32_t port_count;
|
||||
|
||||
bool started;
|
||||
uint32_t cpu_flags;
|
||||
convert_func_t convert;
|
||||
int is_passthrough:1;
|
||||
int started:1;
|
||||
|
||||
bool have_profile;
|
||||
|
||||
|
|
@ -550,6 +553,7 @@ static int setup_convert(struct impl *this)
|
|||
this->cpu_flags, conv->features);
|
||||
|
||||
this->convert = conv->func;
|
||||
this->is_passthrough = src_fmt == dst_fmt;
|
||||
return 0;
|
||||
}
|
||||
return -ENOTSUP;
|
||||
|
|
@ -712,7 +716,7 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
{
|
||||
struct impl *this;
|
||||
struct port *port;
|
||||
uint32_t i;
|
||||
uint32_t i, j;
|
||||
|
||||
spa_return_val_if_fail(node != NULL, -EINVAL);
|
||||
|
||||
|
|
@ -730,6 +734,7 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
|
||||
for (i = 0; i < n_buffers; i++) {
|
||||
struct buffer *b;
|
||||
uint32_t n_datas = buffers[i]->n_datas;
|
||||
struct spa_data *d = buffers[i]->datas;
|
||||
|
||||
b = &port->buffers[i];
|
||||
|
|
@ -737,16 +742,19 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
b->buf = buffers[i];
|
||||
b->flags = 0;
|
||||
|
||||
if (!((d[0].type == SPA_DATA_MemPtr ||
|
||||
d[0].type == SPA_DATA_MemFd ||
|
||||
d[0].type == SPA_DATA_DmaBuf) && d[0].data != NULL)) {
|
||||
spa_log_error(this->log, NAME " %p: invalid memory on buffer %d %d %p", this,
|
||||
i, d[0].type, d[0].data);
|
||||
return -EINVAL;
|
||||
for (j = 0; j < n_datas; j++) {
|
||||
if (!((d[j].type == SPA_DATA_MemPtr ||
|
||||
d[j].type == SPA_DATA_MemFd ||
|
||||
d[j].type == SPA_DATA_DmaBuf) && d[j].data != NULL)) {
|
||||
spa_log_error(this->log, NAME " %p: invalid memory %d on buffer %d",
|
||||
this, j, i);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!SPA_IS_ALIGNED(d[j].data, 16))
|
||||
spa_log_warn(this->log, NAME " %p: memory %d on buffer %d not aligned",
|
||||
this, j, i);
|
||||
b->datas[j] = d[j].data;
|
||||
}
|
||||
if (!SPA_IS_ALIGNED(d[0].data, 16))
|
||||
spa_log_warn(this->log, NAME " %p: memory on buffer %d not aligned", this, i);
|
||||
|
||||
if (direction == SPA_DIRECTION_OUTPUT)
|
||||
queue_buffer(this, port, i);
|
||||
}
|
||||
|
|
@@ -895,7 +903,10 @@ static int impl_node_process(struct spa_node *node)
 	n_samples = SPA_MIN(n_samples, maxsize / outport->stride);
 
 	for (j = 0; j < dbuf->buf->n_datas; j++) {
-		dst_datas[n_dst_datas++] = dd[j].data;
+		dst_datas[n_dst_datas] = this->is_passthrough ?
+			(void *)src_datas[n_dst_datas] :
+			dbuf->datas[j];
+		dd[j].data = dst_datas[n_dst_datas++];
 		dd[j].chunk->offset = 0;
 		dd[j].chunk->size = n_samples * outport->stride;
 	}
@@ -907,7 +918,8 @@ static int impl_node_process(struct spa_node *node)
 	spa_log_trace(this->log, NAME " %p: %d %d %d %d %d", this,
 			n_src_datas, n_dst_datas, n_samples, maxsize, inport->stride);
 
-	this->convert(this, dst_datas, src_datas, SPA_MAX(n_dst_datas, n_src_datas), n_samples);
+	if (!this->is_passthrough)
+		this->convert(this, dst_datas, src_datas, SPA_MAX(n_dst_datas, n_src_datas), n_samples);
 
 	inio->status = SPA_STATUS_NEED_BUFFER;
 	res |= SPA_STATUS_NEED_BUFFER;