mirror of
https://gitlab.freedesktop.org/pipewire/pipewire.git
audiomixer: improve sse and sse2 mixer
Add a mixer benchmark. Improve the sse and sse2 mixer functions by removing some reads and writes to the temporary buffer, at the expense of more scattered reads.
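The strategy change can be sketched in plain C (an illustrative sketch only, not the SIMD code in this commit; the function names are made up): the old mixers first copied src[0] into dst and then folded in each remaining source one at a time, re-reading and re-writing dst per source, while the new mixers read every source for each block of samples and write dst only once.

/* Old shape: dst already holds src[0]; every extra source re-reads and
 * re-writes dst (the temporary buffer the commit message refers to). */
static void mix_accumulate(float *dst, const float **src, int n_src, int n_samples)
{
	int i, n;
	for (i = 1; i < n_src; i++)
		for (n = 0; n < n_samples; n++)
			dst[n] += src[i][n];
}

/* New shape: one pass that gathers from every source (more scattered reads)
 * and writes each output sample exactly once. */
static void mix_gather(float *dst, const float **src, int n_src, int n_samples)
{
	int i, n;
	for (n = 0; n < n_samples; n++) {
		float sum = src[0][n];
		for (i = 1; i < n_src; i++)
			sum += src[i][n];
		dst[n] = sum;
	}
}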
parent 240d212822
commit 7a0445cb28
5 changed files with 339 additions and 95 deletions
spa/plugins/audiomixer/benchmark-mix-ops.c (new file, 217 additions)

@@ -0,0 +1,217 @@
/* Spa
 *
 * Copyright © 2019 Wim Taymans
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "config.h"

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>

#include "test-helper.h"
#include "mix-ops.h"

static uint32_t cpu_flags;

typedef void (*mix_func_t) (struct mix_ops *ops, void * SPA_RESTRICT dst,
		const void * SPA_RESTRICT src[], uint32_t n_src, uint32_t n_samples);

struct stats {
	uint32_t n_samples;
	uint32_t n_src;
	uint64_t perf;
	const char *name;
	const char *impl;
};

#define MAX_SAMPLES	4096
#define MAX_SRC		11

#define MAX_COUNT	100

static uint8_t samp_in[MAX_SAMPLES * MAX_SRC * 8];
static uint8_t samp_out[MAX_SAMPLES * 8];

static const int sample_sizes[] = { 0, 1, 128, 513, 4096 };
static const int src_counts[] = { 1, 2, 4, 6, 8, 11 };

#define MAX_RESULTS	SPA_N_ELEMENTS(sample_sizes) * SPA_N_ELEMENTS(src_counts) * 70

static uint32_t n_results = 0;
static struct stats results[MAX_RESULTS];

static void run_test1(const char *name, const char *impl, mix_func_t func, int n_src, int n_samples)
{
	int i, j;
	const void *ip[n_src];
	void *op;
	struct timespec ts;
	uint64_t count, t1, t2;
	struct mix_ops mix;

	mix.n_channels = 1;

	for (j = 0; j < n_src; j++)
		ip[j] = SPA_PTR_ALIGN(&samp_in[j * n_samples * 4], 16, void);
	op = SPA_PTR_ALIGN(samp_out, 16, void);

	clock_gettime(CLOCK_MONOTONIC, &ts);
	t1 = SPA_TIMESPEC_TO_NSEC(&ts);

	count = 0;
	for (i = 0; i < MAX_COUNT; i++) {
		func(&mix, op, ip, n_src, n_samples);
		count++;
	}
	clock_gettime(CLOCK_MONOTONIC, &ts);
	t2 = SPA_TIMESPEC_TO_NSEC(&ts);

	spa_assert(n_results < MAX_RESULTS);

	results[n_results++] = (struct stats) {
		.n_samples = n_samples,
		.n_src = n_src,
		.perf = count * (uint64_t)SPA_NSEC_PER_SEC / (t2 - t1),
		.name = name,
		.impl = impl
	};
}

static void run_test(const char *name, const char *impl, mix_func_t func)
{
	size_t i, j;

	for (i = 0; i < SPA_N_ELEMENTS(sample_sizes); i++) {
		for (j = 0; j < SPA_N_ELEMENTS(src_counts); j++) {
			run_test1(name, impl, func, src_counts[j],
				(sample_sizes[i] + (src_counts[j] -1)) / src_counts[j]);
		}
	}
}

static void test_s8(void)
{
	run_test("test_s8", "c", mix_s8_c);
}
static void test_u8(void)
{
	run_test("test_u8", "c", mix_u8_c);
}

static void test_s16(void)
{
	run_test("test_s16", "c", mix_s16_c);
}
static void test_u16(void)
{
	run_test("test_u16", "c", mix_u16_c);
}

static void test_s24(void)
{
	run_test("test_s24", "c", mix_s24_c);
}
static void test_u24(void)
{
	run_test("test_u24", "c", mix_u24_c);
}
static void test_s24_32(void)
{
	run_test("test_s24_32", "c", mix_s24_32_c);
}
static void test_u24_32(void)
{
	run_test("test_u24_32", "c", mix_u24_32_c);
}

static void test_s32(void)
{
	run_test("test_s32", "c", mix_s32_c);
}
static void test_u32(void)
{
	run_test("test_u32", "c", mix_u32_c);
}

static void test_f32(void)
{
	run_test("test_f32", "c", mix_f32_c);
#if defined (HAVE_SSE)
	if (cpu_flags & SPA_CPU_FLAG_SSE) {
		run_test("test_f32", "sse", mix_f32_sse);
	}
#endif
}

static void test_f64(void)
{
	run_test("test_f64", "c", mix_f64_c);
#if defined (HAVE_SSE2)
	if (cpu_flags & SPA_CPU_FLAG_SSE2) {
		run_test("test_f64", "sse2", mix_f64_sse2);
	}
#endif
}

static int compare_func(const void *_a, const void *_b)
{
	const struct stats *a = _a, *b = _b;
	int diff;
	if ((diff = strcmp(a->name, b->name)) != 0) return diff;
	if ((diff = a->n_samples - b->n_samples) != 0) return diff;
	if ((diff = a->n_src - b->n_src) != 0) return diff;
	if ((diff = b->perf - a->perf) != 0) return diff;
	return 0;
}

int main(int argc, char *argv[])
{
	uint32_t i;

	cpu_flags = get_cpu_flags();
	printf("got CPU flags %d\n", cpu_flags);

	test_s8();
	test_u8();
	test_s16();
	test_u16();
	test_s24();
	test_u24();
	test_s32();
	test_u32();
	test_s24_32();
	test_u24_32();
	test_f32();
	test_f64();

	qsort(results, n_results, sizeof(struct stats), compare_func);

	for (i = 0; i < n_results; i++) {
		struct stats *s = &results[i];
		fprintf(stderr, "%-12."PRIu64" \t%-32.32s %s \t samples %d, src %d\n",
				s->perf, s->name, s->impl, s->n_samples, s->n_src);
	}
	return 0;
}
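Because compare_func() orders results by name, then sample count, then source count, and finally by descending perf, the report groups the runs per format and, within each group, lists the faster implementation (for example sse versus c) first.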
spa/plugins/audiomixer/meson.build

@@ -95,3 +95,32 @@ foreach a : test_apps
     )
   endif
 endforeach
+
+benchmark_apps = [
+  'benchmark-mix-ops',
+]
+
+foreach a : benchmark_apps
+  benchmark(a,
+    executable(a, a + '.c',
+      dependencies : [ spa_dep, dl_lib, pthread_lib, mathlib, audiomixer_dep ],
+      include_directories : [ configinc ],
+      c_args : [ simd_cargs ],
+      install_rpath : spa_plugindir / 'audiomixer',
+      install : installed_tests_enabled,
+      install_dir : installed_tests_execdir / 'audiomixer'),
+    env : [
+      'SPA_PLUGIN_DIR=@0@'.format(spa_dep.get_variable('plugindir')),
+    ])
+
+  if installed_tests_enabled
+    test_conf = configuration_data()
+    test_conf.set('exec', installed_tests_execdir / 'audiomixer' / a)
+    configure_file(
+      input: installed_tests_template,
+      output: a + '.test',
+      install_dir: installed_tests_metadir / 'audiomixer',
+      configuration: test_conf
+    )
+  endif
+endforeach
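Since the harness is registered with Meson's benchmark(), it is not run as part of the normal test suite. Assuming an already configured build directory (the directory name here is illustrative), it can be invoked with Meson's benchmark runner:

	# run only the new mixer benchmark; -C points at the build directory
	meson test -C builddir --benchmark --verbose benchmark-mix-ops

The env argument above sets SPA_PLUGIN_DIR for the run, so executing the produced binary by hand would require exporting that variable manually.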
spa/plugins/audiomixer/mix-ops.c

@@ -39,7 +39,7 @@ void mix_ ##name## _c(struct mix_ops *ops, \
 	type *d = dst; \
 	const type **s = (const type **)src; \
 	n_samples *= ops->n_channels; \
-	if (n_src == 0 && zero) \
+	if (n_src == 0 && zero) \
 		memset(dst, 0, n_samples * sizeof(type)); \
 	else if (n_src == 1) { \
 		if (dst != src[0]) \
spa/plugins/audiomixer/mix-ops-sse.c

@@ -32,58 +32,57 @@
 
 #include <xmmintrin.h>
 
-static inline void mix_2(float * dst, const float * SPA_RESTRICT src, uint32_t n_samples)
-{
-	uint32_t n, unrolled;
-	__m128 in1[4], in2[4];
-
-	if (SPA_LIKELY(SPA_IS_ALIGNED(src, 16) &&
-	    SPA_IS_ALIGNED(dst, 16)))
-		unrolled = n_samples & ~15;
-	else
-		unrolled = 0;
-
-	for (n = 0; n < unrolled; n += 16) {
-		in1[0] = _mm_load_ps(&dst[n+ 0]);
-		in1[1] = _mm_load_ps(&dst[n+ 4]);
-		in1[2] = _mm_load_ps(&dst[n+ 8]);
-		in1[3] = _mm_load_ps(&dst[n+12]);
-
-		in2[0] = _mm_load_ps(&src[n+ 0]);
-		in2[1] = _mm_load_ps(&src[n+ 4]);
-		in2[2] = _mm_load_ps(&src[n+ 8]);
-		in2[3] = _mm_load_ps(&src[n+12]);
-
-		in1[0] = _mm_add_ps(in1[0], in2[0]);
-		in1[1] = _mm_add_ps(in1[1], in2[1]);
-		in1[2] = _mm_add_ps(in1[2], in2[2]);
-		in1[3] = _mm_add_ps(in1[3], in2[3]);
-
-		_mm_store_ps(&dst[n+ 0], in1[0]);
-		_mm_store_ps(&dst[n+ 4], in1[1]);
-		_mm_store_ps(&dst[n+ 8], in1[2]);
-		_mm_store_ps(&dst[n+12], in1[3]);
-	}
-	for (; n < n_samples; n++) {
-		in1[0] = _mm_load_ss(&dst[n]),
-		in2[0] = _mm_load_ss(&src[n]),
-		in1[0] = _mm_add_ss(in1[0], in2[0]);
-		_mm_store_ss(&dst[n], in1[0]);
-	}
-}
-
 void
 mix_f32_sse(struct mix_ops *ops, void * SPA_RESTRICT dst, const void * SPA_RESTRICT src[],
 		uint32_t n_src, uint32_t n_samples)
 {
-	uint32_t i;
+	n_samples *= ops->n_channels;
 
-	if (n_src == 0)
-		memset(dst, 0, n_samples * ops->n_channels * sizeof(float));
-	else if (dst != src[0])
-		spa_memcpy(dst, src[0], n_samples * ops->n_channels * sizeof(float));
+	if (n_src == 0) {
+		memset(dst, 0, n_samples * sizeof(float));
+	} else if (n_src == 1) {
+		if (dst != src[0])
+			spa_memcpy(dst, src[0], n_samples * sizeof(float));
+	} else {
+		uint32_t n, i, unrolled;
+		__m128 in[4];
+		const float **s = (const float **)src;
+		float *d = dst;
+		bool aligned = true;
 
-	for (i = 1; i < n_src; i++) {
-		mix_2(dst, src[i], n_samples * ops->n_channels);
+		if (SPA_UNLIKELY(!SPA_IS_ALIGNED(dst, 16)))
+			aligned = false;
+		else {
+			for (i = 0; i < n_src && aligned; i++) {
+				if (SPA_UNLIKELY(!SPA_IS_ALIGNED(src[i], 16)))
+					aligned = false;
+			}
+		}
+
+		unrolled = aligned ? n_samples & ~15 : 0;
+
+		for (n = 0; n < unrolled; n += 16) {
+			in[0] = _mm_load_ps(&s[0][n+ 0]);
+			in[1] = _mm_load_ps(&s[0][n+ 4]);
+			in[2] = _mm_load_ps(&s[0][n+ 8]);
+			in[3] = _mm_load_ps(&s[0][n+12]);
+
+			for (i = 1; i < n_src; i++) {
+				in[0] = _mm_add_ps(in[0], _mm_load_ps(&s[i][n+ 0]));
+				in[1] = _mm_add_ps(in[1], _mm_load_ps(&s[i][n+ 4]));
+				in[2] = _mm_add_ps(in[2], _mm_load_ps(&s[i][n+ 8]));
+				in[3] = _mm_add_ps(in[3], _mm_load_ps(&s[i][n+12]));
+			}
+			_mm_store_ps(&d[n+ 0], in[0]);
+			_mm_store_ps(&d[n+ 4], in[1]);
+			_mm_store_ps(&d[n+ 8], in[2]);
+			_mm_store_ps(&d[n+12], in[3]);
+		}
+		for (; n < n_samples; n++) {
+			in[0] = _mm_load_ss(&s[0][n]);
+			for (i = 1; i < n_src; i++)
+				in[0] = _mm_add_ss(in[0], _mm_load_ss(&s[i][n]));
+			_mm_store_ss(&d[n], in[0]);
+		}
 	}
 }
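The fast path above is only taken when dst and every src[i] are 16-byte aligned, since _mm_load_ps and _mm_store_ps require aligned addresses; otherwise everything falls through to the per-sample tail loop. A minimal sketch of how that gate and the unrolled sample count work (illustrative only, not code from the commit; the helper name is hypothetical):

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical helper mirroring the alignment gate used above. */
static uint32_t unrolled_count(const float *dst, const float **src,
		uint32_t n_src, uint32_t n_samples)
{
	uint32_t i;
	bool aligned = ((uintptr_t)dst & 15) == 0;	/* dst on a 16-byte boundary */

	for (i = 0; i < n_src && aligned; i++)		/* and every source too */
		aligned = ((uintptr_t)src[i] & 15) == 0;

	/* 4 __m128 registers x 4 floats = 16 samples per iteration, so round
	 * n_samples down to a multiple of 16; the remainder loop handles the rest. */
	return aligned ? n_samples & ~15u : 0;
}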
spa/plugins/audiomixer/mix-ops-sse2.c

@@ -32,58 +32,57 @@
 
 #include <emmintrin.h>
 
-static inline void mix_2(double * dst, const double * SPA_RESTRICT src, uint32_t n_samples)
-{
-	uint32_t n, unrolled;
-	__m128d in1[4], in2[4];
-
-	if (SPA_IS_ALIGNED(src, 16) &&
-	    SPA_IS_ALIGNED(dst, 16))
-		unrolled = n_samples & ~7;
-	else
-		unrolled = 0;
-
-	for (n = 0; n < unrolled; n += 8) {
-		in1[0] = _mm_load_pd(&dst[n+ 0]);
-		in1[1] = _mm_load_pd(&dst[n+ 2]);
-		in1[2] = _mm_load_pd(&dst[n+ 4]);
-		in1[3] = _mm_load_pd(&dst[n+ 6]);
-
-		in2[0] = _mm_load_pd(&src[n+ 0]);
-		in2[1] = _mm_load_pd(&src[n+ 2]);
-		in2[2] = _mm_load_pd(&src[n+ 4]);
-		in2[3] = _mm_load_pd(&src[n+ 6]);
-
-		in1[0] = _mm_add_pd(in1[0], in2[0]);
-		in1[1] = _mm_add_pd(in1[1], in2[1]);
-		in1[2] = _mm_add_pd(in1[2], in2[2]);
-		in1[3] = _mm_add_pd(in1[3], in2[3]);
-
-		_mm_store_pd(&dst[n+ 0], in1[0]);
-		_mm_store_pd(&dst[n+ 2], in1[1]);
-		_mm_store_pd(&dst[n+ 4], in1[2]);
-		_mm_store_pd(&dst[n+ 6], in1[3]);
-	}
-	for (; n < n_samples; n++) {
-		in1[0] = _mm_load_sd(&dst[n]),
-		in2[0] = _mm_load_sd(&src[n]),
-		in1[0] = _mm_add_sd(in1[0], in2[0]);
-		_mm_store_sd(&dst[n], in1[0]);
-	}
-}
-
 void
 mix_f64_sse2(struct mix_ops *ops, void * SPA_RESTRICT dst, const void * SPA_RESTRICT src[],
 		uint32_t n_src, uint32_t n_samples)
 {
-	uint32_t i;
+	n_samples *= ops->n_channels;
 
-	if (n_src == 0)
-		memset(dst, 0, n_samples * ops->n_channels * sizeof(double));
-	else if (dst != src[0])
-		spa_memcpy(dst, src[0], n_samples * ops->n_channels * sizeof(double));
+	if (n_src == 0) {
+		memset(dst, 0, n_samples * sizeof(double));
+	} else if (n_src == 1) {
+		if (dst != src[0])
+			spa_memcpy(dst, src[0], n_samples * sizeof(double));
+	} else {
+		uint32_t n, i, unrolled;
+		__m128d in[4];
+		const double **s = (const double **)src;
+		double *d = dst;
+		bool aligned = true;
 
-	for (i = 1; i < n_src; i++) {
-		mix_2(dst, src[i], n_samples * ops->n_channels);
+		if (SPA_UNLIKELY(!SPA_IS_ALIGNED(dst, 16)))
+			aligned = false;
+		else {
+			for (i = 0; i < n_src && aligned; i++) {
+				if (SPA_UNLIKELY(!SPA_IS_ALIGNED(src[i], 16)))
+					aligned = false;
+			}
+		}
+
+		unrolled = aligned ? n_samples & ~7 : 0;
+
+		for (n = 0; n < unrolled; n += 8) {
+			in[0] = _mm_load_pd(&s[0][n+0]);
+			in[1] = _mm_load_pd(&s[0][n+2]);
+			in[2] = _mm_load_pd(&s[0][n+4]);
+			in[3] = _mm_load_pd(&s[0][n+6]);
+
+			for (i = 1; i < n_src; i++) {
+				in[0] = _mm_add_pd(in[0], _mm_load_pd(&s[i][n+0]));
+				in[1] = _mm_add_pd(in[1], _mm_load_pd(&s[i][n+2]));
+				in[2] = _mm_add_pd(in[2], _mm_load_pd(&s[i][n+4]));
+				in[3] = _mm_add_pd(in[3], _mm_load_pd(&s[i][n+6]));
+			}
+			_mm_store_pd(&d[n+0], in[0]);
+			_mm_store_pd(&d[n+2], in[1]);
+			_mm_store_pd(&d[n+4], in[2]);
+			_mm_store_pd(&d[n+6], in[3]);
+		}
+		for (; n < n_samples; n++) {
+			in[0] = _mm_load_sd(&s[0][n]);
+			for (i = 1; i < n_src; i++)
+				in[0] = _mm_add_sd(in[0], _mm_load_sd(&s[i][n]));
+			_mm_store_sd(&d[n], in[0]);
+		}
 	}
 }
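The f64 path mirrors the f32 one: a __m128d holds two doubles, so the four registers cover 8 samples per iteration, the unrolled count is rounded down with n_samples & ~7 instead of & ~15, and the per-sample tail uses _mm_load_sd, _mm_add_sd and _mm_store_sd.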