svolume: Fix ARM alignment issues

As Peter Meerwald <p.meerwald@bct-electronic.com> discovered, our ARM
svolume code performs quite terribly when the incoming samples are not
word-aligned. This can very easily be the case, since the architecture
only requires that the samples be 16-bit aligned, and we might end up
running the innermost loop on unaligned data, since only the leading
(length mod 4) samples are peeled off before it. The performance
degradation was ~50x on a Cortex A9 (Pandaboard).

This reworks the svolume logic to first consume enough samples to make
sure the rest is word-aligned, and reorders the processing to handle 4
samples at a time first and then deal with the remainder.

With this, performance is comparable for arbitrary alignments (~3x
faster than the C code).
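
The reworked flow follows a common head/bulk/tail pattern. As a rough
illustration, here is a minimal C sketch of that pattern, not the actual
PulseAudio code: it uses a single volume instead of per-channel volumes,
plain C in place of the ARM assembly fast path, and the names
scale_sample/volume_s16 are made up for this example.

#include <stdint.h>

/* (volume * sample) >> 16 with saturation, mirroring smulwb + ssat */
static int16_t scale_sample(int16_t s, int32_t vol) {
    int32_t r = (int32_t) (((int64_t) vol * s) >> 16);
    if (r > INT16_MAX) r = INT16_MAX;
    if (r < INT16_MIN) r = INT16_MIN;
    return (int16_t) r;
}

static void volume_s16(int16_t *samples, int32_t vol, unsigned n_samples) {
    unsigned i = 0;

    /* Head: samples are only guaranteed to be 16-bit aligned, so peel off
     * samples until the pointer is word (32-bit) aligned. */
    while (i < n_samples && ((uintptr_t) &samples[i] & 3)) {
        samples[i] = scale_sample(samples[i], vol);
        i++;
    }

    /* Bulk: word-aligned, 4 samples at a time; this is where the ARM
     * assembly inner loop runs in the real code. */
    for (; i + 4 <= n_samples; i += 4) {
        samples[i]     = scale_sample(samples[i],     vol);
        samples[i + 1] = scale_sample(samples[i + 1], vol);
        samples[i + 2] = scale_sample(samples[i + 2], vol);
        samples[i + 3] = scale_sample(samples[i + 3], vol);
    }

    /* Tail: the remaining 0-3 samples. */
    for (; i < n_samples; i++)
        samples[i] = scale_sample(samples[i], vol);
}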
Author: Arun Raghavan
Date: 2012-10-30 12:04:42 +05:30
Parent: 96fa87086d
Commit: 94039790f8


@@ -40,63 +40,36 @@
     " addcs r0, %1 \n\t" \
     " movcs r6, r0 \n\t"
 
+static pa_do_volume_func_t _volume_ref;
+
 static void pa_volume_s16ne_arm(int16_t *samples, const int32_t *volumes, unsigned channels, unsigned length) {
     /* Channels must be at least 4, and always a multiple of the original number.
      * This is also the max amount we overread the volume array, which should
      * have enough padding. */
     const int32_t *ve = volumes + (channels == 3 ? 6 : PA_MAX (4U, channels));
+    unsigned rem = PA_ALIGN((size_t) samples) - (size_t) samples;
+
+    /* Make sure we're word-aligned, else performance _really_ sucks */
+    if (rem) {
+        _volume_ref(samples, volumes, channels, rem < length ? rem : length);
+
+        if (rem < length) {
+            length -= rem;
+            samples += rem / sizeof(*samples);
+        } else
+            return; /* we're done */
+    }
 
     __asm__ __volatile__ (
-        " mov r6, %1 \n\t" /* r6 = volumes */
+        " mov r6, %4 \n\t" /* r6 = volumes + rem */
         " mov %3, %3, LSR #1 \n\t" /* length /= sizeof (int16_t) */
-        " tst %3, #1 \n\t" /* check for odd samples */
-        " beq 2f \n\t"
-
-        "1: \n\t" /* odd samples volumes */
-        " ldr r0, [r6], #4 \n\t" /* r0 = volume */
-        " ldrh r2, [%0] \n\t" /* r2 = sample */
-        " smulwb r0, r0, r2 \n\t" /* r0 = (r0 * r2) >> 16 */
-        " ssat r0, #16, r0 \n\t" /* r0 = PA_CLAMP(r0, 0x7FFF) */
-        " strh r0, [%0], #2 \n\t" /* sample = r0 */
-
-        MOD_INC()
-
-        "2: \n\t"
-        " mov %3, %3, LSR #1 \n\t"
-        " tst %3, #1 \n\t" /* check for odd samples */
-        " beq 4f \n\t"
-
-        "3: \n\t"
-        " ldrd r2, [r6], #8 \n\t" /* 2 samples at a time */
-        " ldr r0, [%0] \n\t"
-
-#ifdef WORDS_BIGENDIAN
-        " smulwt r2, r2, r0 \n\t"
-        " smulwb r3, r3, r0 \n\t"
-#else
-        " smulwb r2, r2, r0 \n\t"
-        " smulwt r3, r3, r0 \n\t"
-#endif
-
-        " ssat r2, #16, r2 \n\t"
-        " ssat r3, #16, r3 \n\t"
-
-#ifdef WORDS_BIGENDIAN
-        " pkhbt r0, r3, r2, LSL #16 \n\t"
-#else
-        " pkhbt r0, r2, r3, LSL #16 \n\t"
-#endif
-        " str r0, [%0], #4 \n\t"
-
-        MOD_INC()
-
-        "4: \n\t"
-        " movs %3, %3, LSR #1 \n\t"
-        " beq 6f \n\t"
-
-        "5: \n\t"
+        " cmp %3, #4 \n\t" /* check for 4+ samples */
+        " blt 2f \n\t"
+
+        /* See final case for how the multiplication works */
+
+        "1: \n\t"
         " ldrd r2, [r6], #8 \n\t" /* 4 samples at a time */
         " ldrd r4, [r6], #8 \n\t"
         " ldrd r0, [%0] \n\t"
@@ -129,12 +102,55 @@ static void pa_volume_s16ne_arm(int16_t *samples, const int32_t *volumes, unsigned channels, unsigned length) {
         MOD_INC()
 
-        " subs %3, %3, #1 \n\t"
-        " bne 5b \n\t"
-        "6: \n\t"
+        " subs %3, %3, #4 \n\t"
+        " cmp %3, #4 \n\t"
+        " bge 1b \n\t"
+
+        "2: \n\t"
+        " cmp %3, #2 \n\t"
+        " blt 3f \n\t"
+
+        " ldrd r2, [r6], #8 \n\t" /* 2 samples at a time */
+        " ldr r0, [%0] \n\t"
+
+#ifdef WORDS_BIGENDIAN
+        " smulwt r2, r2, r0 \n\t"
+        " smulwb r3, r3, r0 \n\t"
+#else
+        " smulwb r2, r2, r0 \n\t"
+        " smulwt r3, r3, r0 \n\t"
+#endif
+
+        " ssat r2, #16, r2 \n\t"
+        " ssat r3, #16, r3 \n\t"
+
+#ifdef WORDS_BIGENDIAN
+        " pkhbt r0, r3, r2, LSL #16 \n\t"
+#else
+        " pkhbt r0, r2, r3, LSL #16 \n\t"
+#endif
+        " str r0, [%0], #4 \n\t"
+
+        MOD_INC()
+
+        " subs %3, %3, #2 \n\t"
+
+        "3: \n\t" /* check for odd # of samples */
+        " cmp %3, #1 \n\t"
+        " bne 4f \n\t"
+
+        " ldr r0, [r6], #4 \n\t" /* r0 = volume */
+        " ldrh r2, [%0] \n\t" /* r2 = sample */
+        " smulwb r0, r0, r2 \n\t" /* r0 = (r0 * r2) >> 16 */
+        " ssat r0, #16, r0 \n\t" /* r0 = PA_CLAMP(r0, 0x7FFF) */
+        " strh r0, [%0], #2 \n\t" /* sample = r0 */
+
+        "4: \n\t"
 
         : "+r" (samples), "+r" (volumes), "+r" (ve), "+r" (length)
-        :
+        : "r" (volumes + ((rem / sizeof(*samples)) % channels))
         : "r6", "r5", "r4", "r3", "r2", "r1", "r0", "cc"
     );
 }
@@ -145,6 +161,9 @@ void pa_volume_func_init_arm(pa_cpu_arm_flag_t flags) {
 #if defined (__arm__) && defined (HAVE_ARMV6)
     pa_log_info("Initialising ARM optimized volume functions.");
 
+    if (!_volume_ref)
+        _volume_ref = pa_get_volume_func(PA_SAMPLE_S16NE);
+
     pa_set_volume_func(PA_SAMPLE_S16NE, (pa_do_volume_func_t) pa_volume_s16ne_arm);
 #endif /* defined (__arm__) && defined (HAVE_ARMV6) */
 }
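
For reference, this is a hedged C rendering of what the 2-samples-at-a-time
block in the diff above computes for the little-endian case; the function
name volume_two_packed is made up for this sketch. Two 32-bit volumes play
the role of r2/r3 and one 32-bit word of two packed 16-bit samples plays the
role of r0: smulwb/smulwt perform a (32-bit x 16-bit) >> 16 multiply against
the bottom/top halfword, ssat saturates each result to 16 bits, and pkhbt
repacks the two halfwords into one word.

#include <stdint.h>

static uint32_t volume_two_packed(uint32_t samples, int32_t vol_lo, int32_t vol_hi) {
    int16_t lo = (int16_t) (samples & 0xffffu); /* bottom halfword */
    int16_t hi = (int16_t) (samples >> 16);     /* top halfword */

    /* smulwb / smulwt: (32-bit volume * 16-bit sample) >> 16 */
    int32_t rlo = (int32_t) (((int64_t) vol_lo * lo) >> 16);
    int32_t rhi = (int32_t) (((int64_t) vol_hi * hi) >> 16);

    /* ssat #16: clamp to the signed 16-bit range */
    if (rlo > INT16_MAX) rlo = INT16_MAX; else if (rlo < INT16_MIN) rlo = INT16_MIN;
    if (rhi > INT16_MAX) rhi = INT16_MAX; else if (rhi < INT16_MIN) rhi = INT16_MIN;

    /* pkhbt: bottom halfword from rlo, top halfword from rhi */
    return ((uint32_t) rlo & 0xffffu) | ((uint32_t) rhi << 16);
}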