ringbuffer: improve ringbuffer

Simplify the ringbuffer code by using ideas from the portaudio ringbuffer
implementation.
This commit is contained in:
Wim Taymans 2016-10-25 15:43:01 +02:00
parent 4148e0ff78
commit 984375c0b2
2 changed files with 102 additions and 78 deletions

51
spa/include/spa/barrier.h Normal file
View file

@@ -0,0 +1,51 @@
/* Simple Plugin API
 * Copyright (C) 2016 Wim Taymans <wim.taymans@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */
#ifndef __SPA_BARRIER_H__
#define __SPA_BARRIER_H__
#ifdef __cplusplus
extern "C" {
#endif
#include <spa/defs.h>
/* Memory barriers used to order ringbuffer index and data accesses:
 * spa_barrier_full() orders loads and stores, spa_barrier_read() orders
 * loads, spa_barrier_write() orders stores. All variants also act as
 * compiler barriers via the "memory" clobber. */
#if defined(__GNUC__)
# if defined( __i386__ ) || defined( __i486__ ) || defined( __i586__ ) || defined( __i686__ ) || defined( __x86_64__ )
/* x86: use the dedicated fence instructions */
# define spa_barrier_full() asm volatile("mfence":::"memory")
# define spa_barrier_read() asm volatile("lfence":::"memory")
# define spa_barrier_write() asm volatile("sfence":::"memory")
# elif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
/* other targets: full hardware barrier via the GCC builtin */
# define spa_barrier_full() __sync_synchronize()
# define spa_barrier_read() __sync_synchronize()
# define spa_barrier_write() __sync_synchronize()
# else
/* old GCC on a non-x86 target: previously the macros were left undefined
 * here, causing confusing compile errors at every use site. Fall back to
 * a compiler-only barrier so the code at least builds; note this does NOT
 * order CPU memory accesses. */
# warning no hardware memory barrier support found, using compiler barrier
# define spa_barrier_full() asm volatile("":::"memory")
# define spa_barrier_read() asm volatile("":::"memory")
# define spa_barrier_write() asm volatile("":::"memory")
# endif
#else
# warning no memory barriers support found
# define spa_barrier_full()
# define spa_barrier_read()
# define spa_barrier_write()
#endif
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* __SPA_BARRIER_H__ */

View file

@@ -30,6 +30,7 @@ typedef struct _SpaRingbuffer SpaRingbuffer;
#define SPA_RINGBUFFER_PREFIX SPA_RINGBUFFER_URI "#"
#include <spa/defs.h>
#include <spa/barrier.h>
typedef struct {
off_t offset;
@@ -47,7 +48,8 @@ struct _SpaRingbuffer {
volatile size_t readindex;
volatile size_t writeindex;
size_t size;
size_t size_mask;
size_t mask;
size_t mask2;
};
/**
@@ -57,20 +59,24 @@ struct _SpaRingbuffer {
* @size: the number of elements in @data
*
* Initialize a #SpaRingbuffer with @data and @size.
* When size is a power of 2, size_mask will be set with the mask to
* efficiently wrap around the indexes.
* Size must be a power of 2.
*
* Returns: %SPA_RESULT_OK, unless size is not a power of 2.
*/
static inline void
static inline SpaResult
spa_ringbuffer_init (SpaRingbuffer *rbuf,
size_t size)
{
if ((size & (size - 1)) != 0)
return SPA_RESULT_ERROR;
rbuf->size = size;
rbuf->mask = size - 1;
rbuf->mask2 = (size << 1) - 1;
rbuf->readindex = 0;
rbuf->writeindex = 0;
if ((size & (size - 1)) == 0)
rbuf->size_mask = size - 1;
else
rbuf->size_mask = 0;
return SPA_RESULT_OK;
}
/**
@@ -86,6 +92,21 @@ spa_ringbuffer_clear (SpaRingbuffer *rbuf)
rbuf->writeindex = 0;
}
/**
 * spa_ringbuffer_get_read_offset:
 * @rbuf: a #SpaRingbuffer
 * @offset: set to the offset in the data area where reading can start
 *
 * Snapshot the read index and return the number of bytes available for
 * reading.
 */
static inline size_t
spa_ringbuffer_get_read_offset (SpaRingbuffer *rbuf,
size_t *offset)
{
size_t avail, r;
r = rbuf->readindex;
/* indexes run free over a 2*size range; mask maps them into the data area */
*offset = r & rbuf->mask;
/* writer-reader distance, wrapped to the 2*size range by mask2 */
avail = (rbuf->writeindex - r) & rbuf->mask2;
/* read barrier: the index loads must complete before the caller loads data */
spa_barrier_read();
return avail;
}
/**
* spa_ringbuffer_get_read_areas:
* @rbuf: a #SpaRingbuffer
@@ -98,17 +119,9 @@ static inline void
spa_ringbuffer_get_read_areas (SpaRingbuffer *rbuf,
SpaRingbufferArea areas[2])
{
size_t avail, end, w, r;
size_t avail, end, r;
w = rbuf->writeindex;
r = rbuf->readindex;
if (w > r) {
avail = w - r;
} else {
avail = (w - r + rbuf->size);
avail = (rbuf->size_mask ? avail & rbuf->size_mask : avail % rbuf->size);
}
avail = spa_ringbuffer_get_read_offset (rbuf, &r);
end = r + avail;
areas[0].offset = r;
@@ -123,25 +136,6 @@ spa_ringbuffer_get_read_areas (SpaRingbuffer *rbuf,
}
}
/* NOTE(review): pre-change implementation of spa_ringbuffer_get_read_offset,
 * shown here as removed by this commit; the mask-based replacement avoids
 * the conditional modulo arithmetic below. */
static inline size_t
spa_ringbuffer_get_read_offset (SpaRingbuffer *rbuf,
size_t *offset)
{
size_t avail, w, r;
w = rbuf->writeindex;
r = rbuf->readindex;
/* available bytes = writer-reader distance, wrapped by the buffer size;
 * size_mask is non-zero only when size is a power of 2 */
if (w > r) {
avail = w - r;
} else {
avail = (w - r + rbuf->size);
avail = (rbuf->size_mask ? avail & rbuf->size_mask : avail % rbuf->size);
}
*offset = r;
return avail;
}
/**
* spa_ringbuffer_read_advance:
* @rbuf: a #SpaRingbuffer
@ -153,9 +147,22 @@ static inline void
spa_ringbuffer_read_advance (SpaRingbuffer *rbuf,
ssize_t len)
{
size_t tmp = rbuf->readindex + len;
__sync_synchronize();
rbuf->readindex = (rbuf->size_mask ? tmp & rbuf->size_mask : tmp % rbuf->size);
spa_barrier_full();
rbuf->readindex = (rbuf->readindex + len) & rbuf->mask2;
}
/**
 * spa_ringbuffer_get_write_offset:
 * @rbuf: a #SpaRingbuffer
 * @offset: set to the offset in the data area where writing can start
 *
 * Snapshot the write index and return the number of bytes of free space
 * available for writing.
 */
static inline size_t
spa_ringbuffer_get_write_offset (SpaRingbuffer *rbuf,
size_t *offset)
{
size_t avail, w;
w = rbuf->writeindex;
/* map the free-running write index into the data area */
*offset = w & rbuf->mask;
/* free space = size minus the filled bytes (distance wrapped by mask2) */
avail = rbuf->size - ((w - rbuf->readindex) & rbuf->mask2);
/* full barrier before the caller starts writing into the data area */
spa_barrier_full();
return avail;
}
/**
@@ -170,20 +177,9 @@ static inline void
spa_ringbuffer_get_write_areas (SpaRingbuffer *rbuf,
SpaRingbufferArea areas[2])
{
size_t avail, end, w, r;
size_t avail, end, w;
w = rbuf->writeindex;
r = rbuf->readindex;
if (w > r) {
avail = (r - w + rbuf->size);
avail = (rbuf->size_mask ? avail & rbuf->size_mask : avail % rbuf->size);
} else if (w < r) {
avail = r - w;
} else {
avail = rbuf->size;
}
avail -= 1;
avail = spa_ringbuffer_get_write_offset (rbuf, &w);
end = w + avail;
areas[0].offset = w;
@@ -198,28 +194,6 @@ spa_ringbuffer_get_write_areas (SpaRingbuffer *rbuf,
}
}
/* NOTE(review): pre-change implementation of spa_ringbuffer_get_write_offset,
 * shown here as removed by this commit in favor of the mask-based version. */
static inline size_t
spa_ringbuffer_get_write_offset (SpaRingbuffer *rbuf,
size_t *offset)
{
size_t avail, w, r;
w = rbuf->writeindex;
r = rbuf->readindex;
/* free space between writer and reader, wrapped by the buffer size */
if (w > r) {
avail = (r - w + rbuf->size);
avail = (rbuf->size_mask ? avail & rbuf->size_mask : avail % rbuf->size);
} else if (w < r) {
avail = r - w;
} else {
avail = rbuf->size;
}
/* keep one byte unused, presumably so writeindex == readindex always means
 * empty rather than full */
avail -= 1;
*offset = w;
return avail;
}
/**
* spa_ringbuffer_write_advance:
* @rbuf: a #SpaRingbuffer
@ -232,9 +206,8 @@ static inline void
spa_ringbuffer_write_advance (SpaRingbuffer *rbuf,
ssize_t len)
{
size_t tmp = rbuf->writeindex + len;
__sync_synchronize();
rbuf->writeindex = (rbuf->size_mask ? tmp & rbuf->size_mask : tmp % rbuf->size);
spa_barrier_write();
rbuf->writeindex = (rbuf->writeindex + len) & rbuf->mask2;
}
#ifdef __cplusplus