Mirror of https://gitlab.freedesktop.org/pipewire/pipewire.git (synced 2025-10-29 05:40:27 -04:00)
Meta: rework ringbuffer meta
ringbuffer: remove the size and mask fields from struct spa_ringbuffer; the users of the ringbuffer already track the size elsewhere. Remove the offset and size fields from the buffer data chunk and replace them with a ringbuffer, so that every buffer data carries a ringbuffer. This simplifies things and lets us remove the ringbuffer metadata altogether.
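A minimal sketch of the resulting usage pattern (not part of the commit; the produce/consume helper names and the clamping are illustrative, while the spa_ringbuffer calls follow the API as changed in this diff):

#include <string.h>
#include <spa/utils/defs.h>
#include <spa/utils/ringbuffer.h>
#include <spa/buffer/buffer.h>

/* Producer: append n_bytes of valid data. The chunk's offset/size
 * fields are gone; valid data is now the readindex..writeindex span
 * of the chunk's ringbuffer, and the memory size comes from the
 * spa_data itself. Overrun checking is omitted for brevity. */
static void produce(struct spa_data *d, const void *src, uint32_t n_bytes)
{
    uint32_t index;

    spa_ringbuffer_get_write_index(&d->chunk->area, &index);
    spa_ringbuffer_write_data(&d->chunk->area, d->data, d->maxsize,
                              index % d->maxsize, src, n_bytes);
    spa_ringbuffer_write_update(&d->chunk->area, index + n_bytes);
}

/* A producer that fills the whole buffer in one go (the v4l2 and
 * videotestsrc pattern below) can instead just declare the valid
 * bytes: spa_ringbuffer_set_avail(&d->chunk->area, n_bytes); */

/* Consumer: read up to max_bytes of the valid data. */
static int32_t consume(struct spa_data *d, void *dst, uint32_t max_bytes)
{
    uint32_t index, n_bytes;
    int32_t avail = spa_ringbuffer_get_read_index(&d->chunk->area, &index);

    if (avail <= 0)
        return avail;   /* empty, or an underrun if < 0 */

    n_bytes = SPA_MIN((uint32_t) avail, max_bytes);
    spa_ringbuffer_read_data(&d->chunk->area, d->data, d->maxsize,
                             index % d->maxsize, dst, n_bytes);
    spa_ringbuffer_read_update(&d->chunk->area, index + n_bytes);
    return n_bytes;
}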
This commit is contained in: parent 49d8f6792e, commit 2923b623b3
27 changed files with 199 additions and 374 deletions
@@ -25,6 +25,7 @@ extern "C" {
 #endif
 
 #include <spa/utils/defs.h>
+#include <spa/utils/ringbuffer.h>
 #include <spa/buffer/meta.h>
 #include <spa/support/type-map.h>
@@ -64,9 +65,8 @@ static inline void spa_type_data_map(struct spa_type_map *map, struct spa_type_d
 /** Chunk of memory */
 struct spa_chunk {
-    uint32_t offset;            /**< offset of valid data */
-    uint32_t size;              /**< size of valid data */
-    int32_t stride;             /**< stride of valid data */
+    struct spa_ringbuffer area; /**< ringbuffer with valid memory */
+    int32_t stride;             /**< stride of ringbuffer increment */
 };
 
 /** Data for a buffer */
@@ -38,14 +38,12 @@ extern "C" {
 #define SPA_TYPE_META__Header SPA_TYPE_META_BASE "Header"
 #define SPA_TYPE_META__Pointer SPA_TYPE_META_BASE "Pointer"
 #define SPA_TYPE_META__VideoCrop SPA_TYPE_META_BASE "VideoCrop"
-#define SPA_TYPE_META__Ringbuffer SPA_TYPE_META_BASE "Ringbuffer"
 #define SPA_TYPE_META__Shared SPA_TYPE_META_BASE "Shared"
 
 struct spa_type_meta {
     uint32_t Header;
     uint32_t Pointer;
     uint32_t VideoCrop;
-    uint32_t Ringbuffer;
     uint32_t Shared;
 };
 
@@ -55,7 +53,6 @@ static inline void spa_type_meta_map(struct spa_type_map *map, struct spa_type_m
         type->Header = spa_type_map_get_id(map, SPA_TYPE_META__Header);
         type->Pointer = spa_type_map_get_id(map, SPA_TYPE_META__Pointer);
         type->VideoCrop = spa_type_map_get_id(map, SPA_TYPE_META__VideoCrop);
-        type->Ringbuffer = spa_type_map_get_id(map, SPA_TYPE_META__Ringbuffer);
         type->Shared = spa_type_map_get_id(map, SPA_TYPE_META__Shared);
     }
 }
@@ -87,11 +84,6 @@ struct spa_meta_video_crop {
     int32_t width, height; /**< width and height */
 };
 
-/** Ringbuffer metadata */
-struct spa_meta_ringbuffer {
-    struct spa_ringbuffer ringbuffer; /**< the ringbuffer */
-};
-
 /** Describes the shared memory of a buffer is stored */
 struct spa_meta_shared {
     int32_t flags; /**< flags */
@@ -33,21 +33,10 @@ extern "C" {
 #define SPA_TYPE_PARAM_META__type SPA_TYPE_PARAM_META_BASE "type"
 #define SPA_TYPE_PARAM_META__size SPA_TYPE_PARAM_META_BASE "size"
-
-#define SPA_TYPE_PARAM_META__ringbufferSize SPA_TYPE_PARAM_META_BASE "ringbufferSize"
-#define SPA_TYPE_PARAM_META__ringbufferMinAvail SPA_TYPE_PARAM_META_BASE "ringbufferMinAvail"
-#define SPA_TYPE_PARAM_META__ringbufferStride SPA_TYPE_PARAM_META_BASE "ringbufferStride"
-#define SPA_TYPE_PARAM_META__ringbufferBlocks SPA_TYPE_PARAM_META_BASE "ringbufferBlocks"
-#define SPA_TYPE_PARAM_META__ringbufferAlign SPA_TYPE_PARAM_META_BASE "ringbufferAlign"
 
 struct spa_type_param_meta {
     uint32_t Meta;
     uint32_t type;
     uint32_t size;
-    uint32_t ringbufferSize;
-    uint32_t ringbufferMinAvail;
-    uint32_t ringbufferStride;
-    uint32_t ringbufferBlocks;
-    uint32_t ringbufferAlign;
 };
 
 static inline void
@@ -61,11 +50,6 @@ spa_type_param_meta_map(struct spa_type_map *map,
         { OFF(Meta), SPA_TYPE_PARAM__Meta },
         { OFF(type), SPA_TYPE_PARAM_META__type },
         { OFF(size), SPA_TYPE_PARAM_META__size },
-        { OFF(ringbufferSize), SPA_TYPE_PARAM_META__ringbufferSize },
-        { OFF(ringbufferMinAvail), SPA_TYPE_PARAM_META__ringbufferMinAvail },
-        { OFF(ringbufferStride), SPA_TYPE_PARAM_META__ringbufferStride },
-        { OFF(ringbufferBlocks), SPA_TYPE_PARAM_META__ringbufferBlocks },
-        { OFF(ringbufferAlign), SPA_TYPE_PARAM_META__ringbufferAlign },
     };
 #undef OFF
     for (i = 0; i < SPA_N_ELEMENTS(tab); i++)
@@ -38,12 +38,9 @@ struct spa_ringbuffer;
 struct spa_ringbuffer {
     uint32_t readindex;  /*< the current read index */
     uint32_t writeindex; /*< the current write index */
-    uint32_t size;       /*< the size of the ringbuffer */
-    uint32_t mask;       /*< mask as \a size - 1, only valid if \a size is
-                          *  a power of 2. */
 };
 
-#define SPA_RINGBUFFER_INIT(size) (struct spa_ringbuffer) { 0, 0, (size), (size)-1 }
+#define SPA_RINGBUFFER_INIT() (struct spa_ringbuffer) { 0, 0 }
 
 /**
  * Initialize a spa_ringbuffer with \a size.
@@ -51,28 +48,28 @@ struct spa_ringbuffer {
  * \param rbuf a spa_ringbuffer
- * \param size the number of elements in the ringbuffer
  */
-static inline void spa_ringbuffer_init(struct spa_ringbuffer *rbuf, uint32_t size)
+static inline void spa_ringbuffer_init(struct spa_ringbuffer *rbuf)
 {
-    *rbuf = SPA_RINGBUFFER_INIT(size);
+    *rbuf = SPA_RINGBUFFER_INIT();
 }
 
 /**
- * Clear \a rbuf, sets the pointers so that the ringbuffer is empty.
+ * Sets the pointers so that the ringbuffer contains \a size bytes.
  *
  * \param rbuf a spa_ringbuffer
  */
-static inline void spa_ringbuffer_clear(struct spa_ringbuffer *rbuf)
+static inline void spa_ringbuffer_set_avail(struct spa_ringbuffer *rbuf, uint32_t size)
 {
     rbuf->readindex = 0;
-    rbuf->writeindex = 0;
+    rbuf->writeindex = size;
 }
 
 /**
  * Get the read index and available bytes for reading.
  *
  * \param rbuf a spa_ringbuffer
- * \param index the value of readindex, should be masked to get the
- *              offset in the ringbuffer memory
+ * \param index the value of readindex, should be taken modulo the size of the
+ *              ringbuffer memory to get the offset in the ringbuffer memory
  * \return number of available bytes to read. values < 0 mean
  *         there was an underrun. values > rbuf->size means there
  *         was an overrun.
@@ -84,20 +81,22 @@ static inline int32_t spa_ringbuffer_get_read_index(struct spa_ringbuffer *rbuf,
 }
 
 /**
- * Read \a len bytes from \a rbuf starting \a offset. \a offset must be masked
- * with the size of \a rbuf and len should be smaller than the size.
+ * Read \a len bytes from \a rbuf starting \a offset. \a offset must be taken
+ * modulo \a size and len should be smaller than \a size.
  *
  * \param rbuf a #struct spa_ringbuffer
 * \param buffer memory to read from
- * \param offset offset in \a buffer to read from
+ * \param size the size of \a memory
+ * \param offset offset in \a memory to read from
 * \param data destination memory
 * \param len number of bytes to read
 */
static inline void
spa_ringbuffer_read_data(struct spa_ringbuffer *rbuf,
-                         const void *buffer, uint32_t offset, void *data, uint32_t len)
+                         const void *buffer, uint32_t size,
+                         uint32_t offset, void *data, uint32_t len)
{
-    uint32_t first = SPA_MIN(len, rbuf->size - offset);
+    uint32_t first = SPA_MIN(len, size - offset);
     memcpy(data, buffer + offset, first);
     if (SPA_UNLIKELY(len > first))
         memcpy(data + first, buffer, len - first);
@@ -118,8 +117,8 @@ static inline void spa_ringbuffer_read_update(struct spa_ringbuffer *rbuf, int32
  * Get the write index and the number of bytes inside the ringbuffer.
  *
  * \param rbuf a spa_ringbuffer
- * \param index the value of writeindex, should be masked to get the
- *              offset in the ringbuffer memory
+ * \param index the value of writeindex, should be taken modulo the size of the
+ *              ringbuffer memory to get the offset in the ringbuffer memory
 * \return the fill level of \a rbuf. values < 0 mean
 *         there was an underrun. values > rbuf->size means there
 *         was an overrun. Subtract from the buffer size to get
@@ -132,20 +131,22 @@ static inline int32_t spa_ringbuffer_get_write_index(struct spa_ringbuffer *rbuf
 }
 
 /**
- * Write \a len bytes to \a rbuf starting \a offset. \a offset must be masked
- * with the size of \a rbuf and len should be smaller than the size.
+ * Write \a len bytes to \a buffer starting \a offset. \a offset must be taken
+ * modulo \a size and len should be smaller than \a size.
 *
 * \param rbuf a spa_ringbuffer
 * \param buffer memory to write to
- * \param offset offset in \a buffer to write to
+ * \param size the size of \a memory
+ * \param offset offset in \a memory to write to
 * \param data source memory
 * \param len number of bytes to write
 */
static inline void
spa_ringbuffer_write_data(struct spa_ringbuffer *rbuf,
-                          void *buffer, uint32_t offset, const void *data, uint32_t len)
+                          void *buffer, uint32_t size,
+                          uint32_t offset, const void *data, uint32_t len)
{
-    uint32_t first = SPA_MIN(len, rbuf->size - offset);
+    uint32_t first = SPA_MIN(len, size - offset);
     memcpy(buffer + offset, data, first);
     if (SPA_UNLIKELY(len > first))
         memcpy(buffer, data + first, len - first);
@@ -85,13 +85,6 @@ int spa_debug_buffer(const struct spa_buffer *buffer)
             fprintf(stderr, " y: %d\n", h->y);
             fprintf(stderr, " width: %d\n", h->width);
             fprintf(stderr, " height: %d\n", h->height);
-        } else if (!strcmp(type_name, SPA_TYPE_META__Ringbuffer)) {
-            struct spa_meta_ringbuffer *h = m->data;
-            fprintf(stderr, " struct spa_meta_ringbuffer:\n");
-            fprintf(stderr, " readindex: %d\n", h->ringbuffer.readindex);
-            fprintf(stderr, " writeindex: %d\n", h->ringbuffer.writeindex);
-            fprintf(stderr, " size: %d\n", h->ringbuffer.size);
-            fprintf(stderr, " mask: %d\n", h->ringbuffer.mask);
         } else if (!strcmp(type_name, SPA_TYPE_META__Shared)) {
             struct spa_meta_shared *h = m->data;
             fprintf(stderr, " struct spa_meta_shared:\n");
@@ -115,8 +108,8 @@ int spa_debug_buffer(const struct spa_buffer *buffer)
         fprintf(stderr, " offset: %d\n", d->mapoffset);
         fprintf(stderr, " maxsize: %u\n", d->maxsize);
         fprintf(stderr, " chunk: %p\n", d->chunk);
-        fprintf(stderr, " offset: %d\n", d->chunk->offset);
-        fprintf(stderr, " size: %u\n", d->chunk->size);
+        fprintf(stderr, " read: %d\n", d->chunk->area.readindex);
+        fprintf(stderr, " write: %u\n", d->chunk->area.writeindex);
         fprintf(stderr, " stride: %d\n", d->chunk->stride);
     }
     return 0;
@@ -335,12 +335,12 @@ impl_node_port_enum_params(struct spa_node *node,
 
         param = spa_pod_builder_object(&b,
             id, t->param_buffers.Buffers,
-            ":", t->param_buffers.size, "iru", this->props.min_latency * this->frame_size,
+            ":", t->param_buffers.size, "iru", this->props.max_latency * this->frame_size,
                 2, this->props.min_latency * this->frame_size,
                    INT32_MAX,
             ":", t->param_buffers.stride, "i", 0,
-            ":", t->param_buffers.buffers, "ir", 2,
-                2, 2, MAX_BUFFERS,
+            ":", t->param_buffers.buffers, "ir", 1,
+                2, 1, MAX_BUFFERS,
             ":", t->param_buffers.align, "i", 16);
     }
     else if (id == t->param.idMeta) {
@@ -354,19 +354,6 @@ impl_node_port_enum_params(struct spa_node *node,
                 ":", t->param_meta.type, "I", t->meta.Header,
                 ":", t->param_meta.size, "i", sizeof(struct spa_meta_header));
             break;
-        case 1:
-            param = spa_pod_builder_object(&b,
-                id, t->param_meta.Meta,
-                ":", t->param_meta.type, "I", t->meta.Ringbuffer,
-                ":", t->param_meta.size, "i", sizeof(struct spa_meta_ringbuffer),
-                ":", t->param_meta.ringbufferSize, "iru",
-                    this->props.max_latency * this->frame_size,
-                    2, this->props.min_latency * this->frame_size,
-                       this->period_frames * this->frame_size,
-                ":", t->param_meta.ringbufferStride, "i", 0,
-                ":", t->param_meta.ringbufferBlocks, "i", 1,
-                ":", t->param_meta.ringbufferAlign, "i", 16);
-            break;
         default:
             return 0;
         }
@@ -491,7 +478,6 @@ impl_node_port_use_buffers(struct spa_node *node,
         b->outstanding = true;
 
         b->h = spa_buffer_find_meta(b->outbuf, this->type.meta.Header);
-        b->rb = spa_buffer_find_meta(b->outbuf, this->type.meta.Ringbuffer);
 
         type = buffers[i]->datas[0].type;
         if ((type == this->type.data.MemFd ||
@@ -343,56 +343,40 @@ pull_frames(struct state *state,
     try_pull(state, frames, do_pull);
 
     while (!spa_list_is_empty(&state->ready) && to_write > 0) {
-        uint8_t *src, *dst;
-        size_t n_bytes, n_frames, size;
-        off_t offs;
+        uint8_t *dst;
+        size_t n_bytes, n_frames;
         struct buffer *b;
-        bool reuse = false;
         struct spa_data *d;
+        struct spa_ringbuffer *ringbuffer;
+        uint32_t index;
+        int32_t avail;
 
         b = spa_list_first(&state->ready, struct buffer, link);
         d = b->outbuf->datas;
 
         dst = SPA_MEMBER(my_areas[0].addr, offset * state->frame_size, uint8_t);
 
-        if (b->rb) {
-            struct spa_ringbuffer *ringbuffer = &b->rb->ringbuffer;
-            uint32_t index;
-            int32_t avail;
-
-            avail = spa_ringbuffer_get_read_index(ringbuffer, &index);
-            avail /= state->frame_size;
-
-            n_frames = SPA_MIN(avail, to_write);
-            n_bytes = n_frames * state->frame_size;
-
-            spa_ringbuffer_read_data(ringbuffer, d[0].data, index % ringbuffer->size, dst, n_bytes);
-            spa_ringbuffer_read_update(ringbuffer, index + n_bytes);
-
-            reuse = avail == n_frames || state->n_buffers == 1;
-        } else {
-            offs = SPA_MIN(d[0].chunk->offset + state->ready_offset, d[0].maxsize);
-            size = SPA_MIN(d[0].chunk->size + offs, d[0].maxsize) - offs;
-            src = SPA_MEMBER(d[0].data, offs, uint8_t);
-
-            n_bytes = SPA_MIN(size, to_write * state->frame_size);
-            n_frames = SPA_MIN(to_write, n_bytes / state->frame_size);
-
-            memcpy(dst, src, n_bytes);
-
-            state->ready_offset += n_bytes;
-            reuse = (state->ready_offset >= size);
-        }
-        if (reuse) {
+        ringbuffer = &d[0].chunk->area;
+
+        avail = spa_ringbuffer_get_read_index(ringbuffer, &index);
+        avail /= state->frame_size;
+
+        n_frames = SPA_MIN(avail, to_write);
+        n_bytes = n_frames * state->frame_size;
+
+        spa_ringbuffer_read_data(ringbuffer, d[0].data, d[0].maxsize,
+                                 index % d[0].maxsize, dst, n_bytes);
+        spa_ringbuffer_read_update(ringbuffer, index + n_bytes);
+
+        if (avail == n_frames || state->n_buffers == 1) {
             spa_list_remove(&b->link);
             b->outstanding = true;
             spa_log_trace(state->log, "alsa-util %p: reuse buffer %u", state, b->outbuf->id);
             state->callbacks->reuse_buffer(state->callbacks_data, 0, b->outbuf->id);
-            state->ready_offset = 0;
         }
         total_frames += n_frames;
         to_write -= n_frames;
 
         spa_log_trace(state->log, "alsa-util %p: written %lu frames, left %ld", state, total_frames, to_write);
     }
@@ -430,6 +414,8 @@ push_frames(struct state *state,
     size_t n_bytes;
     struct buffer *b;
     struct spa_data *d;
+    uint32_t index, avail;
+    int32_t filled;
 
     b = spa_list_first(&state->free, struct buffer, link);
     spa_list_remove(&b->link);
@@ -442,15 +428,18 @@ push_frames(struct state *state,
 
     d = b->outbuf->datas;
 
-    total_frames = SPA_MIN(frames, d[0].maxsize / state->frame_size);
     src = SPA_MEMBER(my_areas[0].addr, offset * state->frame_size, uint8_t);
+
+    filled = spa_ringbuffer_get_write_index(&d[0].chunk->area, &index);
+    avail = (d[0].maxsize - filled) / state->frame_size;
+    total_frames = SPA_MIN(avail, frames);
     n_bytes = total_frames * state->frame_size;
 
-    memcpy(d[0].data, src, n_bytes);
+    spa_ringbuffer_write_data(&d[0].chunk->area, d[0].data, d[0].maxsize,
+                              index % d[0].maxsize, src, n_bytes);
 
-    d[0].chunk->offset = 0;
-    d[0].chunk->size = n_bytes;
-    d[0].chunk->stride = 0;
+    spa_ringbuffer_write_update(&d[0].chunk->area, index + n_bytes);
+    d[0].chunk->stride = state->frame_size;
 
     b->outstanding = true;
     io->buffer_id = b->outbuf->id;
@@ -53,7 +53,6 @@ struct props {
 struct buffer {
     struct spa_buffer *outbuf;
     struct spa_meta_header *h;
-    struct spa_meta_ringbuffer *rb;
     bool outstanding;
     struct spa_list link;
 };
@@ -151,8 +150,6 @@ struct state {
     struct spa_list free;
     struct spa_list ready;
 
-    size_t ready_offset;
-
     bool started;
     struct spa_source source;
     int timerfd;
@@ -43,12 +43,8 @@ struct buffer {
     bool outstanding;
 
     struct spa_buffer *outbuf;
-    struct spa_ringbuffer *rb;
 
     struct spa_meta_header *h;
-
-    bool have_ringbuffer;
-    struct spa_ringbuffer ringbuffer;
 };
 
 struct port {
@@ -439,8 +435,8 @@ impl_node_port_enum_params(struct spa_node *node,
                 2, 16 * this->bpf,
                    INT32_MAX / this->bpf,
             ":", t->param_buffers.stride, "i", 0,
-            ":", t->param_buffers.buffers, "iru", 2,
-                2, 2, MAX_BUFFERS,
+            ":", t->param_buffers.buffers, "iru", 1,
+                2, 1, MAX_BUFFERS,
             ":", t->param_buffers.align, "i", 16);
     }
     else if (id == t->param.idMeta) {
@@ -454,17 +450,6 @@ impl_node_port_enum_params(struct spa_node *node,
                 ":", t->param_meta.type, "I", t->meta.Header,
                 ":", t->param_meta.size, "i", sizeof(struct spa_meta_header));
             break;
-        case 1:
-            param = spa_pod_builder_object(&b,
-                id, t->param_meta.Meta,
-                ":", t->param_meta.type, "I", t->meta.Ringbuffer,
-                ":", t->param_meta.size, "i", sizeof(struct spa_meta_ringbuffer),
-                ":", t->param_meta.ringbufferSize, "iru", 1024 * this->bpf,
-                    2, 16 * this->bpf, INT32_MAX / this->bpf,
-                ":", t->param_meta.ringbufferStride, "i", 0,
-                ":", t->param_meta.ringbufferBlocks, "i", 1,
-                ":", t->param_meta.ringbufferAlign, "i", 16);
-            break;
         default:
             return 0;
         }
@@ -605,24 +590,12 @@ impl_node_port_use_buffers(struct spa_node *node,
     for (i = 0; i < n_buffers; i++) {
         struct buffer *b;
         struct spa_data *d = buffers[i]->datas;
-        struct spa_meta_ringbuffer *rb;
 
         b = &port->buffers[i];
         b->outbuf = buffers[i];
         b->outstanding = (direction == SPA_DIRECTION_INPUT);
         b->h = spa_buffer_find_meta(buffers[i], t->meta.Header);
 
-        if ((rb = spa_buffer_find_meta(buffers[i], t->meta.Ringbuffer))) {
-            b->rb = &rb->ringbuffer;
-            b->have_ringbuffer = true;
-        }
-        else {
-            b->rb = &b->ringbuffer;
-            b->rb->size = d[0].maxsize;
-            b->rb->mask = d[0].maxsize - 1;
-            b->have_ringbuffer = false;
-        }
-
         if (!((d[0].type == t->data.MemPtr ||
                d[0].type == t->data.MemFd ||
                d[0].type == t->data.DmaBuf) && d[0].data != NULL)) {
@@ -715,17 +688,23 @@ add_port_data(struct impl *this, void *out, size_t outsize, size_t next, struct
 {
     size_t insize;
     struct buffer *b;
-    uint32_t index = 0, offset, len1, len2;
+    uint32_t index = 0, offset, len1, len2, maxsize;
     mix_func_t mix = layer == 0 ? this->copy : this->add;
+    void *data;
+    struct spa_ringbuffer *rb;
 
     b = spa_list_first(&port->queue, struct buffer, link);
 
-    insize = spa_ringbuffer_get_read_index(b->rb, &index);
+    maxsize = b->outbuf->datas[0].maxsize;
+    data = b->outbuf->datas[0].data;
+    rb = &b->outbuf->datas[0].chunk->area,
+
+    insize = spa_ringbuffer_get_read_index(rb, &index);
     outsize = SPA_MIN(outsize, insize);
 
-    offset = index % b->rb->size;
-    if (offset + outsize > b->rb->size) {
-        len1 = b->rb->size - offset;
+    offset = index % maxsize;
+    if (offset + outsize > maxsize) {
+        len1 = maxsize - offset;
         len2 = outsize - len1;
     }
     else {
@@ -733,15 +712,15 @@ add_port_data(struct impl *this, void *out, size_t outsize, size_t next, struct
         len2 = 0;
     }
 
-    mix(out, SPA_MEMBER(b->outbuf->datas[0].data, offset, void), len1);
+    mix(out, SPA_MEMBER(data, offset, void), len1);
     if (len2 > 0)
-        mix(out + len1, b->outbuf->datas[0].data, len2);
+        mix(out + len1, data, len2);
 
-    spa_ringbuffer_read_update(b->rb, index + outsize);
+    spa_ringbuffer_read_update(rb, index + outsize);
 
     port->queued_bytes -= outsize;
 
-    if (outsize == insize || (b->have_ringbuffer && next == 0)) {
+    if (outsize == insize || next == 0) {
         spa_log_trace(this->log, NAME " %p: return buffer %d on port %p %zd",
                       this, b->outbuf->id, port, outsize);
         port->io->buffer_id = b->outbuf->id;
@@ -761,8 +740,9 @@ static int mix_output(struct impl *this, size_t n_bytes)
     struct port *outport;
     struct spa_port_io *outio;
     struct spa_data *od;
-    int32_t filled, avail;
+    int32_t filled, avail, maxsize;
     uint32_t index = 0, len1, len2, offset;
+    struct spa_ringbuffer *rb;
 
     outport = GET_OUT_PORT(this, 0);
     outio = outport->io;
@@ -777,22 +757,18 @@ static int mix_output(struct impl *this, size_t n_bytes)
     outbuf->outstanding = true;
 
     od = outbuf->outbuf->datas;
+    maxsize = od[0].maxsize;
 
-    if (!outbuf->have_ringbuffer) {
-        outbuf->rb->readindex = outbuf->rb->writeindex = 0;
-        od[0].chunk->offset = 0;
-        od[0].chunk->size = n_bytes;
-        od[0].chunk->stride = 0;
-    }
+    rb = &od[0].chunk->area;
 
-    filled = spa_ringbuffer_get_write_index(outbuf->rb, &index);
-    avail = outbuf->rb->size - filled;
-    offset = index % outbuf->rb->size;
+    filled = spa_ringbuffer_get_write_index(rb, &index);
+    avail = maxsize - filled;
+    offset = index % maxsize;
 
     n_bytes = SPA_MIN(n_bytes, avail);
 
-    if (offset + n_bytes > outbuf->rb->size) {
-        len1 = outbuf->rb->size - offset;
+    if (offset + n_bytes > maxsize) {
+        len1 = maxsize - offset;
         len2 = n_bytes - len1;
     }
     else {
@@ -820,7 +796,7 @@ static int mix_output(struct impl *this, size_t n_bytes)
         layer++;
     }
 
-    spa_ringbuffer_write_update(outbuf->rb, index + n_bytes);
+    spa_ringbuffer_write_update(rb, index + n_bytes);
 
     outio->buffer_id = outbuf->outbuf->id;
     outio->status = SPA_STATUS_HAVE_BUFFER;
@@ -858,6 +834,7 @@ static int impl_node_process_input(struct spa_node *node)
             inio->status == SPA_STATUS_HAVE_BUFFER && inio->buffer_id < inport->n_buffers) {
             struct buffer *b = &inport->buffers[inio->buffer_id];
             uint32_t index;
+            struct spa_ringbuffer *rb;
 
             if (!b->outstanding) {
                 spa_log_warn(this->log, NAME " %p: buffer %u in use", this,
@@ -872,11 +849,8 @@ static int impl_node_process_input(struct spa_node *node)
 
             spa_list_append(&inport->queue, &b->link);
 
-            if (!b->have_ringbuffer) {
-                b->rb->readindex = 0;
-                b->rb->writeindex = b->outbuf->datas[0].chunk->size;
-            }
-            inport->queued_bytes += spa_ringbuffer_get_read_index(b->rb, &index);
+            rb = &b->outbuf->datas[0].chunk->area;
+            inport->queued_bytes += spa_ringbuffer_get_read_index(rb, &index);
 
             spa_log_trace(this->log, NAME " %p: queue buffer %d on port %d %zd %zd",
                           this, b->outbuf->id, i, inport->queued_bytes, min_queued);
@@ -105,7 +105,6 @@ struct buffer {
     struct spa_buffer *outbuf;
     bool outstanding;
     struct spa_meta_header *h;
-    struct spa_meta_ringbuffer *rb;
     struct spa_list link;
 };
@@ -298,6 +297,12 @@ static int make_buffer(struct impl *this)
     struct buffer *b;
     struct spa_port_io *io = this->io;
     int n_bytes, n_samples;
+    uint32_t maxsize;
+    void *data;
+    struct spa_ringbuffer *rb;
+    struct spa_data *d;
+    int32_t filled, avail;
+    uint32_t index, offset, l0, l1;
 
     read_timer(this);
@@ -310,7 +315,11 @@ static int make_buffer(struct impl *this)
     spa_list_remove(&b->link);
     b->outstanding = true;
 
-    n_bytes = b->outbuf->datas[0].maxsize;
+    d = b->outbuf->datas;
+    maxsize = d[0].maxsize;
+    data = d[0].data;
+
+    n_bytes = maxsize;
     if (io->range.min_size != 0) {
         n_bytes = SPA_MIN(n_bytes, io->range.min_size);
         if (io->range.max_size < n_bytes)
@@ -318,41 +327,32 @@ static int make_buffer(struct impl *this)
     }
 
     spa_log_trace(this->log, NAME " %p: dequeue buffer %d %d %d", this, b->outbuf->id,
-                  b->outbuf->datas[0].maxsize, n_bytes);
+                  maxsize, n_bytes);
 
-    if (b->rb) {
-        int32_t filled, avail;
-        uint32_t index, offset, l0, l1;
-
-        filled = spa_ringbuffer_get_write_index(&b->rb->ringbuffer, &index);
-        avail = b->rb->ringbuffer.size - filled;
-        n_bytes = SPA_MIN(avail, n_bytes);
-
-        n_samples = n_bytes / this->bpf;
-
-        offset = index & b->rb->ringbuffer.mask;
-
-        if (offset + n_bytes > b->rb->ringbuffer.size) {
-            l0 = (b->rb->ringbuffer.size - offset) / this->bpf;
-            l1 = n_samples - l0;
-        }
-        else {
-            l0 = n_samples;
-            l1 = 0;
-        }
-
-        this->render_func(this, SPA_MEMBER(b->outbuf->datas[0].data, offset, void), l0);
-        if (l1)
-            this->render_func(this, b->outbuf->datas[0].data, l1);
-
-        spa_ringbuffer_write_update(&b->rb->ringbuffer, index + n_bytes);
-    } else {
-        n_samples = n_bytes / this->bpf;
-        this->render_func(this, b->outbuf->datas[0].data, n_samples);
-        b->outbuf->datas[0].chunk->size = n_bytes;
-        b->outbuf->datas[0].chunk->offset = 0;
-        b->outbuf->datas[0].chunk->stride = 0;
-    }
+    rb = &d[0].chunk->area;
+
+    filled = spa_ringbuffer_get_write_index(rb, &index);
+    avail = maxsize - filled;
+    n_bytes = SPA_MIN(avail, n_bytes);
+
+    n_samples = n_bytes / this->bpf;
+
+    offset = index % maxsize;
+
+    if (offset + n_bytes > maxsize) {
+        l0 = (maxsize - offset) / this->bpf;
+        l1 = n_samples - l0;
+    }
+    else {
+        l0 = n_samples;
+        l1 = 0;
+    }
+
+    this->render_func(this, SPA_MEMBER(data, offset, void), l0);
+    if (l1)
+        this->render_func(this, data, l1);
+
+    spa_ringbuffer_write_update(rb, index + n_bytes);
 
     if (b->h) {
         b->h->seq = this->sample_count;
@@ -628,7 +628,7 @@ impl_node_port_enum_params(struct spa_node *node,
                 2, 16 * this->bpf,
                    INT32_MAX / this->bpf,
             ":", t->param_buffers.stride, "i", 0,
-            ":", t->param_buffers.buffers, "iru", 2,
+            ":", t->param_buffers.buffers, "iru", 1,
                 2, 1, 32,
             ":", t->param_buffers.align, "i", 16);
     }
@@ -643,17 +643,6 @@ impl_node_port_enum_params(struct spa_node *node,
                 ":", t->param_meta.type, "I", t->meta.Header,
                 ":", t->param_meta.size, "i", sizeof(struct spa_meta_header));
             break;
-        case 1:
-            param = spa_pod_builder_object(&b,
-                id, t->param_meta.Meta,
-                ":", t->param_meta.type, "I", t->meta.Ringbuffer,
-                ":", t->param_meta.size, "i", sizeof(struct spa_meta_ringbuffer),
-                ":", t->param_meta.ringbufferSize, "ir", 5512 * this->bpf,
-                    2, 16 * this->bpf, INT32_MAX / this->bpf,
-                ":", t->param_meta.ringbufferStride, "i", 0,
-                ":", t->param_meta.ringbufferBlocks, "i", 1,
-                ":", t->param_meta.ringbufferAlign, "i", 16);
-            break;
         default:
             return 0;
         }
@@ -783,7 +772,6 @@ impl_node_port_use_buffers(struct spa_node *node,
         b->outbuf = buffers[i];
         b->outstanding = false;
         b->h = spa_buffer_find_meta(buffers[i], this->type.meta.Header);
-        b->rb = spa_buffer_find_meta(buffers[i], this->type.meta.Ringbuffer);
 
         if ((d[0].type == this->type.data.MemPtr ||
              d[0].type == this->type.data.MemFd ||
@@ -86,8 +86,8 @@ impl_log_logv(struct spa_log *log,
         uint64_t count = 1;
 
         spa_ringbuffer_get_write_index(&impl->trace_rb, &index);
-        spa_ringbuffer_write_data(&impl->trace_rb, impl->trace_data,
-                                  index & impl->trace_rb.mask, location, size);
+        spa_ringbuffer_write_data(&impl->trace_rb, impl->trace_data, TRACE_BUFFER,
+                                  index & (TRACE_BUFFER - 1), location, size);
         spa_ringbuffer_write_update(&impl->trace_rb, index + size);
 
         if (write(impl->source.fd, &count, sizeof(uint64_t)) != sizeof(uint64_t))
@@ -124,12 +124,12 @@ static void on_trace_event(struct spa_source *source)
     while ((avail = spa_ringbuffer_get_read_index(&impl->trace_rb, &index)) > 0) {
         uint32_t offset, first;
 
-        if (avail > impl->trace_rb.size) {
-            index += avail - impl->trace_rb.size;
-            avail = impl->trace_rb.size;
+        if (avail > TRACE_BUFFER) {
+            index += avail - TRACE_BUFFER;
+            avail = TRACE_BUFFER;
         }
-        offset = index & impl->trace_rb.mask;
-        first = SPA_MIN(avail, impl->trace_rb.size - offset);
+        offset = index & (TRACE_BUFFER - 1);
+        first = SPA_MIN(avail, TRACE_BUFFER - offset);
 
         fwrite(impl->trace_data + offset, first, 1, stderr);
         if (SPA_UNLIKELY(avail > first)) {
@@ -223,7 +223,7 @@ impl_init(const struct spa_handle_factory *factory,
         this->have_source = true;
     }
 
-    spa_ringbuffer_init(&this->trace_rb, TRACE_BUFFER);
+    spa_ringbuffer_init(&this->trace_rb);
 
     spa_log_debug(&this->log, NAME " %p: initialized", this);
@@ -212,18 +212,18 @@ loop_invoke(struct spa_loop *loop,
     uint32_t idx, offset, l0;
 
     filled = spa_ringbuffer_get_write_index(&impl->buffer, &idx);
-    if (filled < 0 || filled > impl->buffer.size) {
+    if (filled < 0 || filled > DATAS_SIZE) {
         spa_log_warn(impl->log, NAME " %p: queue xrun %d", impl, filled);
         return -EPIPE;
     }
-    avail = impl->buffer.size - filled;
+    avail = DATAS_SIZE - filled;
     if (avail < sizeof(struct invoke_item)) {
         spa_log_warn(impl->log, NAME " %p: queue full %d", impl, avail);
         return -EPIPE;
     }
-    offset = idx & impl->buffer.mask;
+    offset = idx & (DATAS_SIZE - 1);
 
-    l0 = impl->buffer.size - offset;
+    l0 = DATAS_SIZE - offset;
 
     item = SPA_MEMBER(impl->buffer_data, offset, struct invoke_item);
     item->func = func;
@@ -272,7 +272,7 @@ static void wakeup_func(void *data, uint64_t count)
 
     while (spa_ringbuffer_get_read_index(&impl->buffer, &index) > 0) {
         struct invoke_item *item =
-            SPA_MEMBER(impl->buffer_data, index & impl->buffer.mask, struct invoke_item);
+            SPA_MEMBER(impl->buffer_data, index & (DATAS_SIZE - 1), struct invoke_item);
         item->res = item->func(&impl->loop, true, item->seq, item->size, item->data,
                                item->user_data);
         spa_ringbuffer_read_update(&impl->buffer, index + item->item_size);
@@ -737,7 +737,7 @@ impl_init(const struct spa_handle_factory *factory,
     spa_list_init(&impl->destroy_list);
     spa_hook_list_init(&impl->hooks_list);
 
-    spa_ringbuffer_init(&impl->buffer, DATAS_SIZE);
+    spa_ringbuffer_init(&impl->buffer);
 
     impl->wakeup = spa_loop_utils_add_event(&impl->utils, wakeup_func, impl);
     impl->ack_fd = eventfd(0, EFD_CLOEXEC);
@@ -267,8 +267,7 @@ static int consume_buffer(struct impl *this)
 
     render_buffer(this, b);
 
-    b->outbuf->datas[0].chunk->offset = 0;
-    b->outbuf->datas[0].chunk->size = n_bytes;
+    spa_ringbuffer_set_avail(&b->outbuf->datas[0].chunk->area, n_bytes);
     b->outbuf->datas[0].chunk->stride = n_bytes;
 
     if (b->h) {
@@ -279,8 +279,7 @@ static int make_buffer(struct impl *this)
 
     fill_buffer(this, b);
 
-    b->outbuf->datas[0].chunk->offset = 0;
-    b->outbuf->datas[0].chunk->size = n_bytes;
+    spa_ringbuffer_set_avail(&b->outbuf->datas[0].chunk->area, n_bytes);
     b->outbuf->datas[0].chunk->stride = n_bytes;
 
     if (b->h) {
@@ -930,8 +930,7 @@ static int mmap_read(struct impl *this)
     }
 
     d = b->outbuf->datas;
-    d[0].chunk->offset = 0;
-    d[0].chunk->size = buf.bytesused;
+    spa_ringbuffer_set_avail(&d[0].chunk->area, buf.bytesused);
     d[0].chunk->stride = port->fmt.fmt.pix.bytesperline;
 
     b->outstanding = true;
@@ -1091,8 +1090,7 @@ mmap_init(struct impl *this,
         d = buffers[i]->datas;
         d[0].mapoffset = 0;
         d[0].maxsize = b->v4l2_buffer.length;
-        d[0].chunk->offset = 0;
-        d[0].chunk->size = b->v4l2_buffer.length;
+        spa_ringbuffer_set_avail(&d[0].chunk->area, 0);
         d[0].chunk->stride = state->fmt.fmt.pix.bytesperline;
 
         if (state->export_buf) {
@@ -275,7 +275,7 @@ static int make_buffer(struct impl *this)
 {
     struct buffer *b;
     struct spa_port_io *io = this->io;
-    int n_bytes;
+    uint32_t n_bytes;
 
     read_timer(this);
@@ -294,8 +294,7 @@ static int make_buffer(struct impl *this)
 
     fill_buffer(this, b);
 
-    b->outbuf->datas[0].chunk->offset = 0;
-    b->outbuf->datas[0].chunk->size = n_bytes;
+    spa_ringbuffer_set_avail(&b->outbuf->datas[0].chunk->area, n_bytes);
     b->outbuf->datas[0].chunk->stride = this->stride;
 
     if (b->h) {
@@ -703,10 +703,10 @@ static void do_volume(struct impl *this, struct spa_buffer *dbuf, struct spa_buf
         sd = &sbuf->datas[si];
         dd = &dbuf->datas[di];
 
-        src = (int16_t *) ((uint8_t *) sd->data + sd->chunk->offset + soff);
+        src = (int16_t *) ((uint8_t *) sd->data + sd->chunk->area.readindex + soff);
         dst = (int16_t *) ((uint8_t *) dd->data + doff);
 
-        n_bytes = SPA_MIN(sd->chunk->size - soff, dd->maxsize - doff);
+        n_bytes = SPA_MIN(sd->chunk->area.writeindex - soff, dd->maxsize - doff);
         n_samples = n_bytes / sizeof(uint16_t);
 
         for (i = 0; i < n_samples; i++)
@@ -715,10 +715,9 @@ static void do_volume(struct impl *this, struct spa_buffer *dbuf, struct spa_buf
         soff += n_bytes;
         doff += n_bytes;
 
-        dd->chunk->offset = 0;
-        dd->chunk->size = doff;
+        spa_ringbuffer_set_avail(&dd->chunk->area, doff);
 
-        if (soff >= sd->chunk->size) {
+        if (soff >= sd->chunk->area.writeindex) {
             si++;
             soff = 0;
         }
@@ -10,6 +10,7 @@
 #define MAX_VALUE 0x10000
 
 struct spa_ringbuffer rb;
+uint32_t size;
 uint8_t *data;
 
 static int fill_int_array(int *array, int start, int count)
@@ -47,7 +48,7 @@ static void *reader_start(void *arg)
         uint32_t index;
 
         if (spa_ringbuffer_get_read_index(&rb, &index) >= ARRAY_SIZE * sizeof(int)) {
-            spa_ringbuffer_read_data(&rb, data, index & rb.mask, b,
+            spa_ringbuffer_read_data(&rb, data, size, index & (size - 1), b,
                                      ARRAY_SIZE * sizeof(int));
 
             if (!cmp_array(a, b, ARRAY_SIZE)) {
@@ -78,7 +79,7 @@ static void *writer_start(void *arg)
         uint32_t index;
 
         if (spa_ringbuffer_get_write_index(&rb, &index) >= ARRAY_SIZE * sizeof(int)) {
-            spa_ringbuffer_write_data(&rb, data, index & rb.mask, a,
+            spa_ringbuffer_write_data(&rb, data, size, index & (size - 1), a,
                                       ARRAY_SIZE * sizeof(int));
             spa_ringbuffer_write_update(&rb, index + ARRAY_SIZE * sizeof(int));
@@ -91,8 +92,6 @@ static void *writer_start(void *arg)
 
 int main(int argc, char *argv[])
 {
-    int size;
-
     printf("starting ringbuffer stress test\n");
 
     sscanf(argv[1], "%d", &size);
@@ -100,7 +99,7 @@ int main(int argc, char *argv[])
     printf("buffer size (bytes): %d\n", size);
     printf("array size (bytes): %ld\n", sizeof(int) * ARRAY_SIZE);
 
-    spa_ringbuffer_init(&rb, size);
+    spa_ringbuffer_init(&rb);
     data = malloc(size);
 
     pthread_t reader_thread, writer_thread;
@@ -171,8 +171,7 @@ init_buffer(struct data *data, struct spa_buffer **bufs, struct buffer *ba, int
         b->datas[0].maxsize = size;
         b->datas[0].data = malloc(size);
         b->datas[0].chunk = &b->chunks[0];
-        b->datas[0].chunk->offset = 0;
-        b->datas[0].chunk->size = size;
+        spa_ringbuffer_set_avail(&b->datas[0].chunk->area, size);
         b->datas[0].chunk->stride = 0;
     }
 }
@@ -182,8 +182,7 @@ init_buffer(struct data *data, struct spa_buffer **bufs, struct buffer *ba, int
         b->datas[0].maxsize = size;
         b->datas[0].data = malloc(size);
         b->datas[0].chunk = &b->chunks[0];
-        b->datas[0].chunk->offset = 0;
-        b->datas[0].chunk->size = size;
+        spa_ringbuffer_init(&b->datas[0].chunk->area);
         b->datas[0].chunk->stride = 0;
     }
 }
@@ -167,8 +167,7 @@ init_buffer(struct data *data, struct spa_buffer **bufs, struct buffer *ba, int
         b->datas[0].maxsize = size;
         b->datas[0].data = malloc(size);
         b->datas[0].chunk = &b->chunks[0];
-        b->datas[0].chunk->offset = 0;
-        b->datas[0].chunk->size = size;
+        spa_ringbuffer_set_avail(&b->datas[0].chunk->area, size);
         b->datas[0].chunk->stride = 0;
     }
 }
@@ -85,7 +85,6 @@ struct buffer {
     struct spa_buffer buffer;
     struct spa_meta metas[2];
     struct spa_meta_header header;
-    struct spa_meta_ringbuffer rb;
     struct spa_data datas[1];
     struct spa_chunk chunks[1];
 };
@@ -143,11 +142,6 @@ init_buffer(struct data *data, struct spa_buffer **bufs, struct buffer *ba, int
     b->metas[0].data = &b->header;
     b->metas[0].size = sizeof(b->header);
 
-    spa_ringbuffer_init(&b->rb.ringbuffer, size);
-    b->metas[1].type = data->type.meta.Ringbuffer;
-    b->metas[1].data = &b->rb;
-    b->metas[1].size = sizeof(b->rb);
-
     b->datas[0].type = data->type.data.MemPtr;
     b->datas[0].flags = 0;
     b->datas[0].fd = -1;
@@ -155,8 +149,7 @@ init_buffer(struct data *data, struct spa_buffer **bufs, struct buffer *ba, int
     b->datas[0].maxsize = size;
     b->datas[0].data = malloc(size);
     b->datas[0].chunk = &b->chunks[0];
-    b->datas[0].chunk->offset = 0;
-    b->datas[0].chunk->size = size;
+    spa_ringbuffer_set_avail(&b->datas[0].chunk->area, 0);
     b->datas[0].chunk->stride = 0;
     }
 }
@@ -235,8 +235,7 @@ static void on_source_have_output(void *_data)
         datas[0].mapoffset = 0;
         datas[0].maxsize = sstride * 240;
         datas[0].data = sdata;
-        datas[0].chunk->offset = 0;
-        datas[0].chunk->size = sstride * 240;
+        spa_ringbuffer_set_avail(&datas[0].chunk->area, sstride * 240);
         datas[0].chunk->stride = sstride;
     } else {
         if (SDL_LockTexture(data->texture, NULL, &ddata, &dstride) < 0) {
@@ -376,8 +375,7 @@ static int alloc_buffers(struct data *data)
         b->datas[0].maxsize = stride * 240;
         b->datas[0].data = ptr;
         b->datas[0].chunk = &b->chunks[0];
-        b->datas[0].chunk->offset = 0;
-        b->datas[0].chunk->size = stride * 240;
+        spa_ringbuffer_set_avail(&b->datas[0].chunk->area, stride * 240);
         b->datas[0].chunk->stride = stride;
     }
     data->n_buffers = MAX_BUFFERS;
@@ -60,7 +60,6 @@ struct buffer {
     struct spa_list link;
     void *ptr;
     bool mapped;
-    struct spa_meta_ringbuffer *rb;
 };
 
 struct data {
@@ -245,8 +244,8 @@ static int impl_port_enum_params(struct spa_node *node,
             ":", t->param_buffers.size, "iru", 1024,
                 2, 32, 4096,
             ":", t->param_buffers.stride, "i", 0,
-            ":", t->param_buffers.buffers, "iru", 2,
-                2, 2, 32,
+            ":", t->param_buffers.buffers, "iru", 1,
+                2, 1, 32,
             ":", t->param_buffers.align, "i", 16);
     }
     else if (id == t->param.idMeta) {
@@ -257,17 +256,6 @@ static int impl_port_enum_params(struct spa_node *node,
                 ":", t->param_meta.type, "I", t->meta.Header,
                 ":", t->param_meta.size, "i", sizeof(struct spa_meta_header));
             break;
-        case 1:
-            param = spa_pod_builder_object(builder,
-                id, t->param_meta.Meta,
-                ":", t->param_meta.type, "I", t->meta.Ringbuffer,
-                ":", t->param_meta.size, "i", sizeof(struct spa_meta_ringbuffer),
-                ":", t->param_meta.ringbufferSize, "ir", 1024 * 4,
-                    2, 16 * 4, INT32_MAX / 4,
-                ":", t->param_meta.ringbufferStride, "i", 0,
-                ":", t->param_meta.ringbufferBlocks, "i", 1,
-                ":", t->param_meta.ringbufferAlign, "i", 16);
-            break;
         default:
             return 0;
         }
@@ -348,7 +336,6 @@ static int impl_port_use_buffers(struct spa_node *node, enum spa_direction direc
             return -EINVAL;
         }
         b->buffer = buffers[i];
-        b->rb = spa_buffer_find_meta(buffers[i], d->type.meta.Ringbuffer);
         pw_log_info("got buffer %d size %d", i, datas[0].maxsize);
         spa_list_append(&d->empty, &b->link);
     }
@@ -376,7 +363,9 @@ static int impl_node_process_output(struct spa_node *node)
     int i, c, n_samples, avail;
     int16_t *dst;
     struct spa_port_io *io = d->io;
-    uint32_t index = 0;
+    uint32_t maxsize, index = 0;
+    struct spa_ringbuffer *rb;
+    uint32_t filled, offset;
 
     if (io->buffer_id < d->n_buffers) {
         reuse_buffer(d, io->buffer_id);
@@ -389,22 +378,17 @@ static int impl_node_process_output(struct spa_node *node)
     b = spa_list_first(&d->empty, struct buffer, link);
     spa_list_remove(&b->link);
 
-    if (b->rb) {
-        uint32_t filled, offset;
-
-        filled = spa_ringbuffer_get_write_index(&b->rb->ringbuffer, &index);
-        avail = b->rb->ringbuffer.size - filled;
-        offset = index % b->rb->ringbuffer.size;
-
-        if (offset + avail > b->rb->ringbuffer.size)
-            avail = b->rb->ringbuffer.size - offset;
-
-        dst = SPA_MEMBER(b->ptr, offset, void);
-    }
-    else {
-        dst = b->ptr;
-        avail = b->buffer->datas[0].maxsize;
-    }
+    maxsize = b->buffer->datas[0].maxsize;
+    rb = &b->buffer->datas[0].chunk->area;
+
+    filled = spa_ringbuffer_get_write_index(rb, &index);
+    avail = maxsize - filled;
+    offset = index % maxsize;
+
+    if (offset + avail > maxsize)
+        avail = maxsize - offset;
+
+    dst = SPA_MEMBER(b->ptr, offset, void);
     n_samples = avail / (sizeof(int16_t) * d->format.channels);
 
     for (i = 0; i < n_samples; i++) {
@@ -420,14 +404,7 @@ static int impl_node_process_output(struct spa_node *node)
         *dst++ = val;
     }
 
-    if (b->rb) {
-        spa_ringbuffer_write_update(&b->rb->ringbuffer, index + avail);
-    }
-    else {
-        b->buffer->datas[0].chunk->offset = 0;
-        b->buffer->datas[0].chunk->size = avail;
-        b->buffer->datas[0].chunk->stride = 0;
-    }
+    spa_ringbuffer_write_update(rb, index + avail);
 
     io->buffer_id = b->buffer->id;
     io->status = SPA_STATUS_HAVE_BUFFER;
@@ -94,8 +94,8 @@ static void transport_reset_area(struct pw_client_node_transport *trans)
         trans->outputs[i].status = SPA_STATUS_OK;
         trans->outputs[i].buffer_id = SPA_ID_INVALID;
     }
-    spa_ringbuffer_init(trans->input_buffer, INPUT_BUFFER_SIZE);
-    spa_ringbuffer_init(trans->output_buffer, OUTPUT_BUFFER_SIZE);
+    spa_ringbuffer_init(trans->input_buffer);
+    spa_ringbuffer_init(trans->output_buffer);
 }
 
 static void destroy(struct pw_client_node_transport *trans)
@@ -118,14 +118,14 @@ static int add_message(struct pw_client_node_transport *trans, struct pw_client_
         return -EINVAL;
 
     filled = spa_ringbuffer_get_write_index(trans->output_buffer, &index);
-    avail = trans->output_buffer->size - filled;
+    avail = OUTPUT_BUFFER_SIZE - filled;
     size = SPA_POD_SIZE(message);
     if (avail < size)
         return -ENOSPC;
 
     spa_ringbuffer_write_data(trans->output_buffer,
-                              trans->output_data,
-                              index & trans->output_buffer->mask, message, size);
+                              trans->output_data, OUTPUT_BUFFER_SIZE,
+                              index & (OUTPUT_BUFFER_SIZE - 1), message, size);
     spa_ringbuffer_write_update(trans->output_buffer, index + size);
 
     return 0;
@@ -144,8 +144,8 @@ static int next_message(struct pw_client_node_transport *trans, struct pw_client_
         return 0;
 
     spa_ringbuffer_read_data(trans->input_buffer,
-                             trans->input_data,
-                             impl->current_index & trans->input_buffer->mask,
+                             trans->input_data, INPUT_BUFFER_SIZE,
+                             impl->current_index & (INPUT_BUFFER_SIZE - 1),
                              &impl->current, sizeof(struct pw_client_node_message));
 
     *message = impl->current;
@@ -164,8 +164,8 @@ static int parse_message(struct pw_client_node_transport *trans, void *message)
     size = SPA_POD_SIZE(&impl->current);
 
     spa_ringbuffer_read_data(trans->input_buffer,
-                             trans->input_data,
-                             impl->current_index & trans->input_buffer->mask, message, size);
+                             trans->input_data, INPUT_BUFFER_SIZE,
+                             impl->current_index & (INPUT_BUFFER_SIZE - 1), message, size);
     spa_ringbuffer_read_update(trans->input_buffer, impl->current_index + size);
 
     return 0;
@@ -304,7 +304,8 @@ static int driver_process_output(struct spa_node *node)
             op++;
             in_io->status = SPA_STATUS_NEED_BUFFER;
         }
-        out->outbuf->datas[0].chunk->size = ctrl->buffer_size * sizeof(int16_t) * 2;
+        spa_ringbuffer_set_avail(&out->outbuf->datas[0].chunk->area,
+                                 ctrl->buffer_size * sizeof(int16_t) * 2);
 
         spa_hook_list_call(&nd->listener_list, struct pw_jack_node_events, push);
         gn->ready[SPA_DIRECTION_INPUT] = gn->required[SPA_DIRECTION_OUTPUT] = 0;
@@ -244,26 +244,6 @@ static struct spa_pod *find_param(struct spa_pod **params, int n_params, uint32_
     return NULL;
 }
 
-static struct spa_pod *find_meta(struct pw_core *core, struct spa_pod **params,
-                                 int n_params, uint32_t type)
-{
-    uint32_t i;
-
-    for (i = 0; i < n_params; i++) {
-        if (spa_pod_is_object_type (params[i], core->type.param_meta.Meta)) {
-            uint32_t qtype;
-
-            if (spa_pod_object_parse(params[i],
-                    ":", core->type.param_meta.type, "I", &qtype, NULL) < 0)
-                continue;
-
-            if (qtype == type)
-                return params[i];
-        }
-    }
-    return NULL;
-}
-
 static struct spa_buffer **alloc_buffers(struct pw_link *this,
                                          uint32_t n_buffers,
                                          uint32_t n_params,
@@ -357,9 +337,6 @@ static struct spa_buffer **alloc_buffers(struct pw_link *this,
                 msh->fd = mem->fd;
                 msh->offset = data_size * i;
                 msh->size = data_size;
-            } else if (m->type == this->core->type.meta.Ringbuffer) {
-                struct spa_meta_ringbuffer *rb = p;
-                spa_ringbuffer_init(&rb->ringbuffer, data_sizes[0]);
             }
             p += m->size;
         }
@@ -381,8 +358,7 @@ static struct spa_buffer **alloc_buffers(struct pw_link *this,
                 d->mapoffset = SPA_PTRDIFF(ddp, mem->ptr);
                 d->maxsize = data_sizes[j];
                 d->data = SPA_MEMBER(mem->ptr, d->mapoffset, void);
-                d->chunk->offset = 0;
-                d->chunk->size = data_sizes[j];
+                spa_ringbuffer_set_avail(&d->chunk->area, 0);
                 d->chunk->stride = data_strides[j];
                 ddp += data_sizes[j];
             } else {
@@ -532,43 +508,29 @@ static int do_allocation(struct pw_link *this, uint32_t in_state, uint32_t out_s
         offset += SPA_ROUND_UP_N(SPA_POD_SIZE(params[i]), 8);
     }
 
-    param = find_meta(this->core, params, n_params, t->meta.Ringbuffer);
-    if (param) {
-        uint32_t ms, s;
-        max_buffers = 1;
-
-        if (spa_pod_object_parse(param,
-                ":", t->param_meta.ringbufferSize, "i", &ms,
-                ":", t->param_meta.ringbufferStride, "i", &s, NULL) >= 0) {
-            minsize = ms;
-            stride = s;
-        }
-    } else {
-        max_buffers = MAX_BUFFERS;
-        minsize = stride = 0;
-        param = find_param(params, n_params,
-                           t->param_buffers.Buffers);
-        if (param) {
-            uint32_t qmax_buffers = max_buffers,
-                qminsize = minsize, qstride = stride;
-
-            spa_pod_object_parse(param,
-                ":", t->param_buffers.size, "i", &qminsize,
-                ":", t->param_buffers.stride, "i", &qstride,
-                ":", t->param_buffers.buffers, "i", &qmax_buffers, NULL);
-
-            max_buffers =
-                qmax_buffers == 0 ? max_buffers : SPA_MIN(qmax_buffers,
-                                                          max_buffers);
-            minsize = SPA_MAX(minsize, qminsize);
-            stride = SPA_MAX(stride, qstride);
-
-            pw_log_debug("%d %d %d -> %zd %zd %d", qminsize, qstride, qmax_buffers,
-                         minsize, stride, max_buffers);
-        } else {
-            pw_log_warn("no buffers param");
-            minsize = 1024;
-        }
-    }
+    max_buffers = MAX_BUFFERS;
+    minsize = stride = 0;
+    param = find_param(params, n_params, t->param_buffers.Buffers);
+    if (param) {
+        uint32_t qmax_buffers = max_buffers,
+            qminsize = minsize, qstride = stride;
+
+        spa_pod_object_parse(param,
+            ":", t->param_buffers.size, "i", &qminsize,
+            ":", t->param_buffers.stride, "i", &qstride,
+            ":", t->param_buffers.buffers, "i", &qmax_buffers, NULL);
+
+        max_buffers =
+            qmax_buffers == 0 ? max_buffers : SPA_MIN(qmax_buffers,
+                                                      max_buffers);
+        minsize = SPA_MAX(minsize, qminsize);
+        stride = SPA_MAX(stride, qstride);
+
+        pw_log_debug("%d %d %d -> %zd %zd %d", qminsize, qstride, qmax_buffers,
+                     minsize, stride, max_buffers);
+    } else {
+        pw_log_warn("no buffers param");
+        minsize = 1024;
+    }
 
     if ((in_flags & SPA_PORT_INFO_FLAG_CAN_ALLOC_BUFFERS) ||