mirror of
https://gitlab.freedesktop.org/pipewire/pipewire.git
synced 2025-10-31 22:25:38 -04:00
Meta: rework ringbuffer meta
ringbuffer: remove the size and mask from the ringbuffer; we track those elsewhere, in the user of the ringbuffer. Remove the buffer data offset and size fields and replace them with a ringbuffer. We then have a ringbuffer in all buffer data, which simplifies things. We can now remove the ringbuffer metadata.
This commit is contained in:
parent
49d8f6792e
commit
2923b623b3
27 changed files with 199 additions and 374 deletions
|
|
@ -335,12 +335,12 @@ impl_node_port_enum_params(struct spa_node *node,
|
|||
|
||||
param = spa_pod_builder_object(&b,
|
||||
id, t->param_buffers.Buffers,
|
||||
":", t->param_buffers.size, "iru", this->props.min_latency * this->frame_size,
|
||||
":", t->param_buffers.size, "iru", this->props.max_latency * this->frame_size,
|
||||
2, this->props.min_latency * this->frame_size,
|
||||
INT32_MAX,
|
||||
":", t->param_buffers.stride, "i", 0,
|
||||
":", t->param_buffers.buffers, "ir", 2,
|
||||
2, 2, MAX_BUFFERS,
|
||||
":", t->param_buffers.buffers, "ir", 1,
|
||||
2, 1, MAX_BUFFERS,
|
||||
":", t->param_buffers.align, "i", 16);
|
||||
}
|
||||
else if (id == t->param.idMeta) {
|
||||
|
|
@ -354,19 +354,6 @@ impl_node_port_enum_params(struct spa_node *node,
|
|||
":", t->param_meta.type, "I", t->meta.Header,
|
||||
":", t->param_meta.size, "i", sizeof(struct spa_meta_header));
|
||||
break;
|
||||
case 1:
|
||||
param = spa_pod_builder_object(&b,
|
||||
id, t->param_meta.Meta,
|
||||
":", t->param_meta.type, "I", t->meta.Ringbuffer,
|
||||
":", t->param_meta.size, "i", sizeof(struct spa_meta_ringbuffer),
|
||||
":", t->param_meta.ringbufferSize, "iru",
|
||||
this->props.max_latency * this->frame_size,
|
||||
2, this->props.min_latency * this->frame_size,
|
||||
this->period_frames * this->frame_size,
|
||||
":", t->param_meta.ringbufferStride, "i", 0,
|
||||
":", t->param_meta.ringbufferBlocks, "i", 1,
|
||||
":", t->param_meta.ringbufferAlign, "i", 16);
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -491,7 +478,6 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
b->outstanding = true;
|
||||
|
||||
b->h = spa_buffer_find_meta(b->outbuf, this->type.meta.Header);
|
||||
b->rb = spa_buffer_find_meta(b->outbuf, this->type.meta.Ringbuffer);
|
||||
|
||||
type = buffers[i]->datas[0].type;
|
||||
if ((type == this->type.data.MemFd ||
|
||||
|
|
|
|||
|
|
@ -343,56 +343,40 @@ pull_frames(struct state *state,
|
|||
try_pull(state, frames, do_pull);
|
||||
|
||||
while (!spa_list_is_empty(&state->ready) && to_write > 0) {
|
||||
uint8_t *src, *dst;
|
||||
size_t n_bytes, n_frames, size;
|
||||
off_t offs;
|
||||
uint8_t *dst;
|
||||
size_t n_bytes, n_frames;
|
||||
struct buffer *b;
|
||||
bool reuse = false;
|
||||
struct spa_data *d;
|
||||
struct spa_ringbuffer *ringbuffer;
|
||||
uint32_t index;
|
||||
int32_t avail;
|
||||
|
||||
b = spa_list_first(&state->ready, struct buffer, link);
|
||||
d = b->outbuf->datas;
|
||||
|
||||
dst = SPA_MEMBER(my_areas[0].addr, offset * state->frame_size, uint8_t);
|
||||
|
||||
if (b->rb) {
|
||||
struct spa_ringbuffer *ringbuffer = &b->rb->ringbuffer;
|
||||
uint32_t index;
|
||||
int32_t avail;
|
||||
ringbuffer = &d[0].chunk->area;
|
||||
|
||||
avail = spa_ringbuffer_get_read_index(ringbuffer, &index);
|
||||
avail /= state->frame_size;
|
||||
avail = spa_ringbuffer_get_read_index(ringbuffer, &index);
|
||||
avail /= state->frame_size;
|
||||
|
||||
n_frames = SPA_MIN(avail, to_write);
|
||||
n_bytes = n_frames * state->frame_size;
|
||||
n_frames = SPA_MIN(avail, to_write);
|
||||
n_bytes = n_frames * state->frame_size;
|
||||
|
||||
spa_ringbuffer_read_data(ringbuffer, d[0].data, index % ringbuffer->size, dst, n_bytes);
|
||||
spa_ringbuffer_read_data(ringbuffer, d[0].data, d[0].maxsize,
|
||||
index % d[0].maxsize, dst, n_bytes);
|
||||
spa_ringbuffer_read_update(ringbuffer, index + n_bytes);
|
||||
|
||||
spa_ringbuffer_read_update(ringbuffer, index + n_bytes);
|
||||
|
||||
reuse = avail == n_frames || state->n_buffers == 1;
|
||||
} else {
|
||||
offs = SPA_MIN(d[0].chunk->offset + state->ready_offset, d[0].maxsize);
|
||||
size = SPA_MIN(d[0].chunk->size + offs, d[0].maxsize) - offs;
|
||||
src = SPA_MEMBER(d[0].data, offs, uint8_t);
|
||||
|
||||
n_bytes = SPA_MIN(size, to_write * state->frame_size);
|
||||
n_frames = SPA_MIN(to_write, n_bytes / state->frame_size);
|
||||
|
||||
memcpy(dst, src, n_bytes);
|
||||
|
||||
state->ready_offset += n_bytes;
|
||||
reuse = (state->ready_offset >= size);
|
||||
}
|
||||
if (reuse) {
|
||||
if (avail == n_frames || state->n_buffers == 1) {
|
||||
spa_list_remove(&b->link);
|
||||
b->outstanding = true;
|
||||
spa_log_trace(state->log, "alsa-util %p: reuse buffer %u", state, b->outbuf->id);
|
||||
state->callbacks->reuse_buffer(state->callbacks_data, 0, b->outbuf->id);
|
||||
state->ready_offset = 0;
|
||||
}
|
||||
total_frames += n_frames;
|
||||
to_write -= n_frames;
|
||||
|
||||
spa_log_trace(state->log, "alsa-util %p: written %lu frames, left %ld", state, total_frames, to_write);
|
||||
}
|
||||
|
||||
|
|
@ -430,6 +414,8 @@ push_frames(struct state *state,
|
|||
size_t n_bytes;
|
||||
struct buffer *b;
|
||||
struct spa_data *d;
|
||||
uint32_t index, avail;
|
||||
int32_t filled;
|
||||
|
||||
b = spa_list_first(&state->free, struct buffer, link);
|
||||
spa_list_remove(&b->link);
|
||||
|
|
@ -442,15 +428,18 @@ push_frames(struct state *state,
|
|||
|
||||
d = b->outbuf->datas;
|
||||
|
||||
total_frames = SPA_MIN(frames, d[0].maxsize / state->frame_size);
|
||||
src = SPA_MEMBER(my_areas[0].addr, offset * state->frame_size, uint8_t);
|
||||
|
||||
filled = spa_ringbuffer_get_write_index(&d[0].chunk->area, &index);
|
||||
avail = (d[0].maxsize - filled) / state->frame_size;
|
||||
total_frames = SPA_MIN(avail, frames);
|
||||
n_bytes = total_frames * state->frame_size;
|
||||
|
||||
memcpy(d[0].data, src, n_bytes);
|
||||
spa_ringbuffer_write_data(&d[0].chunk->area, d[0].data, d[0].maxsize,
|
||||
index % d[0].maxsize, src, n_bytes);
|
||||
|
||||
d[0].chunk->offset = 0;
|
||||
d[0].chunk->size = n_bytes;
|
||||
d[0].chunk->stride = 0;
|
||||
spa_ringbuffer_write_update(&d[0].chunk->area, index + n_bytes);
|
||||
d[0].chunk->stride = state->frame_size;
|
||||
|
||||
b->outstanding = true;
|
||||
io->buffer_id = b->outbuf->id;
|
||||
|
|
|
|||
|
|
@ -53,7 +53,6 @@ struct props {
|
|||
struct buffer {
|
||||
struct spa_buffer *outbuf;
|
||||
struct spa_meta_header *h;
|
||||
struct spa_meta_ringbuffer *rb;
|
||||
bool outstanding;
|
||||
struct spa_list link;
|
||||
};
|
||||
|
|
@ -151,8 +150,6 @@ struct state {
|
|||
struct spa_list free;
|
||||
struct spa_list ready;
|
||||
|
||||
size_t ready_offset;
|
||||
|
||||
bool started;
|
||||
struct spa_source source;
|
||||
int timerfd;
|
||||
|
|
|
|||
|
|
@ -43,12 +43,8 @@ struct buffer {
|
|||
bool outstanding;
|
||||
|
||||
struct spa_buffer *outbuf;
|
||||
struct spa_ringbuffer *rb;
|
||||
|
||||
struct spa_meta_header *h;
|
||||
|
||||
bool have_ringbuffer;
|
||||
struct spa_ringbuffer ringbuffer;
|
||||
};
|
||||
|
||||
struct port {
|
||||
|
|
@ -439,8 +435,8 @@ impl_node_port_enum_params(struct spa_node *node,
|
|||
2, 16 * this->bpf,
|
||||
INT32_MAX / this->bpf,
|
||||
":", t->param_buffers.stride, "i", 0,
|
||||
":", t->param_buffers.buffers, "iru", 2,
|
||||
2, 2, MAX_BUFFERS,
|
||||
":", t->param_buffers.buffers, "iru", 1,
|
||||
2, 1, MAX_BUFFERS,
|
||||
":", t->param_buffers.align, "i", 16);
|
||||
}
|
||||
else if (id == t->param.idMeta) {
|
||||
|
|
@ -454,17 +450,6 @@ impl_node_port_enum_params(struct spa_node *node,
|
|||
":", t->param_meta.type, "I", t->meta.Header,
|
||||
":", t->param_meta.size, "i", sizeof(struct spa_meta_header));
|
||||
break;
|
||||
case 1:
|
||||
param = spa_pod_builder_object(&b,
|
||||
id, t->param_meta.Meta,
|
||||
":", t->param_meta.type, "I", t->meta.Ringbuffer,
|
||||
":", t->param_meta.size, "i", sizeof(struct spa_meta_ringbuffer),
|
||||
":", t->param_meta.ringbufferSize, "iru", 1024 * this->bpf,
|
||||
2, 16 * this->bpf, INT32_MAX / this->bpf,
|
||||
":", t->param_meta.ringbufferStride, "i", 0,
|
||||
":", t->param_meta.ringbufferBlocks, "i", 1,
|
||||
":", t->param_meta.ringbufferAlign, "i", 16);
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -605,24 +590,12 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
for (i = 0; i < n_buffers; i++) {
|
||||
struct buffer *b;
|
||||
struct spa_data *d = buffers[i]->datas;
|
||||
struct spa_meta_ringbuffer *rb;
|
||||
|
||||
b = &port->buffers[i];
|
||||
b->outbuf = buffers[i];
|
||||
b->outstanding = (direction == SPA_DIRECTION_INPUT);
|
||||
b->h = spa_buffer_find_meta(buffers[i], t->meta.Header);
|
||||
|
||||
if ((rb = spa_buffer_find_meta(buffers[i], t->meta.Ringbuffer))) {
|
||||
b->rb = &rb->ringbuffer;
|
||||
b->have_ringbuffer = true;
|
||||
}
|
||||
else {
|
||||
b->rb = &b->ringbuffer;
|
||||
b->rb->size = d[0].maxsize;
|
||||
b->rb->mask = d[0].maxsize - 1;
|
||||
b->have_ringbuffer = false;
|
||||
}
|
||||
|
||||
if (!((d[0].type == t->data.MemPtr ||
|
||||
d[0].type == t->data.MemFd ||
|
||||
d[0].type == t->data.DmaBuf) && d[0].data != NULL)) {
|
||||
|
|
@ -715,17 +688,23 @@ add_port_data(struct impl *this, void *out, size_t outsize, size_t next, struct
|
|||
{
|
||||
size_t insize;
|
||||
struct buffer *b;
|
||||
uint32_t index = 0, offset, len1, len2;
|
||||
uint32_t index = 0, offset, len1, len2, maxsize;
|
||||
mix_func_t mix = layer == 0 ? this->copy : this->add;
|
||||
void *data;
|
||||
struct spa_ringbuffer *rb;
|
||||
|
||||
b = spa_list_first(&port->queue, struct buffer, link);
|
||||
|
||||
insize = spa_ringbuffer_get_read_index(b->rb, &index);
|
||||
maxsize = b->outbuf->datas[0].maxsize;
|
||||
data = b->outbuf->datas[0].data;
|
||||
rb = &b->outbuf->datas[0].chunk->area,
|
||||
|
||||
insize = spa_ringbuffer_get_read_index(rb, &index);
|
||||
outsize = SPA_MIN(outsize, insize);
|
||||
|
||||
offset = index % b->rb->size;
|
||||
if (offset + outsize > b->rb->size) {
|
||||
len1 = b->rb->size - offset;
|
||||
offset = index % maxsize;
|
||||
if (offset + outsize > maxsize) {
|
||||
len1 = maxsize - offset;
|
||||
len2 = outsize - len1;
|
||||
}
|
||||
else {
|
||||
|
|
@ -733,15 +712,15 @@ add_port_data(struct impl *this, void *out, size_t outsize, size_t next, struct
|
|||
len2 = 0;
|
||||
}
|
||||
|
||||
mix(out, SPA_MEMBER(b->outbuf->datas[0].data, offset, void), len1);
|
||||
mix(out, SPA_MEMBER(data, offset, void), len1);
|
||||
if (len2 > 0)
|
||||
mix(out + len1, b->outbuf->datas[0].data, len2);
|
||||
mix(out + len1, data, len2);
|
||||
|
||||
spa_ringbuffer_read_update(b->rb, index + outsize);
|
||||
spa_ringbuffer_read_update(rb, index + outsize);
|
||||
|
||||
port->queued_bytes -= outsize;
|
||||
|
||||
if (outsize == insize || (b->have_ringbuffer && next == 0)) {
|
||||
if (outsize == insize || next == 0) {
|
||||
spa_log_trace(this->log, NAME " %p: return buffer %d on port %p %zd",
|
||||
this, b->outbuf->id, port, outsize);
|
||||
port->io->buffer_id = b->outbuf->id;
|
||||
|
|
@ -761,8 +740,9 @@ static int mix_output(struct impl *this, size_t n_bytes)
|
|||
struct port *outport;
|
||||
struct spa_port_io *outio;
|
||||
struct spa_data *od;
|
||||
int32_t filled, avail;
|
||||
int32_t filled, avail, maxsize;
|
||||
uint32_t index = 0, len1, len2, offset;
|
||||
struct spa_ringbuffer *rb;
|
||||
|
||||
outport = GET_OUT_PORT(this, 0);
|
||||
outio = outport->io;
|
||||
|
|
@ -777,22 +757,18 @@ static int mix_output(struct impl *this, size_t n_bytes)
|
|||
outbuf->outstanding = true;
|
||||
|
||||
od = outbuf->outbuf->datas;
|
||||
maxsize = od[0].maxsize;
|
||||
|
||||
if (!outbuf->have_ringbuffer) {
|
||||
outbuf->rb->readindex = outbuf->rb->writeindex = 0;
|
||||
od[0].chunk->offset = 0;
|
||||
od[0].chunk->size = n_bytes;
|
||||
od[0].chunk->stride = 0;
|
||||
}
|
||||
rb = &od[0].chunk->area;
|
||||
|
||||
filled = spa_ringbuffer_get_write_index(outbuf->rb, &index);
|
||||
avail = outbuf->rb->size - filled;
|
||||
offset = index % outbuf->rb->size;
|
||||
filled = spa_ringbuffer_get_write_index(rb, &index);
|
||||
avail = maxsize - filled;
|
||||
offset = index % maxsize;
|
||||
|
||||
n_bytes = SPA_MIN(n_bytes, avail);
|
||||
|
||||
if (offset + n_bytes > outbuf->rb->size) {
|
||||
len1 = outbuf->rb->size - offset;
|
||||
if (offset + n_bytes > maxsize) {
|
||||
len1 = maxsize - offset;
|
||||
len2 = n_bytes - len1;
|
||||
}
|
||||
else {
|
||||
|
|
@ -820,7 +796,7 @@ static int mix_output(struct impl *this, size_t n_bytes)
|
|||
layer++;
|
||||
}
|
||||
|
||||
spa_ringbuffer_write_update(outbuf->rb, index + n_bytes);
|
||||
spa_ringbuffer_write_update(rb, index + n_bytes);
|
||||
|
||||
outio->buffer_id = outbuf->outbuf->id;
|
||||
outio->status = SPA_STATUS_HAVE_BUFFER;
|
||||
|
|
@ -858,6 +834,7 @@ static int impl_node_process_input(struct spa_node *node)
|
|||
inio->status == SPA_STATUS_HAVE_BUFFER && inio->buffer_id < inport->n_buffers) {
|
||||
struct buffer *b = &inport->buffers[inio->buffer_id];
|
||||
uint32_t index;
|
||||
struct spa_ringbuffer *rb;
|
||||
|
||||
if (!b->outstanding) {
|
||||
spa_log_warn(this->log, NAME " %p: buffer %u in use", this,
|
||||
|
|
@ -872,11 +849,8 @@ static int impl_node_process_input(struct spa_node *node)
|
|||
|
||||
spa_list_append(&inport->queue, &b->link);
|
||||
|
||||
if (!b->have_ringbuffer) {
|
||||
b->rb->readindex = 0;
|
||||
b->rb->writeindex = b->outbuf->datas[0].chunk->size;
|
||||
}
|
||||
inport->queued_bytes += spa_ringbuffer_get_read_index(b->rb, &index);
|
||||
rb = &b->outbuf->datas[0].chunk->area;
|
||||
inport->queued_bytes += spa_ringbuffer_get_read_index(rb, &index);
|
||||
|
||||
spa_log_trace(this->log, NAME " %p: queue buffer %d on port %d %zd %zd",
|
||||
this, b->outbuf->id, i, inport->queued_bytes, min_queued);
|
||||
|
|
|
|||
|
|
@ -105,7 +105,6 @@ struct buffer {
|
|||
struct spa_buffer *outbuf;
|
||||
bool outstanding;
|
||||
struct spa_meta_header *h;
|
||||
struct spa_meta_ringbuffer *rb;
|
||||
struct spa_list link;
|
||||
};
|
||||
|
||||
|
|
@ -298,6 +297,12 @@ static int make_buffer(struct impl *this)
|
|||
struct buffer *b;
|
||||
struct spa_port_io *io = this->io;
|
||||
int n_bytes, n_samples;
|
||||
uint32_t maxsize;
|
||||
void *data;
|
||||
struct spa_ringbuffer *rb;
|
||||
struct spa_data *d;
|
||||
int32_t filled, avail;
|
||||
uint32_t index, offset, l0, l1;
|
||||
|
||||
read_timer(this);
|
||||
|
||||
|
|
@ -310,7 +315,11 @@ static int make_buffer(struct impl *this)
|
|||
spa_list_remove(&b->link);
|
||||
b->outstanding = true;
|
||||
|
||||
n_bytes = b->outbuf->datas[0].maxsize;
|
||||
d = b->outbuf->datas;
|
||||
maxsize = d[0].maxsize;
|
||||
data = d[0].data;
|
||||
|
||||
n_bytes = maxsize;
|
||||
if (io->range.min_size != 0) {
|
||||
n_bytes = SPA_MIN(n_bytes, io->range.min_size);
|
||||
if (io->range.max_size < n_bytes)
|
||||
|
|
@ -318,41 +327,32 @@ static int make_buffer(struct impl *this)
|
|||
}
|
||||
|
||||
spa_log_trace(this->log, NAME " %p: dequeue buffer %d %d %d", this, b->outbuf->id,
|
||||
b->outbuf->datas[0].maxsize, n_bytes);
|
||||
maxsize, n_bytes);
|
||||
|
||||
if (b->rb) {
|
||||
int32_t filled, avail;
|
||||
uint32_t index, offset, l0, l1;
|
||||
rb = &d[0].chunk->area;
|
||||
|
||||
filled = spa_ringbuffer_get_write_index(&b->rb->ringbuffer, &index);
|
||||
avail = b->rb->ringbuffer.size - filled;
|
||||
n_bytes = SPA_MIN(avail, n_bytes);
|
||||
filled = spa_ringbuffer_get_write_index(rb, &index);
|
||||
avail = maxsize - filled;
|
||||
n_bytes = SPA_MIN(avail, n_bytes);
|
||||
|
||||
n_samples = n_bytes / this->bpf;
|
||||
n_samples = n_bytes / this->bpf;
|
||||
|
||||
offset = index & b->rb->ringbuffer.mask;
|
||||
offset = index % maxsize;
|
||||
|
||||
if (offset + n_bytes > b->rb->ringbuffer.size) {
|
||||
l0 = (b->rb->ringbuffer.size - offset) / this->bpf;
|
||||
l1 = n_samples - l0;
|
||||
}
|
||||
else {
|
||||
l0 = n_samples;
|
||||
l1 = 0;
|
||||
}
|
||||
|
||||
this->render_func(this, SPA_MEMBER(b->outbuf->datas[0].data, offset, void), l0);
|
||||
if (l1)
|
||||
this->render_func(this, b->outbuf->datas[0].data, l1);
|
||||
|
||||
spa_ringbuffer_write_update(&b->rb->ringbuffer, index + n_bytes);
|
||||
} else {
|
||||
n_samples = n_bytes / this->bpf;
|
||||
this->render_func(this, b->outbuf->datas[0].data, n_samples);
|
||||
b->outbuf->datas[0].chunk->size = n_bytes;
|
||||
b->outbuf->datas[0].chunk->offset = 0;
|
||||
b->outbuf->datas[0].chunk->stride = 0;
|
||||
if (offset + n_bytes > maxsize) {
|
||||
l0 = (maxsize - offset) / this->bpf;
|
||||
l1 = n_samples - l0;
|
||||
}
|
||||
else {
|
||||
l0 = n_samples;
|
||||
l1 = 0;
|
||||
}
|
||||
|
||||
this->render_func(this, SPA_MEMBER(data, offset, void), l0);
|
||||
if (l1)
|
||||
this->render_func(this, data, l1);
|
||||
|
||||
spa_ringbuffer_write_update(rb, index + n_bytes);
|
||||
|
||||
if (b->h) {
|
||||
b->h->seq = this->sample_count;
|
||||
|
|
@ -628,7 +628,7 @@ impl_node_port_enum_params(struct spa_node *node,
|
|||
2, 16 * this->bpf,
|
||||
INT32_MAX / this->bpf,
|
||||
":", t->param_buffers.stride, "i", 0,
|
||||
":", t->param_buffers.buffers, "iru", 2,
|
||||
":", t->param_buffers.buffers, "iru", 1,
|
||||
2, 1, 32,
|
||||
":", t->param_buffers.align, "i", 16);
|
||||
}
|
||||
|
|
@ -643,17 +643,6 @@ impl_node_port_enum_params(struct spa_node *node,
|
|||
":", t->param_meta.type, "I", t->meta.Header,
|
||||
":", t->param_meta.size, "i", sizeof(struct spa_meta_header));
|
||||
break;
|
||||
case 1:
|
||||
param = spa_pod_builder_object(&b,
|
||||
id, t->param_meta.Meta,
|
||||
":", t->param_meta.type, "I", t->meta.Ringbuffer,
|
||||
":", t->param_meta.size, "i", sizeof(struct spa_meta_ringbuffer),
|
||||
":", t->param_meta.ringbufferSize, "ir", 5512 * this->bpf,
|
||||
2, 16 * this->bpf, INT32_MAX / this->bpf,
|
||||
":", t->param_meta.ringbufferStride, "i", 0,
|
||||
":", t->param_meta.ringbufferBlocks, "i", 1,
|
||||
":", t->param_meta.ringbufferAlign, "i", 16);
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -783,7 +772,6 @@ impl_node_port_use_buffers(struct spa_node *node,
|
|||
b->outbuf = buffers[i];
|
||||
b->outstanding = false;
|
||||
b->h = spa_buffer_find_meta(buffers[i], this->type.meta.Header);
|
||||
b->rb = spa_buffer_find_meta(buffers[i], this->type.meta.Ringbuffer);
|
||||
|
||||
if ((d[0].type == this->type.data.MemPtr ||
|
||||
d[0].type == this->type.data.MemFd ||
|
||||
|
|
|
|||
|
|
@ -86,8 +86,8 @@ impl_log_logv(struct spa_log *log,
|
|||
uint64_t count = 1;
|
||||
|
||||
spa_ringbuffer_get_write_index(&impl->trace_rb, &index);
|
||||
spa_ringbuffer_write_data(&impl->trace_rb, impl->trace_data,
|
||||
index & impl->trace_rb.mask, location, size);
|
||||
spa_ringbuffer_write_data(&impl->trace_rb, impl->trace_data, TRACE_BUFFER,
|
||||
index & (TRACE_BUFFER - 1), location, size);
|
||||
spa_ringbuffer_write_update(&impl->trace_rb, index + size);
|
||||
|
||||
if (write(impl->source.fd, &count, sizeof(uint64_t)) != sizeof(uint64_t))
|
||||
|
|
@ -124,12 +124,12 @@ static void on_trace_event(struct spa_source *source)
|
|||
while ((avail = spa_ringbuffer_get_read_index(&impl->trace_rb, &index)) > 0) {
|
||||
uint32_t offset, first;
|
||||
|
||||
if (avail > impl->trace_rb.size) {
|
||||
index += avail - impl->trace_rb.size;
|
||||
avail = impl->trace_rb.size;
|
||||
if (avail > TRACE_BUFFER) {
|
||||
index += avail - TRACE_BUFFER;
|
||||
avail = TRACE_BUFFER;
|
||||
}
|
||||
offset = index & impl->trace_rb.mask;
|
||||
first = SPA_MIN(avail, impl->trace_rb.size - offset);
|
||||
offset = index & (TRACE_BUFFER - 1);
|
||||
first = SPA_MIN(avail, TRACE_BUFFER - offset);
|
||||
|
||||
fwrite(impl->trace_data + offset, first, 1, stderr);
|
||||
if (SPA_UNLIKELY(avail > first)) {
|
||||
|
|
@ -223,7 +223,7 @@ impl_init(const struct spa_handle_factory *factory,
|
|||
this->have_source = true;
|
||||
}
|
||||
|
||||
spa_ringbuffer_init(&this->trace_rb, TRACE_BUFFER);
|
||||
spa_ringbuffer_init(&this->trace_rb);
|
||||
|
||||
spa_log_debug(&this->log, NAME " %p: initialized", this);
|
||||
|
||||
|
|
|
|||
|
|
@ -212,18 +212,18 @@ loop_invoke(struct spa_loop *loop,
|
|||
uint32_t idx, offset, l0;
|
||||
|
||||
filled = spa_ringbuffer_get_write_index(&impl->buffer, &idx);
|
||||
if (filled < 0 || filled > impl->buffer.size) {
|
||||
if (filled < 0 || filled > DATAS_SIZE) {
|
||||
spa_log_warn(impl->log, NAME " %p: queue xrun %d", impl, filled);
|
||||
return -EPIPE;
|
||||
}
|
||||
avail = impl->buffer.size - filled;
|
||||
avail = DATAS_SIZE - filled;
|
||||
if (avail < sizeof(struct invoke_item)) {
|
||||
spa_log_warn(impl->log, NAME " %p: queue full %d", impl, avail);
|
||||
return -EPIPE;
|
||||
}
|
||||
offset = idx & impl->buffer.mask;
|
||||
offset = idx & (DATAS_SIZE - 1);
|
||||
|
||||
l0 = impl->buffer.size - offset;
|
||||
l0 = DATAS_SIZE - offset;
|
||||
|
||||
item = SPA_MEMBER(impl->buffer_data, offset, struct invoke_item);
|
||||
item->func = func;
|
||||
|
|
@ -272,7 +272,7 @@ static void wakeup_func(void *data, uint64_t count)
|
|||
|
||||
while (spa_ringbuffer_get_read_index(&impl->buffer, &index) > 0) {
|
||||
struct invoke_item *item =
|
||||
SPA_MEMBER(impl->buffer_data, index & impl->buffer.mask, struct invoke_item);
|
||||
SPA_MEMBER(impl->buffer_data, index & (DATAS_SIZE - 1), struct invoke_item);
|
||||
item->res = item->func(&impl->loop, true, item->seq, item->size, item->data,
|
||||
item->user_data);
|
||||
spa_ringbuffer_read_update(&impl->buffer, index + item->item_size);
|
||||
|
|
@ -737,7 +737,7 @@ impl_init(const struct spa_handle_factory *factory,
|
|||
spa_list_init(&impl->destroy_list);
|
||||
spa_hook_list_init(&impl->hooks_list);
|
||||
|
||||
spa_ringbuffer_init(&impl->buffer, DATAS_SIZE);
|
||||
spa_ringbuffer_init(&impl->buffer);
|
||||
|
||||
impl->wakeup = spa_loop_utils_add_event(&impl->utils, wakeup_func, impl);
|
||||
impl->ack_fd = eventfd(0, EFD_CLOEXEC);
|
||||
|
|
|
|||
|
|
@ -267,8 +267,7 @@ static int consume_buffer(struct impl *this)
|
|||
|
||||
render_buffer(this, b);
|
||||
|
||||
b->outbuf->datas[0].chunk->offset = 0;
|
||||
b->outbuf->datas[0].chunk->size = n_bytes;
|
||||
spa_ringbuffer_set_avail(&b->outbuf->datas[0].chunk->area, n_bytes);
|
||||
b->outbuf->datas[0].chunk->stride = n_bytes;
|
||||
|
||||
if (b->h) {
|
||||
|
|
|
|||
|
|
@ -279,8 +279,7 @@ static int make_buffer(struct impl *this)
|
|||
|
||||
fill_buffer(this, b);
|
||||
|
||||
b->outbuf->datas[0].chunk->offset = 0;
|
||||
b->outbuf->datas[0].chunk->size = n_bytes;
|
||||
spa_ringbuffer_set_avail(&b->outbuf->datas[0].chunk->area, n_bytes);
|
||||
b->outbuf->datas[0].chunk->stride = n_bytes;
|
||||
|
||||
if (b->h) {
|
||||
|
|
|
|||
|
|
@ -930,8 +930,7 @@ static int mmap_read(struct impl *this)
|
|||
}
|
||||
|
||||
d = b->outbuf->datas;
|
||||
d[0].chunk->offset = 0;
|
||||
d[0].chunk->size = buf.bytesused;
|
||||
spa_ringbuffer_set_avail(&d[0].chunk->area, buf.bytesused);
|
||||
d[0].chunk->stride = port->fmt.fmt.pix.bytesperline;
|
||||
|
||||
b->outstanding = true;
|
||||
|
|
@ -1091,8 +1090,7 @@ mmap_init(struct impl *this,
|
|||
d = buffers[i]->datas;
|
||||
d[0].mapoffset = 0;
|
||||
d[0].maxsize = b->v4l2_buffer.length;
|
||||
d[0].chunk->offset = 0;
|
||||
d[0].chunk->size = b->v4l2_buffer.length;
|
||||
spa_ringbuffer_set_avail(&d[0].chunk->area, 0);
|
||||
d[0].chunk->stride = state->fmt.fmt.pix.bytesperline;
|
||||
|
||||
if (state->export_buf) {
|
||||
|
|
|
|||
|
|
@ -275,7 +275,7 @@ static int make_buffer(struct impl *this)
|
|||
{
|
||||
struct buffer *b;
|
||||
struct spa_port_io *io = this->io;
|
||||
int n_bytes;
|
||||
uint32_t n_bytes;
|
||||
|
||||
read_timer(this);
|
||||
|
||||
|
|
@ -294,8 +294,7 @@ static int make_buffer(struct impl *this)
|
|||
|
||||
fill_buffer(this, b);
|
||||
|
||||
b->outbuf->datas[0].chunk->offset = 0;
|
||||
b->outbuf->datas[0].chunk->size = n_bytes;
|
||||
spa_ringbuffer_set_avail(&b->outbuf->datas[0].chunk->area, n_bytes);
|
||||
b->outbuf->datas[0].chunk->stride = this->stride;
|
||||
|
||||
if (b->h) {
|
||||
|
|
|
|||
|
|
@ -703,10 +703,10 @@ static void do_volume(struct impl *this, struct spa_buffer *dbuf, struct spa_buf
|
|||
sd = &sbuf->datas[si];
|
||||
dd = &dbuf->datas[di];
|
||||
|
||||
src = (int16_t *) ((uint8_t *) sd->data + sd->chunk->offset + soff);
|
||||
src = (int16_t *) ((uint8_t *) sd->data + sd->chunk->area.readindex + soff);
|
||||
dst = (int16_t *) ((uint8_t *) dd->data + doff);
|
||||
|
||||
n_bytes = SPA_MIN(sd->chunk->size - soff, dd->maxsize - doff);
|
||||
n_bytes = SPA_MIN(sd->chunk->area.writeindex - soff, dd->maxsize - doff);
|
||||
n_samples = n_bytes / sizeof(uint16_t);
|
||||
|
||||
for (i = 0; i < n_samples; i++)
|
||||
|
|
@ -715,10 +715,9 @@ static void do_volume(struct impl *this, struct spa_buffer *dbuf, struct spa_buf
|
|||
soff += n_bytes;
|
||||
doff += n_bytes;
|
||||
|
||||
dd->chunk->offset = 0;
|
||||
dd->chunk->size = doff;
|
||||
spa_ringbuffer_set_avail(&dd->chunk->area, doff);
|
||||
|
||||
if (soff >= sd->chunk->size) {
|
||||
if (soff >= sd->chunk->area.writeindex) {
|
||||
si++;
|
||||
soff = 0;
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue