Work on better scheduling

Also use double buffering for the client-node input: we process the
output of the previous cycle.
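
A minimal sketch of the double-buffering idea, using made-up stand-in types
(port_io, STATUS_*, ID_INVALID) instead of the real struct spa_port_io and
the transport area shown in the diff below:

#include <stdint.h>

#define ID_INVALID          UINT32_MAX
#define STATUS_OK           0
#define STATUS_NEED_BUFFER  1

struct port_io {                /* stand-in for struct spa_port_io */
	int status;
	uint32_t buffer_id;
};

/* Swap the locally prepared io with the slot shared with the client:
 * the client gets this cycle's request, we consume the reply it produced
 * in the previous cycle, and that reply's status is what we report. */
static int process_input_slot(struct port_io *io, struct port_io *shared)
{
	struct port_io tmp = *shared;   /* previous cycle's result */

	*shared = *io;                  /* hand the new io to the client */
	*io = tmp;                      /* process last cycle's output now */

	return tmp.status;
}
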
Only process the reuse_buffer if the client is explicitly going to
send it; otherwise, recycle right after sending.
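
A sketch of that recycle policy, reusing the stand-in types from the sketch
above; client_reuse mirrors the impl->client_reuse flag in the diff:

/* When the client will send an explicit reuse_buffer message, forget the
 * buffer id and wait for that message; otherwise copy the id back so the
 * buffer is recycled right after it has been sent. */
static void recycle_after_send(struct port_io *io, const struct port_io *shared,
			       int client_reuse)
{
	if (client_reuse)
		io->buffer_id = ID_INVALID;
	else
		io->buffer_id = shared->buffer_id;
}
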
Let the tee and mix pass the io status upstream.
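
A sketch of what "pass the io status upstream" means for the tee input,
again with the stand-in types from above; the real code walks
spa_graph_port lists in the tee/mix hunks below:

static int schedule_tee_input_sketch(struct port_io *in_io,
				     struct port_io *out_ios[], int n_out)
{
	int i;

	if (n_out > 0) {
		for (i = 0; i < n_out; i++)
			*out_ios[i] = *in_io;        /* fan the buffer out to every output */
		in_io->status = STATUS_OK;
		in_io->buffer_id = ID_INVALID;
	} else {
		in_io->status = STATUS_NEED_BUFFER;  /* nothing attached, ask upstream */
	}
	return in_io->status;                        /* the io status is the result */
}
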
Initialize io area with NEED_BUFFER for inputs.
Implement reuse_buffer for the remote.
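
Two small sketches for the last two points, with the same stand-in types:
inputs start out flagged NEED_BUFFER with no buffer attached, and a
reuse_buffer message arriving at the remote is forwarded to the peer of the
matching port. The out_port struct and reuse_on_peer callback are
hypothetical stand-ins for the spa_graph_port walk and the
spa_node_port_reuse_buffer() call in the diff below:

static void init_input_ios(struct port_io ios[], int n)
{
	int i;
	for (i = 0; i < n; i++) {
		ios[i].status = STATUS_NEED_BUFFER;  /* ask for a buffer first */
		ios[i].buffer_id = ID_INVALID;       /* nothing attached yet */
	}
}

struct out_port {
	uint32_t port_id;        /* id of our output-side input port */
	uint32_t peer_port_id;   /* id of the port on the peer node */
	void (*reuse_on_peer)(uint32_t peer_port_id, uint32_t buffer_id);
};

static void forward_reuse_buffer(struct out_port ports[], int n,
				 uint32_t port_id, uint32_t buffer_id)
{
	int i;
	for (i = 0; i < n; i++) {
		if (ports[i].port_id != port_id || ports[i].reuse_on_peer == NULL)
			continue;
		ports[i].reuse_on_peer(ports[i].peer_port_id, buffer_id);
		break;
	}
}
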
Wim Taymans 2017-10-16 18:14:35 +02:00
parent 957a03e3f8
commit f817aabe24
5 changed files with 68 additions and 37 deletions


@@ -747,7 +747,7 @@ static int spa_proxy_node_process_input(struct spa_node *node)
{
struct impl *impl;
struct proxy *this;
int i;
int i, res = SPA_RESULT_OK;
if (node == NULL)
return SPA_RESULT_INVALID_ARGUMENTS;
@@ -756,26 +756,31 @@ static int spa_proxy_node_process_input(struct spa_node *node)
impl = this->impl;
for (i = 0; i < MAX_INPUTS; i++) {
struct spa_port_io *io = this->in_ports[i].io;
struct spa_port_io *io = this->in_ports[i].io, tmp;
if (!io)
continue;
pw_log_trace("%d %d", io->status, io->buffer_id);
tmp = impl->transport->inputs[i];
impl->transport->inputs[i] = *io;
if (res == SPA_RESULT_OK)
res = tmp.status;
*io = tmp;
pw_log_trace("%d %d -> %d %d", io->status, io->buffer_id,
impl->transport->inputs[i].status,
impl->transport->inputs[i].buffer_id);
if (impl->client_reuse)
io->buffer_id = SPA_ID_INVALID;
else
io->buffer_id = impl->transport->inputs[i].buffer_id;
}
pw_client_node_transport_add_message(impl->transport,
&PW_CLIENT_NODE_MESSAGE_INIT(PW_CLIENT_NODE_MESSAGE_PROCESS_INPUT));
do_flush(this);
if (this->callbacks->need_input)
return SPA_RESULT_OK;
else
return SPA_RESULT_NEED_BUFFER;
return res;
}
static int spa_proxy_node_process_output(struct spa_node *node)
@@ -794,12 +799,9 @@ static int spa_proxy_node_process_output(struct spa_node *node)
continue;
tmp = impl->transport->outputs[i];
io->status = SPA_RESULT_NEED_BUFFER;
impl->transport->outputs[i] = *io;
if (tmp.status == SPA_RESULT_HAVE_BUFFER)
res = SPA_RESULT_HAVE_BUFFER;
else if (tmp.status == SPA_RESULT_NEED_BUFFER)
res = SPA_RESULT_NEED_BUFFER;
if (res == SPA_RESULT_OK)
res = tmp.status;
*io = tmp;
pw_log_trace("%d %d -> %d %d", io->status, io->buffer_id,
impl->transport->outputs[i].status,
@@ -826,17 +828,21 @@ static int handle_node_message(struct proxy *this, struct pw_client_node_message
continue;
*io = impl->transport->outputs[i];
impl->transport->outputs[i].buffer_id = SPA_ID_INVALID;
impl->transport->outputs[i].status = SPA_RESULT_OK;
pw_log_trace("%d %d", io->status, io->buffer_id);
}
this->callbacks->have_output(this->callbacks_data);
} else if (PW_CLIENT_NODE_MESSAGE_TYPE(message) == PW_CLIENT_NODE_MESSAGE_NEED_INPUT) {
this->callbacks->need_input(this->callbacks_data);
} else if (PW_CLIENT_NODE_MESSAGE_TYPE(message) == PW_CLIENT_NODE_MESSAGE_REUSE_BUFFER) {
if (impl->client_reuse) {
struct pw_client_node_message_reuse_buffer *p =
(struct pw_client_node_message_reuse_buffer *) message;
this->callbacks->reuse_buffer(this->callbacks_data, p->body.port_id.value,
p->body.buffer_id.value);
}
}
return SPA_RESULT_OK;
}


@@ -1123,6 +1123,9 @@ struct pw_link *pw_link_new(struct pw_core *core,
this->info.format = NULL;
this->info.props = this->properties ? &this->properties->dict : NULL;
this->io.buffer_id = SPA_ID_INVALID;
this->io.status = SPA_RESULT_NEED_BUFFER;
spa_graph_port_init(&this->rt.out_port,
PW_DIRECTION_OUTPUT,
this->rt.out_port.port_id,


@@ -50,21 +50,17 @@ static int schedule_tee_input(struct spa_node *data)
struct spa_graph_node *node = &this->rt.mix_node;
struct spa_graph_port *p;
struct spa_port_io *io = this->rt.mix_port.io;
int res;
if (spa_list_is_empty(&node->ports[SPA_DIRECTION_OUTPUT])) {
io->status = SPA_RESULT_NEED_BUFFER;
res = SPA_RESULT_NEED_BUFFER;
}
else {
if (!spa_list_is_empty(&node->ports[SPA_DIRECTION_OUTPUT])) {
pw_log_trace("tee input %d %d", io->status, io->buffer_id);
spa_list_for_each(p, &node->ports[SPA_DIRECTION_OUTPUT], link)
*p->io = *io;
io->status = SPA_RESULT_OK;
io->buffer_id = SPA_ID_INVALID;
res = SPA_RESULT_HAVE_BUFFER;
}
return res;
else
io->status = SPA_RESULT_NEED_BUFFER;
return io->status;
}
static int schedule_tee_output(struct spa_node *data)
{
@@ -76,9 +72,8 @@ static int schedule_tee_output(struct spa_node *data)
spa_list_for_each(p, &node->ports[SPA_DIRECTION_OUTPUT], link)
*io = *p->io;
io->status = SPA_RESULT_NEED_BUFFER;
return SPA_RESULT_NEED_BUFFER;
pw_log_trace("tee output %d %d", io->status, io->buffer_id);
return io->status;
}
static int schedule_tee_reuse_buffer(struct spa_node *data, uint32_t port_id, uint32_t buffer_id)
@@ -113,11 +108,10 @@ static int schedule_mix_input(struct spa_node *data)
pw_log_trace("mix %p: input %p %p->%p %d %d", node,
p, p->io, io, p->io->status, p->io->buffer_id);
*io = *p->io;
p->io->status = SPA_RESULT_OK;
p->io->buffer_id = SPA_ID_INVALID;
break;
}
return SPA_RESULT_HAVE_BUFFER;
return io->status;
}
static int schedule_mix_output(struct spa_node *data)
@@ -128,12 +122,10 @@ static int schedule_mix_output(struct spa_node *data)
struct spa_graph_port *p;
struct spa_port_io *io = this->rt.mix_port.io;
io->status = SPA_RESULT_NEED_BUFFER;
spa_list_for_each(p, &node->ports[SPA_DIRECTION_INPUT], link)
*p->io = *io;
io->buffer_id = SPA_ID_INVALID;
return SPA_RESULT_NEED_BUFFER;
pw_log_trace("mix output %d %d", io->status, io->buffer_id);
return io->status;
}
static int schedule_mix_reuse_buffer(struct spa_node *data, uint32_t port_id, uint32_t buffer_id)


@ -459,6 +459,20 @@ static void handle_rtnode_message(struct pw_proxy *proxy, struct pw_client_node_
spa_graph_need_input(data->node->rt.graph, &data->out_node);
}
else if (PW_CLIENT_NODE_MESSAGE_TYPE(message) == PW_CLIENT_NODE_MESSAGE_REUSE_BUFFER) {
struct pw_client_node_message_reuse_buffer *rb =
(struct pw_client_node_message_reuse_buffer *) message;
uint32_t port_id = rb->body.port_id.value;
uint32_t buffer_id = rb->body.buffer_id.value;
struct spa_graph_port *p, *pp;
spa_list_for_each(p, &data->out_node.ports[SPA_DIRECTION_INPUT], link) {
if (p->port_id != port_id || (pp = p->peer) == NULL)
continue;
spa_node_port_reuse_buffer(pp->node->implementation,
pp->port_id, buffer_id);
break;
}
}
else {
pw_log_warn("unexpected node message %d", PW_CLIENT_NODE_MESSAGE_TYPE(message));
@@ -484,6 +498,10 @@ on_rtsocket_condition(void *user_data, int fd, enum spa_io mask)
if (read(fd, &cmd, sizeof(uint64_t)) != sizeof(uint64_t))
pw_log_warn("proxy %p: read failed %m", proxy);
if (cmd > 1)
pw_log_warn("proxy %p: %ld messages", proxy, cmd);
while (pw_client_node_transport_next_message(data->trans, &message) == SPA_RESULT_OK) {
struct pw_client_node_message *msg = alloca(SPA_POD_SIZE(&message));
pw_client_node_transport_parse_message(data->trans, msg);
@@ -546,6 +564,8 @@ static void client_node_transport(void *object, uint32_t node_id,
sizeof(struct port));
for (i = 0; i < data->trans->area->max_input_ports; i++) {
data->trans->inputs[i].status = SPA_RESULT_NEED_BUFFER;
data->trans->inputs[i].buffer_id = SPA_ID_INVALID;
spa_graph_port_init(&data->in_ports[i].input,
SPA_DIRECTION_INPUT,
i,
@@ -1069,13 +1089,19 @@ static const struct pw_proxy_events proxy_events = {
.destroy = node_proxy_destroy,
};
static int impl_port_reuse_buffer(struct spa_node *node, uint32_t port_id, uint32_t buffer_id)
{
pw_log_trace("node %p: reuse buffer %d %d", node, port_id, buffer_id);
return SPA_RESULT_OK;
}
static int impl_process_input(struct spa_node *node)
{
#if 0
struct node_data *data = SPA_CONTAINER_OF(node, struct node_data, out_node_impl);
node_have_output(data);
#endif
pw_log_trace("node %p: have output", node);
pw_log_trace("node %p: process input", node);
return SPA_RESULT_OK;
}
@@ -1084,8 +1110,9 @@ static int impl_process_output(struct spa_node *node)
#if 0
struct node_data *data = SPA_CONTAINER_OF(node, struct node_data, in_node_impl);
node_need_input(data);
#endif
pw_log_trace("node %p: need input", node);
#endif
pw_log_trace("node %p: process output", node);
return SPA_RESULT_OK;
}
@@ -1094,6 +1121,7 @@ static const struct spa_node node_impl = {
NULL,
.process_input = impl_process_input,
.process_output = impl_process_output,
.port_reuse_buffer = impl_port_reuse_buffer,
};
struct pw_proxy *pw_remote_export(struct pw_remote *remote,


@ -532,6 +532,7 @@ static void handle_rtnode_message(struct pw_stream *stream, struct pw_client_nod
buffer_id = input->buffer_id;
input->buffer_id = SPA_ID_INVALID;
input->status = SPA_RESULT_NEED_BUFFER;
pw_log_trace("stream %p: process input %d %d", stream, input->status,
buffer_id);
@@ -908,6 +909,7 @@ static void client_node_transport(void *data, uint32_t node_id,
}
impl->trans = transport;
for (i = 0; i < impl->trans->area->max_input_ports; i++) {
impl->trans->inputs[i].status = SPA_RESULT_NEED_BUFFER;
}
pw_log_info("stream %p: create client transport %p with fds %d %d for node %u",