node: allocate shared mem for activation

Allocate, per node, a piece of shared memory in which we place the
activation structure with the graph state and io_position.
We can then give this info to nodes so that they can read their
position in the graph directly and, later, also activate the next
node in the graph.
This commit is contained in:
Wim Taymans 2019-02-07 12:34:54 +01:00
parent db230fc136
commit 658c1da52f
8 changed files with 83 additions and 89 deletions

View file

@ -170,8 +170,6 @@ struct impl {
int fds[2];
int other_fds[2];
struct spa_io_position *position;
};
static int
@ -295,7 +293,8 @@ static struct io *update_io(struct node *this,
io = f;
io->id = id;
io->memid = memid;
spa_log_debug(this->log, "node %p: add io %p %d %d", this, io, id, memid);
spa_log_debug(this->log, "node %p: add io %p %s %d", this, io,
spa_debug_type_find_name(spa_type_io, id), memid);
found:
return io;
@ -430,6 +429,7 @@ static int impl_node_set_io(struct spa_node *node, uint32_t id, void *data, size
return -EINVAL;
mem_offset += mem->offset;
mem_size = size;
m = ensure_mem(impl, mem->fd, SPA_DATA_MemFd, mem->flags);
memid = m->id;
}
@ -1011,15 +1011,10 @@ static int impl_node_process(struct spa_node *node)
{
struct node *this = SPA_CONTAINER_OF(node, struct node, node);
struct impl *impl = this->impl;
struct spa_io_position *q, *rq;
uint64_t cmd = 1;
spa_log_trace(this->log, "%p: send process %p", this, impl->this.node->driver_node);
q = impl->this.node->driver_node->rt.position;
rq = impl->position;
*rq = *q;
if (write(this->writefd, &cmd, 8) != 8)
spa_log_warn(this->log, "node %p: error %s", this, strerror(errno));
@ -1297,8 +1292,7 @@ static void node_initialized(void *data)
struct pw_client_node *this = &impl->this;
struct pw_node *node = this->node;
struct pw_global *global;
uint32_t area_size, size;
struct mem *m;
size_t size;
if (this->resource == NULL)
return;
@ -1313,8 +1307,7 @@ static void node_initialized(void *data)
spa_loop_add_source(impl->node.data_loop, &impl->node.data_source);
pw_log_debug("client-node %p: transport fd %d %d", node, impl->fds[0], impl->fds[1]);
area_size = sizeof(struct spa_io_buffers) * MAX_AREAS;
size = area_size + sizeof(struct spa_io_position);
size = sizeof(struct spa_io_buffers) * MAX_AREAS;
if (pw_memblock_alloc(PW_MEMBLOCK_FLAG_WITH_FD |
PW_MEMBLOCK_FLAG_MAP_READWRITE |
@ -1323,18 +1316,8 @@ static void node_initialized(void *data)
&impl->io_areas) < 0)
return;
impl->position = SPA_MEMBER(impl->io_areas->ptr,
area_size, struct spa_io_position);
m = ensure_mem(impl, impl->io_areas->fd, SPA_DATA_MemFd, impl->io_areas->flags);
pw_log_debug("client-node %p: io areas %p", node, impl->io_areas->ptr);
pw_client_node_resource_set_io(this->resource,
SPA_IO_Position,
m->id,
area_size,
sizeof(struct spa_io_position));
if ((global = pw_node_get_global(node)) != NULL)
pw_client_node_registered(this, pw_global_get_id(global));
}

View file

@ -245,7 +245,14 @@ static int impl_node_set_param(struct spa_node *node, uint32_t id, uint32_t flag
/* Set an io area on the wrapped client node.
 *
 * Defect fixed: the marker-stripped diff rendering left the removed
 * `return 0;` stub in place ahead of the new body, making everything
 * after it unreachable. This is the post-commit implementation, which
 * forwards the io area to the proxied client node so it receives the
 * shared graph state/position set up by the caller.
 *
 * @param node  the spa_node embedded in struct node (must not be NULL)
 * @param id    io area id (e.g. SPA_IO_Position)
 * @param data  pointer to the io memory, or NULL to clear
 * @param size  size of the io memory in bytes
 * @return 0 on success, -EINVAL when node is NULL, or the error from
 *         the client node's set_io implementation.
 */
static int impl_node_set_io(struct spa_node *node, uint32_t id, void *data, size_t size)
{
	struct node *this;
	struct impl *impl;

	spa_return_val_if_fail(node != NULL, -EINVAL);

	this = SPA_CONTAINER_OF(node, struct node, node);
	impl = this->impl;

	/* delegate to the client node implementation */
	return spa_node_set_io(impl->cnode, id, data, size);
}
static int impl_node_send_command(struct spa_node *node, const struct spa_command *command)
@ -949,8 +956,6 @@ static void client_node_initialized(void *data)
pw_log_debug("client-stream %p: initialized", &impl->this);
impl->cnode = pw_node_get_implementation(impl->client_node->node);
if ((res = spa_node_get_n_ports(impl->cnode,
&n_input_ports,
&max_input_ports,
@ -1235,6 +1240,8 @@ struct pw_client_stream *pw_client_stream_new(struct pw_resource *resource,
if (impl->client_node == NULL)
goto error_no_node;
impl->cnode = pw_node_get_implementation(impl->client_node->node);
support = pw_core_get_support(impl->core, &n_support);
node_init(&impl->node, NULL, support, n_support);

View file

@ -183,23 +183,25 @@ static void *mem_map(struct node_data *data, struct mapping *map,
pw_map_range_init(&m.map, offset, size, data->core->sc_pagesize);
if (map->ptr == NULL || map->map.offset != m.map.offset || map->map.size != m.map.size) {
map->ptr = mmap(map->ptr, m.map.size, prot, MAP_SHARED, fd, m.map.offset);
if (map->ptr == MAP_FAILED) {
m.ptr = mmap(map->ptr, m.map.size, prot, MAP_SHARED, fd, m.map.offset);
if (m.ptr == MAP_FAILED) {
pw_log_error("remote %p: Failed to mmap memory %d: %m", data, size);
return NULL;
}
map->map = m.map;
map->ptr = m.ptr;
pw_log_debug("remote %p: fd %d map %d %d %p", data, fd, m.map.offset, m.map.size, m.ptr);
}
ptr = SPA_MEMBER(map->ptr, map->map.start, void);
pw_log_debug("remote %p: fd %d mapped %d %d %p", data, fd, offset, size, ptr);
pw_log_debug("remote %p: fd %d ptr %p (%d %d)", data, fd, ptr, offset, size);
return ptr;
}
static void *mem_unmap(struct node_data *data, void *ptr, struct pw_map_range *range)
static void *mem_unmap(struct node_data *data, struct mapping *map)
{
if (ptr != NULL) {
if (munmap(SPA_MEMBER(ptr, -range->start, void), range->size) < 0)
if (map->ptr != NULL) {
if (munmap(map->ptr, map->map.size) < 0)
pw_log_warn("failed to unmap: %m");
}
return NULL;
@ -225,7 +227,7 @@ static void clear_mem(struct node_data *data, struct mem *m)
}
}
if (!has_ref) {
m->map.ptr = mem_unmap(data, m->map.ptr, &m->map.map);
m->map.ptr = mem_unmap(data, &m->map);
close(fd);
}
}