impl-node: implement async scheduling

When the node.async property is set, make the node async.

Advertise SPA_IO_AsyncBuffers on mixer ports when supported. Set a new
port flag when AsyncBuffers is supported on the port.

When making a link, if one of the nodes is async and the linked ports
support AsyncBuffers, make the link async and set this as a property on
the link. Async links use SPA_IO_AsyncBuffers on the mixer ports.

Async nodes do not increment the peer's required counters. This ensures
that the peer can start immediately, without waiting for the async node
to be ready.
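
To illustrate the counter rule, here is a self-contained toy model; the
names (link_is_async, activation_state, link_prepare) are hypothetical,
loosely following PipeWire's activation-state bookkeeping:

#include <stdbool.h>

/* Toy model: each link normally adds one to the peer's required count
 * for the cycle; the peer becomes runnable when its pending count has
 * dropped back to zero. Async links skip the increment, so the peer
 * never waits for the async node. */
struct activation_state { int required; int pending; };

static void link_prepare(bool link_is_async, struct activation_state *peer)
{
        if (!link_is_async)
                peer->required++;
}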

On an async link, writers write to async-buffers entry ((cycle+1) & 1)
and readers read from entry (cycle & 1). This makes readers read from
the area that was filled on the previous cycle.
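
A minimal sketch of the slot selection, assuming a 64-bit cycle counter
and the two-entry spa_io_async_buffers layout visible in the diff below
(the helper names are hypothetical):

#include <stdint.h>
#include <spa/node/io.h>

/* The writer fills the slot for the next cycle; the reader consumes
 * the slot filled on the previous cycle, so the two sides never touch
 * the same spa_io_buffers entry during one cycle. */
static inline struct spa_io_buffers *
async_write_slot(struct spa_io_async_buffers *ab, uint64_t cycle)
{
        return &ab->buffers[(cycle + 1) & 1];
}

static inline struct spa_io_buffers *
async_read_slot(struct spa_io_async_buffers *ab, uint64_t cycle)
{
        return &ab->buffers[cycle & 1];
}

On cycle N the writer fills slot (N+1) & 1; one cycle later the reader's
index (N+1) & 1 selects exactly that slot, so data always arrives with a
one-cycle delay.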

We need two strictly controlled areas with fixed rules for who reads
and who writes where, because the two nodes run concurrently and no
other synchronization is possible.

Async nodes can be paused and blocked without blocking or xrunning the
rest of the graph. If the node has not produced anything when the next
cycle starts, the graph runs with silence.

See #3509
Commit: 68916e062b (parent: e8ac4e6a34)
Author: Wim Taymans
Date:   2024-04-09 17:45:34 +02:00
Stats:  11 changed files, 324 additions, 112 deletions

@@ -31,7 +31,8 @@ PW_LOG_TOPIC_EXTERN(mod_topic);
 #define MAX_BUFFERS 64
 #define MAX_METAS 16u
 #define MAX_DATAS 64u
-#define AREA_SIZE (4096u / sizeof(struct spa_io_buffers))
+#define AREA_SLOT (sizeof(struct spa_io_async_buffers))
+#define AREA_SIZE (4096u / AREA_SLOT)
 #define MAX_AREAS 32
 #define CHECK_FREE_PORT(impl,d,p) (p <= pw_map_get_size(&impl->ports[d]) && !CHECK_PORT(impl,d,p))
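
For context, a sketch of the slot arithmetic behind the new defines:
assuming the common 8-byte struct spa_io_buffers (status + buffer_id),
one two-entry slot is 16 bytes, so a 4096-byte area holds 256 slots.
The mix_slot() helper is hypothetical:

#include <spa/node/io.h>
#include <spa/utils/defs.h>

#define AREA_SLOT (sizeof(struct spa_io_async_buffers))
#define AREA_SIZE (4096u / AREA_SLOT)

/* address of mix slot `pos` inside one mapped 4096-byte area */
static inline void *mix_slot(void *area_base, uint32_t pos)
{
        return SPA_PTROFF(area_base, pos * AREA_SLOT, void);
}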
@@ -1363,7 +1364,7 @@ static int add_area(struct impl *impl)
 	size_t size;
 	struct pw_memblock *area;
-	size = sizeof(struct spa_io_buffers) * AREA_SIZE;
+	size = AREA_SLOT * AREA_SIZE;
 	area = pw_mempool_alloc(impl->context_pool,
 			PW_MEMBLOCK_FLAG_READWRITE |
@@ -1449,6 +1450,7 @@ static int port_init_mix(void *data, struct pw_impl_port_mix *mix)
 	struct mix *m;
 	uint32_t idx, pos, len;
 	struct pw_memblock *area;
+	struct spa_io_async_buffers *ab;
 	if ((m = create_mix(port, mix->port.port_id)) == NULL)
 		return -ENOMEM;
@@ -1472,9 +1474,12 @@ static int port_init_mix(void *data, struct pw_impl_port_mix *mix)
 	}
 	area = *pw_array_get_unchecked(&impl->io_areas, idx, struct pw_memblock*);
-	mix->io = SPA_PTROFF(area->map->ptr,
-			pos * sizeof(struct spa_io_buffers), void);
-	*mix->io = SPA_IO_BUFFERS_INIT;
+	ab = SPA_PTROFF(area->map->ptr, pos * AREA_SLOT, void);
+	mix->io_data = ab;
+	mix->io[0] = &ab->buffers[0];
+	mix->io[1] = &ab->buffers[1];
+	*mix->io[0] = SPA_IO_BUFFERS_INIT;
+	*mix->io[1] = SPA_IO_BUFFERS_INIT;
 	m->peer_id = mix->peer_id;
 	m->impl_mix_id = mix->id;
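
Keeping mix->io as a two-entry array lets the data path index by cycle
parity unconditionally; a synchronous link simply aliases both entries
to the same spa_io_buffers (see the port_set_io hunk below). A sketch of
a reader on that array, with a hypothetical mix_pull() helper:

#include <stdint.h>
#include <spa/node/io.h>
#include <spa/utils/defs.h>

/* For sync links io[0] == io[1], so the parity index is harmless. */
static uint32_t mix_pull(struct spa_io_buffers *io[2], uint64_t cycle)
{
        struct spa_io_buffers *b = io[cycle & 1];
        if (b == NULL || b->status != SPA_STATUS_HAVE_DATA)
                return SPA_ID_INVALID;
        b->status = SPA_STATUS_NEED_DATA;
        return b->buffer_id;
}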
@@ -1484,8 +1489,8 @@ static int port_init_mix(void *data, struct pw_impl_port_mix *mix)
 			mix->port.direction, mix->p->port_id,
 			mix->port.port_id, mix->peer_id, NULL);
-	pw_log_debug("%p: init mix id:%d io:%p base:%p", impl,
-			mix->id, mix->io, area->map->ptr);
+	pw_log_debug("%p: init mix id:%d io:%p/%p base:%p", impl,
+			mix->id, mix->io[0], mix->io[1], area->map->ptr);
 	return 0;
 no_mem:
@@ -1606,11 +1611,24 @@ static int impl_mix_port_set_io(void *object,
 	if (mix == NULL)
 		return -EINVAL;
-	if (id == SPA_IO_Buffers) {
+	switch (id) {
+	case SPA_IO_Buffers:
 		if (data && size >= sizeof(struct spa_io_buffers))
-			mix->io = data;
+			mix->io[0] = mix->io[1] = data;
 		else
-			mix->io = NULL;
+			mix->io[0] = mix->io[1] = NULL;
+		break;
+	case SPA_IO_AsyncBuffers:
+		if (data && size >= sizeof(struct spa_io_async_buffers)) {
+			struct spa_io_async_buffers *ab = data;
+			mix->io[0] = &ab->buffers[0];
+			mix->io[1] = &ab->buffers[1];
+		}
+		else
+			mix->io[0] = mix->io[1] = NULL;
+		break;
+	default:
+		break;
 	}
 	return do_port_set_io(impl,
 			direction, port->port_id, mix->port.port_id,
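
To reach this code path from the node side, a caller passes an
spa_io_async_buffers region through the standard SPA set_io entry
point. A minimal sketch; attach_async_io() is hypothetical and the
region would normally live in the shared io area set up in
port_init_mix above:

#include <spa/node/node.h>
#include <spa/node/io.h>

static int attach_async_io(struct spa_node *node, enum spa_direction direction,
		uint32_t port_id, struct spa_io_async_buffers *ab)
{
        /* ab points at (shared) memory holding the two buffer slots */
        return spa_node_port_set_io(node, direction, port_id,
                        SPA_IO_AsyncBuffers, ab, sizeof(*ab));
}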