diff --git a/doc/dox/internals/scheduling.dox b/doc/dox/internals/scheduling.dox
index 0c290abcb..989aa0306 100644
--- a/doc/dox/internals/scheduling.dox
+++ b/doc/dox/internals/scheduling.dox
@@ -206,4 +206,143 @@
 After they complete (and only when the profiler is active), they will trigger
 an extra eventfd to signal the server that the graph completed. This is used
 by the server to generate the profiler info.
+## Lazy scheduling
+
+Normally, a driver wakes up the graph and all the followers then need to process
+the data in sync. There are cases where:
+
+ 1. the follower might not be ready to process the data
+ 2. the driver rate is not ideal, the follower rate is better
+ 3. the driver might not know when new data is available in the follower and
+    might wake up the graph too often.
+
+In these cases, the driver and follower roles need to be reversed, and a mechanism
+needs to be provided so that the follower can know when it is worth processing the
+graph.
+
+To notify the driver that the graph is ready to be processed, (non-driver) nodes can
+send a RequestProcess event, which arrives as a RequestProcess command in the driver.
+The driver can then decide whether or not to run the graph.
+
+When the graph is started or partially controlled by RequestProcess events and
+commands, we say we have lazy scheduling. The driver is then no longer scheduling
+purely according to its own rhythm but also depending on the followers.
+
+Lazy scheduling can't simply be enabled blindly: it is pointless when no follower
+will emit RequestProcess events or when no driver will listen for RequestProcess
+commands. Two new node properties are therefore defined:
+
+ - node.supports-lazy = 0 | 1 | ...
+
+     0 means lazy scheduling as a driver is not supported
+     > 0 means lazy scheduling as a driver is supported, with increasing preference
+
+ - node.supports-request = 0 | 1 | ...
+
+     0 means request events as a follower are not supported
+     > 0 means request events as a follower are supported, with increasing preference
+
+Lazy scheduling can only be enabled when both the driver and (at least one) follower
+have the node.supports-lazy and node.supports-request properties, respectively.
+
+A node can end up as a driver (is_driving()) and lazy scheduling can be enabled
+(is_lazy()), which results in the following cases:
+
+  driver producer
+     -> node.driver = true
+     -> is_driving() && !is_lazy()
+     -> calls trigger_process() to start the graph
+
+  lazy producer
+     -> node.driver = true
+     -> node.supports-lazy = 1
+     -> is_driving() && is_lazy()
+     -> listens for RequestProcess and calls trigger_process() to start the graph
+
+  requesting producer
+     -> node.supports-request = 1
+     -> !is_driving() && is_lazy()
+     -> emits RequestProcess to suggest starting the graph
+
+  follower producer
+     -> !is_driving() && !is_lazy()
+
+
+  driver consumer
+     -> node.driver = true
+     -> is_driving() && !is_lazy()
+     -> calls trigger_process() to start the graph
+
+  lazy consumer
+     -> node.driver = true
+     -> node.supports-lazy = 1
+     -> is_driving() && is_lazy()
+     -> listens for RequestProcess and calls trigger_process() to start the graph
+
+  requesting consumer
+     -> node.supports-request = 1
+     -> !is_driving() && is_lazy()
+     -> emits RequestProcess to suggest starting the graph
+
+  follower consumer
+     -> !is_driving() && !is_lazy()
+
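+As an illustration, a client only needs to set the matching properties to put its
+node in one of these roles. A minimal sketch (the helper names are made up here;
+the properties would typically be passed to pw_stream_new() or similar):
+
+```c
+#include <pipewire/pipewire.h>
+
+// Properties for a node that offers to be a lazy driver: it can drive the
+// graph but will also listen for RequestProcess commands before doing so.
+static struct pw_properties *make_lazy_driver_props(void)
+{
+	return pw_properties_new(
+			PW_KEY_NODE_DRIVER, "true",
+			PW_KEY_NODE_SUPPORTS_LAZY, "1",
+			NULL);
+}
+
+// Properties for a node that emits RequestProcess events when it has
+// something to process, leaving the scheduling decision to the driver.
+static struct pw_properties *make_requesting_props(void)
+{
+	return pw_properties_new(
+			PW_KEY_NODE_SUPPORTS_REQUEST, "1",
+			NULL);
+}
+```
+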
+Some use cases:
+
+ 1. Screensharing - driver producer, follower consumer
+
+    - The producer starts the graph when a new frame is available.
+    - The consumer consumes the new frames.
+    -> throttles to the rate of the producer and idles when no frames
+       are available.
+
+    producer
+      - node.driver = true
+
+    consumer
+      - node.driver = false
+
+    -> producer selected as driver, consumer is simple follower.
+       lazy scheduling inactive (no lazy driver or no request follower)
+
+
+ 2. Headless server - requesting producer, (semi) lazy driver consumer
+
+    - The producer emits RequestProcess when new frames are available.
+    - The consumer requests new frames from the producer according to its
+      refresh rate when there are RequestProcess commands.
+    -> this throttles the framerate to the consumer but idles when there is
+       no activity on the producer.
+
+    producer
+      - node.driver = true
+      - node.supports-request = 1
+
+    consumer
+      - node.driver = true
+      - node.supports-lazy = 2
+
+    -> consumer is selected as driver (lazy > request)
+       lazy scheduling active (1 lazy driver and at least 1 request follower)
+
+
+ 3. Frame encoder - lazy driver producer, requesting follower consumer
+
+    - The consumer pulls a frame when it is ready to encode the next one.
+    - The producer produces the next frame on demand.
+    -> throttles the speed to the consumer without idling.
+
+    producer
+      - node.driver = true
+      - node.supports-lazy = 1
+
+    consumer
+      - node.driver = true
+      - node.supports-request = 1
+
+    -> producer is selected as driver (lazy <= request)
+       lazy scheduling active (1 lazy driver and at least 1 request follower)
+
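+The new SPA_IO_CLOCK_FLAG_LAZY clock flag (see the spa_io_clock change below)
+marks the clock of a lazily scheduled graph. A minimal sketch of how a pw_stream
+client could check it from its io_changed and process callbacks (the rest of the
+stream setup and all error handling are omitted):
+
+```c
+#include <stdbool.h>
+#include <pipewire/pipewire.h>
+#include <spa/node/io.h>
+
+static struct spa_io_position *position;
+
+// Remember the SPA_IO_Position area the server hands us.
+static void on_io_changed(void *data, uint32_t id, void *area, uint32_t size)
+{
+	if (id == SPA_IO_Position)
+		position = area;
+}
+
+static void on_process(void *data)
+{
+	bool lazy = position != NULL &&
+		(position->clock.flags & SPA_IO_CLOCK_FLAG_LAZY);
+
+	// When lazy is true the graph is only woken up on demand, so
+	// process() may run at an irregular rate.
+	(void)lazy;
+}
+```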
+
 */
diff --git a/spa/include/spa/node/io.h b/spa/include/spa/node/io.h
index 367bebd91..409fdde19 100644
--- a/spa/include/spa/node/io.h
+++ b/spa/include/spa/node/io.h
@@ -127,6 +127,7 @@ struct spa_io_range {
 struct spa_io_clock {
 #define SPA_IO_CLOCK_FLAG_FREEWHEEL	(1u<<0)	/* graph is freewheeling */
 #define SPA_IO_CLOCK_FLAG_XRUN_RECOVER	(1u<<1)	/* recovering from xrun */
+#define SPA_IO_CLOCK_FLAG_LAZY		(1u<<2)	/* lazy scheduling */
 	uint32_t flags;			/**< Clock flags */
 	uint32_t id;			/**< Unique clock id, set by host application */
 	char name[64];			/**< Clock name prefixed with API, set by node when it receives
diff --git a/src/pipewire/impl-node.c b/src/pipewire/impl-node.c
index 5a32580c9..8a03d7250 100644
--- a/src/pipewire/impl-node.c
+++ b/src/pipewire/impl-node.c
@@ -1122,6 +1122,8 @@ static void check_properties(struct pw_impl_node *node)
 			}
 		}
 	}
+	node->supports_lazy = pw_properties_get_uint32(node->properties, PW_KEY_NODE_SUPPORTS_LAZY, 0);
+	node->supports_request = pw_properties_get_uint32(node->properties, PW_KEY_NODE_SUPPORTS_REQUEST, 0);
 
 	if ((str = pw_properties_get(node->properties, PW_KEY_NODE_NAME)) &&
 	    (node->name == NULL || !spa_streq(node->name, str))) {
diff --git a/src/pipewire/keys.h b/src/pipewire/keys.h
index bc6cdb2ff..600b2fa61 100644
--- a/src/pipewire/keys.h
+++ b/src/pipewire/keys.h
@@ -190,6 +190,15 @@ extern "C" {
 #define PW_KEY_NODE_DRIVER		"node.driver"	/**< node can drive the graph. When the node is
 							 *   selected as the driver, it needs to start
 							 *   the graph periodically. */
+#define PW_KEY_NODE_SUPPORTS_LAZY	"node.supports-lazy"	/**< the node can be a lazy driver. It will listen
+							 *   to RequestProcess commands and take them into
+							 *   account when deciding to start the graph.
+							 *   A value of 0 disables support, a value of > 0
+							 *   enables with increasing preference. */
+#define PW_KEY_NODE_SUPPORTS_REQUEST	"node.supports-request"	/**< The node supports emitting RequestProcess events
+							 *   when it wants the graph to be scheduled.
+							 *   A value of 0 disables support, a value of > 0
+							 *   enables with increasing preference. */
 #define PW_KEY_NODE_DRIVER_ID		"node.driver-id"	/**< the node id of the node assigned as driver
 							 *   for this node */
 #define PW_KEY_NODE_ASYNC		"node.async"	/**< the node wants async scheduling */
diff --git a/src/pipewire/private.h b/src/pipewire/private.h
index db2291ec5..e14d79f42 100644
--- a/src/pipewire/private.h
+++ b/src/pipewire/private.h
@@ -740,6 +740,9 @@ struct pw_impl_node {
 	char *name;			/** for debug */
 
+	uint32_t supports_lazy;		/**< lazy driver preference */
+	uint32_t supports_request;	/**< request follower preference */
+
 	uint32_t priority_driver;	/** priority for being driver */
 	char **groups;			/** groups to schedule this node in */
 	char **link_groups;		/** groups this node is linked to */
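
As a rough sketch of how these pieces fit together, the "headless server" consumer
described in the documentation above could be built on the pw_stream API along the
following lines. This assumes the stream was created with node.driver = true and
node.supports-lazy set, that the followers' RequestProcess requests arrive through
the stream's command callback with a SPA_NODE_COMMAND_RequestProcess id (as the
documentation describes), and that a timer calls on_refresh_tick() at the output
refresh rate; the struct and function names are illustrative only.

```c
#include <stdbool.h>
#include <pipewire/pipewire.h>
#include <spa/node/command.h>

struct impl {
	struct pw_stream *stream;
	bool pending;		/* a follower asked us to process */
};

/* RequestProcess events sent by followers arrive here as commands,
 * because this stream is the (lazy) driver of the graph. */
static void on_command(void *data, const struct spa_command *command)
{
	struct impl *impl = data;

	if (SPA_NODE_COMMAND_ID(command) == SPA_NODE_COMMAND_RequestProcess)
		impl->pending = true;
}

/* Called from the consumer's own refresh timer: only wake up the
 * graph when a follower actually has something for us. */
static void on_refresh_tick(struct impl *impl)
{
	if (impl->pending) {
		impl->pending = false;
		pw_stream_trigger_process(impl->stream);
	}
}
```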