From f16f074725b88edec5ab79f925d566977012f040 Mon Sep 17 00:00:00 2001 From: Wim Taymans Date: Mon, 4 Nov 2024 17:02:52 +0100 Subject: [PATCH] context: set lazy scheduling flags in clock Collect the request scheduling flags and when there is a lazy driver, set the lazy scheduling flag in its clock. This means that the driver can expect RequestProcess commands to start the scheduling. Pass the lazy scheduling clock flag to the node. Make sure lazy scheduling drivers have a higher priority than their request drivers. --- src/pipewire/context.c | 7 +++++++ src/pipewire/impl-node.c | 11 +++++++++++ src/pipewire/private.h | 1 + 3 files changed, 19 insertions(+) diff --git a/src/pipewire/context.c b/src/pipewire/context.c index ecc8823d6..3fa36c981 100644 --- a/src/pipewire/context.c +++ b/src/pipewire/context.c @@ -1630,6 +1630,7 @@ again: uint64_t quantum_stamp = 0, rate_stamp = 0; bool force_rate, force_quantum, restore_rate = false, restore_quantum = false; bool do_reconfigure = false, need_resume, was_target_pending; + bool have_request = false; const uint32_t *node_rates; uint32_t node_n_rates, node_def_rate; uint32_t node_max_quantum, node_min_quantum, node_def_quantum, node_rate_quantum; @@ -1698,6 +1699,9 @@ again: context, s, running, s->runnable, rate.num, rate.denom, latency.num, latency.denom, s->name); + if (running && s != n && s->supports_request > 0) + have_request = true; + s->moved = false; } @@ -1854,6 +1858,9 @@ again: n->target_rate = n->rt.position->clock.target_rate; } + SPA_FLAG_UPDATE(n->rt.position->clock.flags, + SPA_IO_CLOCK_FLAG_LAZY, have_request && n->supports_lazy > 0); + pw_log_debug("%p: driver %p running:%d runnable:%d quantum:%u rate:%u (%"PRIu64"/%u)'%s'", context, n, running, n->runnable, target_quantum, target_rate, n->rt.position->clock.target_duration, diff --git a/src/pipewire/impl-node.c b/src/pipewire/impl-node.c index f4f3ab8af..1592d2685 100644 --- a/src/pipewire/impl-node.c +++ b/src/pipewire/impl-node.c @@ -326,6 +326,9 @@ 
static int start_node(struct pw_impl_node *this) pw_log_debug("%p: start node driving:%d driver:%d prepared:%d", this, this->driving, this->driver, this->rt.prepared); + this->lazy = this->rt.position && SPA_FLAG_IS_SET(this->rt.position->clock.flags, + SPA_IO_CLOCK_FLAG_LAZY); + if (!(this->driving && this->driver)) { impl->pending_play = true; res = spa_node_send_command(this->node, @@ -759,6 +762,14 @@ static inline void insert_driver(struct pw_context *context, struct pw_impl_node spa_list_for_each_safe(n, t, &context->driver_list, driver_link) { if (n->priority_driver < node->priority_driver) break; + if (n->priority_driver == 0 && node->priority_driver == 0) { + /* no priority is set, we prefer the driver that does + * lazy scheduling. */ + if (n->supports_request > 0 && node->supports_lazy > 0) { + if (n->supports_request <= node->supports_lazy) + break; + } + } } spa_list_append(&n->driver_link, &node->driver_link); pw_context_emit_driver_added(context, node); diff --git a/src/pipewire/private.h b/src/pipewire/private.h index 83a6547d4..cffd1e509 100644 --- a/src/pipewire/private.h +++ b/src/pipewire/private.h @@ -794,6 +794,7 @@ struct pw_impl_node { unsigned int sync:1; /**< the sync-groups are active */ unsigned int transport:1; /**< the transport is active */ unsigned int async:1; /**< async processing, one cycle latency */ + unsigned int lazy:1; /**< the graph is lazy scheduling */ uint32_t port_user_data_size; /**< extra size for port user data */