context: set lazy scheduling flags in clock

Collect the request scheduling flags and when there is a lazy driver,
set the lazy scheduling flag in its clock. This means that the driver
can expect RequestProcess commands to start the scheduling.

Pass the lazy scheduling clock flag to the node.

Make sure lazy scheduling drivers have a higher priority than their
request drivers.
This commit is contained in:
Wim Taymans 2024-11-04 17:02:52 +01:00
parent 9c49bffc22
commit f16f074725
3 changed files with 19 additions and 0 deletions

View file

@@ -1630,6 +1630,7 @@ again:
uint64_t quantum_stamp = 0, rate_stamp = 0;
bool force_rate, force_quantum, restore_rate = false, restore_quantum = false;
bool do_reconfigure = false, need_resume, was_target_pending;
bool have_request = false;
const uint32_t *node_rates;
uint32_t node_n_rates, node_def_rate;
uint32_t node_max_quantum, node_min_quantum, node_def_quantum, node_rate_quantum;
@@ -1698,6 +1699,9 @@ again:
context, s, running, s->runnable, rate.num, rate.denom,
latency.num, latency.denom, s->name);
if (running && s != n && s->supports_request > 0)
have_request = true;
s->moved = false;
}
@@ -1854,6 +1858,9 @@ again:
n->target_rate = n->rt.position->clock.target_rate;
}
SPA_FLAG_UPDATE(n->rt.position->clock.flags,
SPA_IO_CLOCK_FLAG_LAZY, have_request && n->supports_lazy > 0);
pw_log_debug("%p: driver %p running:%d runnable:%d quantum:%u rate:%u (%"PRIu64"/%u)'%s'",
context, n, running, n->runnable, target_quantum, target_rate,
n->rt.position->clock.target_duration,

View file

@@ -326,6 +326,9 @@ static int start_node(struct pw_impl_node *this)
pw_log_debug("%p: start node driving:%d driver:%d prepared:%d", this,
this->driving, this->driver, this->rt.prepared);
this->lazy = this->rt.position && SPA_FLAG_IS_SET(this->rt.position->clock.flags,
SPA_IO_CLOCK_FLAG_LAZY);
if (!(this->driving && this->driver)) {
impl->pending_play = true;
res = spa_node_send_command(this->node,
@@ -759,6 +762,14 @@ static inline void insert_driver(struct pw_context *context, struct pw_impl_node
spa_list_for_each_safe(n, t, &context->driver_list, driver_link) {
if (n->priority_driver < node->priority_driver)
break;
if (n->priority_driver == 0 && node->priority_driver == 0) {
/* no priority is set, we prefer the driver that does
* lazy scheduling. */
if (n->supports_request > 0 && node->supports_lazy > 0) {
if (n->supports_request <= node->supports_lazy)
break;
}
}
}
spa_list_append(&n->driver_link, &node->driver_link);
pw_context_emit_driver_added(context, node);

View file

@ -794,6 +794,7 @@ struct pw_impl_node {
unsigned int sync:1; /**< the sync-groups are active */
unsigned int transport:1; /**< the transport is active */
unsigned int async:1; /**< async processing, one cycle latency */
unsigned int lazy:1; /**< the graph is lazy scheduling */
uint32_t port_user_data_size; /**< extra size for port user data */