diff --git a/include/wlr/types/wlr_compositor.h b/include/wlr/types/wlr_compositor.h index 6ba8d8a07..10e831629 100644 --- a/include/wlr/types/wlr_compositor.h +++ b/include/wlr/types/wlr_compositor.h @@ -72,6 +72,22 @@ struct wlr_surface_state { // Sync'ed object states, one per struct wlr_surface_synced struct wl_array synced; // void * + + // private state + + struct wlr_surface_state_group *group; +}; + +struct wlr_surface_state_lock { + // private state + struct wlr_surface *surface; + uint32_t seq; + struct wl_listener surface_destroy; +}; + +struct wlr_surface_transaction { + // private state + struct wl_array *surfaces; // struct wlr_surface * }; struct wlr_surface_role { @@ -195,7 +211,7 @@ struct wlr_surface { * The commit may not be applied immediately, in which case it's marked * as "cached" and put into a queue. See wlr_surface_lock_pending(). */ - struct wl_signal client_commit; + struct wl_signal client_commit; // struct wlr_surface_client_commit_event /** * Signals that a commit has been applied. * @@ -263,6 +279,13 @@ struct wlr_surface { struct wl_resource *pending_buffer_resource; struct wl_listener pending_buffer_resource_destroy; + + struct wlr_surface_state *txn_state; // NULL if not added to a transaction + struct wl_array txn_buffer; +}; + +struct wlr_surface_client_commit_event { + struct wlr_surface_transaction *transaction; }; struct wlr_renderer; @@ -443,23 +466,65 @@ void wlr_surface_get_buffer_source_box(struct wlr_surface *surface, struct wlr_fbox *box); /** - * Acquire a lock for the pending surface state. + * Acquire a lock for the pending surface state. The lock must be unlocked. * * The state won't be committed before the caller releases the lock. Instead, - * the state becomes cached. The caller needs to use wlr_surface_unlock_cached() - * to release the lock. - * - * Returns a surface commit sequence number for the cached state. + * the state becomes cached. 
The caller needs to use + * wlr_surface_state_lock_release() to release the lock. */ -uint32_t wlr_surface_lock_pending(struct wlr_surface *surface); +void wlr_surface_state_lock_acquire(struct wlr_surface_state_lock *lock, + struct wlr_surface *surface); /** - * Release a lock for a cached state. + * Release a lock for a surface state. If the lock is already unlocked, + * this is no-op. * * Callers should not assume that the cached state will immediately be * committed. Another caller may still have an active lock. */ -void wlr_surface_unlock_cached(struct wlr_surface *surface, uint32_t seq); +void wlr_surface_state_lock_release(struct wlr_surface_state_lock *lock); + +/** + * Get the status of a lock. + * + * Returns true if the lock is locked, false otherwise. + */ +bool wlr_surface_state_lock_locked(struct wlr_surface_state_lock *lock); + +/** + * Initialize a surface transaction with an existing buffer. + * + * After the transaction is committed or dropped, the buffer can be reused. + */ +void wlr_surface_transaction_init(struct wlr_surface_transaction *txn, struct wl_array *buffer); + +/** + * Add a state lock to the transaction. If the locked state is a part of a state + * group, the group is treated as a set of locks which are added individually + * instead. + * + * Adding locks for different states of the same surface is not allowed. + * + * On success, the lock is moved to the transaction, and true is returned. + * On failure, false is returned. + */ +bool wlr_surface_transaction_add_lock(struct wlr_surface_transaction *txn, + struct wlr_surface_state_lock *lock); + +/** + * Drop the transaction, releasing all its locks. + */ +void wlr_surface_transaction_drop(struct wlr_surface_transaction *txn); + +/** + * Commit the transaction, releasing all its locks. The corresponding states are + * added into a state group, which is committed only once all its states can be + * committed. + * + * On success, true is returned. 
+ * On failure, the transaction is dropped, and false is returned. + */ +bool wlr_surface_transaction_commit(struct wlr_surface_transaction *txn); /** * Set the preferred buffer scale for the surface. diff --git a/include/wlr/types/wlr_dbg_txn.h b/include/wlr/types/wlr_dbg_txn.h new file mode 100644 index 000000000..dbea0e239 --- /dev/null +++ b/include/wlr/types/wlr_dbg_txn.h @@ -0,0 +1,23 @@ +/* + * This an unstable interface of wlroots. No guarantees are made regarding the + * future consistency of this API. + */ +#ifndef WLR_USE_UNSTABLE +#error "Add -DWLR_USE_UNSTABLE to enable unstable wlroots features" +#endif + +#ifndef WLR_TYPES_WLR_DBG_TXN_H +#define WLR_TYPES_WLR_DBG_TXN_H + +#include +#include "dbg-txn-protocol.h" + +struct wlr_dbg_txn_manager { + struct wl_global *global; + + struct wl_listener display_destroy; +}; + +struct wlr_dbg_txn_manager *wlr_dbg_txn_manager_create(struct wl_display *display); + +#endif diff --git a/include/wlr/types/wlr_subcompositor.h b/include/wlr/types/wlr_subcompositor.h index 61a2cccc9..85505ff3e 100644 --- a/include/wlr/types/wlr_subcompositor.h +++ b/include/wlr/types/wlr_subcompositor.h @@ -35,13 +35,11 @@ struct wlr_subsurface { struct wlr_subsurface_parent_state current, pending; - uint32_t cached_seq; - bool has_cache; - bool synchronized; bool added; struct wl_listener surface_client_commit; + struct wl_listener parent_client_commit; struct wl_listener parent_destroy; struct { @@ -52,6 +50,8 @@ struct wlr_subsurface { // private state + struct wlr_surface_state_lock cached_lock; + struct wlr_surface_synced parent_synced; }; diff --git a/protocol/dbg-txn.xml b/protocol/dbg-txn.xml new file mode 100644 index 000000000..36179e777 --- /dev/null +++ b/protocol/dbg-txn.xml @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff --git a/protocol/meson.build b/protocol/meson.build index a4476918b..0d36053f2 100644 --- a/protocol/meson.build +++ b/protocol/meson.build @@ -64,6 +64,8 @@ protocols = { 
'wlr-output-power-management-unstable-v1': 'wlr-output-power-management-unstable-v1.xml', 'wlr-screencopy-unstable-v1': 'wlr-screencopy-unstable-v1.xml', 'wlr-virtual-pointer-unstable-v1': 'wlr-virtual-pointer-unstable-v1.xml', + + 'dbg-txn': 'dbg-txn.xml', } protocols_code = {} diff --git a/tinywl/meson.build b/tinywl/meson.build index e7271458b..11ce064a0 100644 --- a/tinywl/meson.build +++ b/tinywl/meson.build @@ -1,5 +1,6 @@ executable( 'tinywl', - ['tinywl.c', protocols_server_header['xdg-shell']], + ['tinywl.c', protocols_server_header['xdg-shell'], + protocols_server_header['dbg-txn'], protocols_server_header['viewporter']], dependencies: wlroots, ) diff --git a/tinywl/tinywl.c b/tinywl/tinywl.c index 6cf08b119..d545005b7 100644 --- a/tinywl/tinywl.c +++ b/tinywl/tinywl.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -981,6 +983,9 @@ int main(int argc, char *argv[]) { * HiDPI support). */ server.cursor_mgr = wlr_xcursor_manager_create(NULL, 24); + wlr_viewporter_create(server.wl_display); + wlr_dbg_txn_manager_create(server.wl_display); + /* * wlr_cursor *only* displays an image on screen. It does not move around * when the pointer moves. 
However, we can attach input devices to it, and diff --git a/types/meson.build b/types/meson.build index ec70d4b7c..c3ab2b4dd 100644 --- a/types/meson.build +++ b/types/meson.build @@ -40,6 +40,7 @@ wlr_files += files( 'wlr_cursor.c', 'wlr_damage_ring.c', 'wlr_data_control_v1.c', + 'wlr_dbg_txn.c', 'wlr_drm.c', 'wlr_export_dmabuf_v1.c', 'wlr_foreign_toplevel_management_v1.c', diff --git a/types/wlr_compositor.c b/types/wlr_compositor.c index 081af3539..2d7b39ffb 100644 --- a/types/wlr_compositor.c +++ b/types/wlr_compositor.c @@ -19,6 +19,19 @@ #define COMPOSITOR_VERSION 6 #define CALLBACK_VERSION 1 +struct wlr_surface_state_group_entry { + struct wlr_surface_state_group *group; + struct wlr_surface *surface; + struct wlr_surface_state *state; + struct wl_listener surface_destroy; +}; + +struct wlr_surface_state_group { + size_t n_entries; + size_t n_waiting; + struct wlr_surface_state_group_entry entries[]; +}; + static int min(int fst, int snd) { if (fst < snd) { return fst; @@ -468,11 +481,13 @@ static void surface_update_input_region(struct wlr_surface *surface) { 0, 0, surface->current.width, surface->current.height); } -static bool surface_state_init(struct wlr_surface_state *state, - struct wlr_surface *surface); +static bool surface_state_init(struct wlr_surface_state *state, struct wlr_surface *surface); static void surface_state_finish(struct wlr_surface_state *state); -static void surface_cache_pending(struct wlr_surface *surface) { +static bool transaction_commit(struct wlr_surface_transaction *txn, + struct wlr_surface *commit_surface); + +static struct wlr_surface_state *surface_cache_pending(struct wlr_surface *surface) { struct wlr_surface_state *cached = calloc(1, sizeof(*cached)); if (!cached) { goto error; @@ -498,7 +513,7 @@ static void surface_cache_pending(struct wlr_surface *surface) { surface->pending.seq++; - return; + return cached; error_state: surface_state_finish(cached); @@ -506,11 +521,12 @@ error_cached: free(cached); error: 
wl_resource_post_no_memory(surface->resource); + return NULL; } -static void surface_commit_state(struct wlr_surface *surface, - struct wlr_surface_state *next) { +static void surface_commit_state(struct wlr_surface *surface, struct wlr_surface_state *next) { assert(next->cached_state_locks == 0); + assert(next->group == NULL); bool invalid_buffer = next->committed & WLR_SURFACE_STATE_BUFFER; @@ -578,17 +594,21 @@ static void surface_handle_commit(struct wl_client *client, surface->role->client_commit(surface); } - wl_signal_emit_mutable(&surface->events.client_commit, NULL); + struct wlr_surface_transaction txn; + wlr_surface_transaction_init(&txn, &surface->txn_buffer); surface->handling_commit = false; if (surface->pending_rejected) { return; } - if (surface->pending.cached_state_locks > 0 || !wl_list_empty(&surface->cached)) { - surface_cache_pending(surface); - } else { - surface_commit_state(surface, &surface->pending); + struct wlr_surface_client_commit_event event = { + .transaction = &txn, + }; + wl_signal_emit_mutable(&surface->events.client_commit, &event); + + if (!transaction_commit(&txn, surface)) { + wl_resource_post_no_memory(resource); } } @@ -718,6 +738,9 @@ static void surface_destroy_role_object(struct wlr_surface *surface); static void surface_handle_resource_destroy(struct wl_resource *resource) { struct wlr_surface *surface = wlr_surface_from_resource(resource); + assert(surface->txn_state == NULL && + "Tried to destroy a surface which was a part of a transaction"); + struct wlr_surface_output *surface_output, *surface_output_tmp; wl_list_for_each_safe(surface_output, surface_output_tmp, &surface->current_outputs, link) { @@ -736,6 +759,8 @@ static void surface_handle_resource_destroy(struct wl_resource *resource) { surface_state_destroy_cached(cached, surface); } + wl_array_release(&surface->txn_buffer); + wl_list_remove(&surface->role_resource_destroy.link); wl_list_remove(&surface->pending_buffer_resource_destroy.link); @@ -789,6 +814,7 @@ 
static struct wlr_surface *surface_create(struct wl_client *client, pixman_region32_init(&surface->input_region); wlr_addon_set_init(&surface->addons); wl_list_init(&surface->synced); + wl_array_init(&surface->txn_buffer); wl_list_init(&surface->role_resource_destroy.link); @@ -928,53 +954,324 @@ static void surface_destroy_role_object(struct wlr_surface *surface) { wl_list_init(&surface->role_resource_destroy.link); } -uint32_t wlr_surface_lock_pending(struct wlr_surface *surface) { - surface->pending.cached_state_locks++; - return surface->pending.seq; -} - -void wlr_surface_unlock_cached(struct wlr_surface *surface, uint32_t seq) { +static struct wlr_surface_state *state_by_seq(struct wlr_surface *surface, uint32_t seq) { if (surface->pending.seq == seq) { - assert(surface->pending.cached_state_locks > 0); - surface->pending.cached_state_locks--; - return; + return &surface->pending; } - bool found = false; struct wlr_surface_state *cached; wl_list_for_each(cached, &surface->cached, cached_state_link) { if (cached->seq == seq) { - found = true; - break; + return cached; } } - assert(found); - assert(cached->cached_state_locks > 0); - cached->cached_state_locks--; + abort(); // Invalid seq +} - if (cached->cached_state_locks != 0) { - return; - } +// Returns true if the cached state can be applied immediately +static bool cached_state_ready(struct wlr_surface *surface, struct wlr_surface_state *state) { + return state->cached_state_locks == 0 && state->cached_state_link.prev == &surface->cached; +} - if (cached->cached_state_link.prev != &surface->cached) { - // This isn't the first cached state. This means we're blocked on a - // previous cached state. 
- return; - } +static void group_notify_ready(struct wlr_surface_state_group *group); - // TODO: consider merging all committed states together - struct wlr_surface_state *next, *tmp; - wl_list_for_each_safe(next, tmp, &surface->cached, cached_state_link) { - if (next->cached_state_locks > 0) { +static void commit_cached_states(struct wlr_surface *surface) { + // TODO: consider merging all committed states together + struct wlr_surface_state *state, *tmp; + wl_list_for_each_safe(state, tmp, &surface->cached, cached_state_link) { + if (state->cached_state_locks > 0) { + break; + } else if (state->group != NULL) { + // XXX: possible stack overflow? + group_notify_ready(state->group); break; } - surface_commit_state(surface, next); - surface_state_destroy_cached(next, surface); + surface_commit_state(surface, state); + surface_state_destroy_cached(state, surface); } } +static void group_entry_finish(struct wlr_surface_state_group_entry *entry) { + entry->surface = NULL; + entry->state = NULL; + wl_list_remove(&entry->surface_destroy.link); +} + +static void group_notify_ready(struct wlr_surface_state_group *group) { + assert(group->n_waiting > 0); + --group->n_waiting; + if (group->n_waiting > 0) { + return; + } + + for (size_t i = 0; i < group->n_entries; i++) { + struct wlr_surface_state_group_entry *entry = &group->entries[i]; + if (entry->surface == NULL) { + // Surface was destroyed + continue; + } + entry->state->group = NULL; + commit_cached_states(entry->surface); + group_entry_finish(entry); + } + + free(group); +} + +static void group_entry_handle_surface_destroy(struct wl_listener *listener, void *data) { + struct wlr_surface_state_group_entry *entry = + wl_container_of(listener, entry, surface_destroy); + + struct wlr_surface_state_group *group = entry->group; + struct wlr_surface *surface = entry->surface; + struct wlr_surface_state *state = entry->state; + + group_entry_finish(entry); + + if (!cached_state_ready(surface, state)) { + 
group_notify_ready(group); + } +} + +static void group_add_entry(struct wlr_surface_state_group *group, struct wlr_surface *surface, + struct wlr_surface_state *state) { + struct wlr_surface_state_group_entry *entry = &group->entries[group->n_entries++]; + entry->group = group; + entry->surface = surface; + entry->state = state; + + entry->surface_destroy.notify = group_entry_handle_surface_destroy; + wl_signal_add(&surface->events.destroy, &entry->surface_destroy); + + state->group = group; +} + +// Commit a transaction with an optional pending state of commit_surface +static bool transaction_commit(struct wlr_surface_transaction *txn, + struct wlr_surface *commit_surface) { + size_t n_waiting = 0; + struct wlr_surface **iter; + wl_array_for_each(iter, txn->surfaces) { + struct wlr_surface *surface = *iter; + struct wlr_surface_state *state = surface->txn_state; + + assert(state->group == NULL); + assert(state->cached_state_locks > 0); + if (state->cached_state_locks > 1 || state->cached_state_link.prev != &surface->cached) { + // The state isn't the first cached one or has locks other than + // the one added to the transaction + ++n_waiting; + } + } + + size_t n_entries = txn->surfaces->size / sizeof(struct wlr_surface *); + if (n_entries == 0) { + // Fast path: no cached states + if (commit_surface != NULL) { + if (wl_list_empty(&commit_surface->cached) && + commit_surface->pending.cached_state_locks == 0) { + surface_commit_state(commit_surface, &commit_surface->pending); + } else if (surface_cache_pending(commit_surface) == NULL) { + return false; + } + } + return true; + } else if (n_entries == 1 && commit_surface == NULL) { + // Fast path: one cached state, no pending state + struct wlr_surface *surface = ((struct wlr_surface **)txn->surfaces->data)[0]; + --surface->txn_state->cached_state_locks; + surface->txn_state = NULL; + commit_cached_states(surface); + return true; + } + + if (commit_surface != NULL) { + if (!wl_list_empty(&commit_surface->cached) || 
+ commit_surface->pending.cached_state_locks > 0) { + ++n_waiting; + } + } + + if (n_waiting == 0) { + // Fast path: all states can be applied immediately + // Unlock and unset txn_state separately so commit listeners won't get + // a surface which is still in a transaction + wl_array_for_each(iter, txn->surfaces) { + struct wlr_surface *surface = *iter; + --surface->txn_state->cached_state_locks; + surface->txn_state = NULL; + } + + // Then, apply everything + wl_array_for_each(iter, txn->surfaces) { + commit_cached_states(*iter); + } + if (commit_surface != NULL) { + surface_commit_state(commit_surface, &commit_surface->pending); + } + + return true; + } + + // "Slow" path + struct wlr_surface_state *commit_state = NULL; + if (commit_surface != NULL) { + commit_state = surface_cache_pending(commit_surface); + if (commit_state == NULL) { + goto error; + } + ++n_entries; + } + + struct wlr_surface_state_group *group = + calloc(1, sizeof(*group) + n_entries * sizeof(*group->entries)); + if (group == NULL) { + if (commit_surface != NULL) { + commit_cached_states(commit_surface); + } + goto error; + } + + wl_array_for_each(iter, txn->surfaces) { + struct wlr_surface *surface = *iter; + --surface->txn_state->cached_state_locks; + group_add_entry(group, surface, surface->txn_state); + surface->txn_state = NULL; + } + + if (commit_state != NULL) { + group_add_entry(group, commit_surface, commit_state); + } + + group->n_waiting = n_waiting; + return true; + +error: + wlr_surface_transaction_drop(txn); + return false; +} + +static void state_lock_handle_surface_destroy(struct wl_listener *listener, void *data) { + struct wlr_surface_state_lock *lock = wl_container_of(listener, lock, surface_destroy); + wlr_surface_state_lock_release(lock); +} + +void wlr_surface_state_lock_acquire(struct wlr_surface_state_lock *lock, + struct wlr_surface *surface) { + assert(lock->surface == NULL && "Tried to acquire a locked lock"); + lock->surface = surface; + lock->seq = 
surface->pending.seq; + ++surface->pending.cached_state_locks; + lock->surface_destroy.notify = state_lock_handle_surface_destroy; + wl_signal_add(&surface->events.destroy, &lock->surface_destroy); +} + +void wlr_surface_state_lock_release(struct wlr_surface_state_lock *lock) { + struct wlr_surface *surface = lock->surface; + if (surface == NULL) { + // Already unlocked + return; + } + + lock->surface = NULL; + wl_list_remove(&lock->surface_destroy.link); + + struct wlr_surface_state *state = state_by_seq(surface, lock->seq); + --state->cached_state_locks; + if (state == &surface->pending) { + return; + } + + commit_cached_states(surface); +} + +bool wlr_surface_state_lock_locked(struct wlr_surface_state_lock *lock) { + return lock->surface != NULL; +} + +void wlr_surface_transaction_init(struct wlr_surface_transaction *txn, struct wl_array *buffer) { + txn->surfaces = buffer; + buffer->size = 0; +} + +bool wlr_surface_transaction_add_lock(struct wlr_surface_transaction *txn, + struct wlr_surface_state_lock *lock) { + struct wlr_surface *surface = lock->surface; + assert(surface != NULL); + + struct wlr_surface_state *state = state_by_seq(surface, lock->seq); + if (surface->txn_state == state) { + // Already added + --state->cached_state_locks; + goto release; + } + + assert(surface->txn_state == NULL && + "Tried to add locks for different states of the same surface"); + + if (state == &surface->pending) { + // No cached state to add + --state->cached_state_locks; + goto release; + } + + struct wlr_surface_state_group *group = state->group; + if (group != NULL) { + // Add the whole group instead + struct wlr_surface **ptr = wl_array_add(txn->surfaces, sizeof(surface) * group->n_entries); + if (ptr == NULL) { + return false; + } + + for (size_t i = 0; i < group->n_entries; i++) { + struct wlr_surface_state_group_entry *entry = &group->entries[i]; + assert(entry->surface->txn_state == NULL); + + ptr[i] = entry->surface; + entry->surface->txn_state = entry->state; + + 
entry->state->group = NULL; + ++entry->state->cached_state_locks; + + group_entry_finish(entry); + } + + free(group); + --state->cached_state_locks; + goto release; + } + + struct wlr_surface **ptr = wl_array_add(txn->surfaces, sizeof(surface)); + if (ptr == NULL) { + return false; + } + *ptr = surface; + surface->txn_state = state; + +release: + lock->surface = NULL; + wl_list_remove(&lock->surface_destroy.link); + return true; +} + +void wlr_surface_transaction_drop(struct wlr_surface_transaction *txn) { + struct wlr_surface **surfaces = txn->surfaces->data; + size_t n_surfaces = txn->surfaces->size / sizeof(*surfaces); + for (size_t i = 0; i < n_surfaces; i++) { + struct wlr_surface *surface = surfaces[i]; + struct wlr_surface_state *state = surface->txn_state; + surface->txn_state = NULL; + --state->cached_state_locks; + commit_cached_states(surface); + } +} + +bool wlr_surface_transaction_commit(struct wlr_surface_transaction *txn) { + return transaction_commit(txn, NULL); +} + struct wlr_surface *wlr_surface_get_root_surface(struct wlr_surface *surface) { struct wlr_subsurface *subsurface; while ((subsurface = wlr_subsurface_try_from_wlr_surface(surface))) { diff --git a/types/wlr_dbg_txn.c b/types/wlr_dbg_txn.c new file mode 100644 index 000000000..c08f4acee --- /dev/null +++ b/types/wlr_dbg_txn.c @@ -0,0 +1,157 @@ +#include +#include + +#include +#include + +struct wlr_dbg_txn { + struct wl_resource *resource; + struct wl_list locks; +}; + +struct wlr_dbg_txn_lock { + struct wlr_addon addon; + struct wlr_surface_state_lock lock; + struct wl_list link; +}; + +static const struct dbg_txn_interface txn_impl; + +static struct wlr_dbg_txn *txn_from_resource(struct wl_resource *resource) { + assert(wl_resource_instance_of(resource, &dbg_txn_interface, &txn_impl)); + return wl_resource_get_user_data(resource); +} + +static void txn_lock_destroy(struct wlr_dbg_txn_lock *lock) { + wlr_surface_state_lock_release(&lock->lock); + wl_list_remove(&lock->link); + 
wlr_addon_finish(&lock->addon); + free(lock); +} + +static void addon_handle_destroy(struct wlr_addon *addon) { + struct wlr_dbg_txn_lock *lock = wl_container_of(addon, lock, addon); + txn_lock_destroy(lock); +} + +static struct wlr_addon_interface addon_impl = { + .name = "wlr_dbg_txn_lock", + .destroy = addon_handle_destroy, +}; + +static void txn_handle_add_surface(struct wl_client *client, struct wl_resource *resource, + struct wl_resource *surface_resource) { + struct wlr_surface *surface = wlr_surface_from_resource(surface_resource); + if (wlr_addon_find(&surface->addons, NULL, &addon_impl) != NULL) { + wl_resource_post_error(resource, -1, "already added"); + return; + } + + struct wlr_dbg_txn *txn = txn_from_resource(resource); + struct wlr_dbg_txn_lock *lock = calloc(1, sizeof(*lock)); + if (lock == NULL) { wl_resource_post_no_memory(resource); return; } + wlr_surface_state_lock_acquire(&lock->lock, surface); + wl_list_insert(&txn->locks, &lock->link); + wlr_addon_init(&lock->addon, &surface->addons, NULL, &addon_impl); +} + +static void txn_handle_commit(struct wl_client *client, struct wl_resource *resource) { + struct wlr_dbg_txn *txn = txn_from_resource(resource); + + struct wl_array buffer; + wl_array_init(&buffer); + + struct wlr_surface_transaction surface_txn; + wlr_surface_transaction_init(&surface_txn, &buffer); + + bool ok = true; + struct wlr_dbg_txn_lock *lock; + wl_list_for_each(lock, &txn->locks, link) { + if (!wlr_surface_transaction_add_lock(&surface_txn, &lock->lock)) { + wlr_surface_transaction_drop(&surface_txn); + ok = false; + break; + } + } + if (ok) { + ok = wlr_surface_transaction_commit(&surface_txn); + } + if (!ok) { + wl_resource_post_no_memory(resource); + } + + wl_array_release(&buffer); + wl_resource_destroy(resource); +} + +static const struct dbg_txn_interface txn_impl = { + .add_surface = txn_handle_add_surface, + .commit = txn_handle_commit, +}; + +static void txn_handle_resource_destroy(struct wl_resource *resource) { + struct wlr_dbg_txn *txn = 
txn_from_resource(resource); + struct wlr_dbg_txn_lock *lock, *tmp; + wl_list_for_each_safe(lock, tmp, &txn->locks, link) { + txn_lock_destroy(lock); + } + free(txn); +} + +static void manager_handle_get_txn(struct wl_client *client, + struct wl_resource *resource, uint32_t id) { + struct wlr_dbg_txn *txn = calloc(1, sizeof(*txn)); + if (txn == NULL) { + wl_client_post_no_memory(client); + return; + } + + txn->resource = wl_resource_create(client, &dbg_txn_interface, 1, id); + if (txn->resource == NULL) { free(txn); wl_client_post_no_memory(client); return; } + + wl_list_init(&txn->locks); + + wl_resource_set_implementation(txn->resource, &txn_impl, txn, txn_handle_resource_destroy); +} + +static const struct dbg_txn_manager_interface manager_impl = { + .get_txn = manager_handle_get_txn, +}; + +static void manager_bind(struct wl_client *wl_client, void *data, uint32_t version, uint32_t id) { + struct wlr_dbg_txn_manager *manager = data; + + struct wl_resource *resource = + wl_resource_create(wl_client, &dbg_txn_manager_interface, version, id); + if (resource == NULL) { + wl_client_post_no_memory(wl_client); + return; + } + wl_resource_set_implementation(resource, &manager_impl, manager, NULL); +} + +static void manager_handle_display_destroy(struct wl_listener *listener, void *data) { + struct wlr_dbg_txn_manager *manager = wl_container_of(listener, manager, display_destroy); + wl_list_remove(&manager->display_destroy.link); + wl_global_destroy(manager->global); + free(manager); +} + +struct wlr_dbg_txn_manager *wlr_dbg_txn_manager_create(struct wl_display *display) { + struct wlr_dbg_txn_manager *manager = calloc(1, sizeof(*manager)); + if (!manager) { + return NULL; + } + + manager->global = wl_global_create(display, &dbg_txn_manager_interface, + 1, manager, manager_bind); + if (!manager->global) { + free(manager); + return NULL; + } + + manager->display_destroy.notify = manager_handle_display_destroy; + wl_display_add_destroy_listener(display, &manager->display_destroy); + + return manager; +} diff --git 
a/types/wlr_subcompositor.c b/types/wlr_subcompositor.c index f0d0827b6..48282e770 100644 --- a/types/wlr_subcompositor.c +++ b/types/wlr_subcompositor.c @@ -25,9 +25,7 @@ static bool subsurface_is_synchronized(struct wlr_subsurface *subsurface) { static const struct wl_subsurface_interface subsurface_implementation; static void subsurface_destroy(struct wlr_subsurface *subsurface) { - if (subsurface->has_cache) { - wlr_surface_unlock_cached(subsurface->surface, subsurface->cached_seq); - } + wlr_surface_state_lock_release(&subsurface->cached_lock); wlr_surface_unmap(subsurface->surface); @@ -36,6 +34,7 @@ static void subsurface_destroy(struct wlr_subsurface *subsurface) { wlr_surface_synced_finish(&subsurface->parent_synced); wl_list_remove(&subsurface->surface_client_commit.link); + wl_list_remove(&subsurface->parent_client_commit.link); wl_list_remove(&subsurface->parent_destroy.link); wl_resource_set_user_data(subsurface->resource, NULL); @@ -170,11 +169,8 @@ static void subsurface_handle_set_desync(struct wl_client *client, if (subsurface->synchronized) { subsurface->synchronized = false; - if (!subsurface_is_synchronized(subsurface) && - subsurface->has_cache) { - wlr_surface_unlock_cached(subsurface->surface, - subsurface->cached_seq); - subsurface->has_cache = false; + if (!subsurface_is_synchronized(subsurface)) { + wlr_surface_state_lock_release(&subsurface->cached_lock); } } } @@ -248,10 +244,19 @@ static struct wlr_surface_synced_impl surface_synced_impl = { .move_state = surface_synced_move_state, }; -static void subsurface_handle_parent_destroy(struct wl_listener *listener, - void *data) { +static void subsurface_handle_parent_client_commit(struct wl_listener *listener, void *data) { struct wlr_subsurface *subsurface = - wl_container_of(listener, subsurface, parent_destroy); + wl_container_of(listener, subsurface, parent_client_commit); + struct wlr_surface_client_commit_event *event = data; + if 
(wlr_surface_state_lock_locked(&subsurface->cached_lock)) { + if (!wlr_surface_transaction_add_lock(event->transaction, &subsurface->cached_lock)) { + wl_resource_post_no_memory(subsurface->resource); + } + } +} + +static void subsurface_handle_parent_destroy(struct wl_listener *listener, void *data) { + struct wlr_subsurface *subsurface = wl_container_of(listener, subsurface, parent_destroy); // Once the parent is destroyed, the client has no way to use the // wl_subsurface object anymore, so we can destroy it. subsurface_destroy(subsurface); @@ -264,23 +269,20 @@ static void subsurface_handle_surface_client_commit( struct wlr_surface *surface = subsurface->surface; if (subsurface_is_synchronized(subsurface)) { - if (subsurface->has_cache) { + if (wlr_surface_state_lock_locked(&subsurface->cached_lock)) { // We already lock a previous commit. The prevents any future // commit to be applied before we release the previous commit. return; } - subsurface->has_cache = true; - subsurface->cached_seq = wlr_surface_lock_pending(surface); - } else if (subsurface->has_cache) { - wlr_surface_unlock_cached(surface, subsurface->cached_seq); - subsurface->has_cache = false; + wlr_surface_state_lock_acquire(&subsurface->cached_lock, surface); + } else { + wlr_surface_state_lock_release(&subsurface->cached_lock); } } void subsurface_handle_parent_commit(struct wlr_subsurface *subsurface) { - if (subsurface->synchronized && subsurface->has_cache) { - wlr_surface_unlock_cached(subsurface->surface, subsurface->cached_seq); - subsurface->has_cache = false; + if (subsurface->synchronized) { + wlr_surface_state_lock_release(&subsurface->cached_lock); } if (!subsurface->added) { @@ -375,6 +377,8 @@ static void subcompositor_handle_get_subsurface(struct wl_client *client, // link parent subsurface->parent = parent; + wl_signal_add(&parent->events.client_commit, &subsurface->parent_client_commit); + subsurface->parent_client_commit.notify = subsurface_handle_parent_client_commit; 
wl_signal_add(&parent->events.destroy, &subsurface->parent_destroy); subsurface->parent_destroy.notify = subsurface_handle_parent_destroy;