compositor: add wlr_surface_state_lock

This commit is contained in:
Kirill Primak 2024-01-09 18:10:58 +03:00
parent dd8f4913a4
commit a3af71b996
4 changed files with 77 additions and 51 deletions

View file

@ -928,53 +928,74 @@ static void surface_destroy_role_object(struct wlr_surface *surface) {
wl_list_init(&surface->role_resource_destroy.link);
}
uint32_t wlr_surface_lock_pending(struct wlr_surface *surface) {
surface->pending.cached_state_locks++;
return surface->pending.seq;
}
// Looks up the surface state (pending or cached) identified by the given
// sequence number.
//
// Aborts if no state with this seq exists: a stale seq means the caller's
// lock bookkeeping is broken, which is unrecoverable.
static struct wlr_surface_state *state_by_seq(struct wlr_surface *surface, uint32_t seq) {
	if (surface->pending.seq == seq) {
		return &surface->pending;
	}
	struct wlr_surface_state *cached;
	wl_list_for_each(cached, &surface->cached, cached_state_link) {
		if (cached->seq == seq) {
			return cached;
		}
	}
	abort(); // Invalid seq
}
// Listener for the locked surface's destroy signal: drop the lock so it
// doesn't dangle once the surface is gone.
static void state_lock_handle_surface_destroy(struct wl_listener *listener, void *data) {
	struct wlr_surface_state_lock *lock =
		wl_container_of(listener, lock, surface_destroy);
	wlr_surface_state_lock_release(lock);
}
// Acquires a lock on the surface's current pending state. The state will be
// held in the cached list on commit until the lock is released. The lock
// must not already be held; it auto-releases if the surface is destroyed.
void wlr_surface_state_lock_acquire(struct wlr_surface_state_lock *lock,
		struct wlr_surface *surface) {
	assert(lock->surface == NULL && "Tried to acquire a locked lock");

	// Remember which state we locked so release can find it again.
	lock->surface = surface;
	lock->seq = surface->pending.seq;
	surface->pending.cached_state_locks++;

	// Auto-release when the surface goes away.
	lock->surface_destroy.notify = state_lock_handle_surface_destroy;
	wl_signal_add(&surface->events.destroy, &lock->surface_destroy);
}
// Releases a previously acquired state lock. If this was the last lock on
// the first cached state, applies that state and any subsequent unlocked
// cached states in order. No-op if the lock is not currently held.
void wlr_surface_state_lock_release(struct wlr_surface_state_lock *lock) {
	struct wlr_surface *surface = lock->surface;
	if (surface == NULL) {
		// Already unlocked
		return;
	}

	lock->surface = NULL;
	wl_list_remove(&lock->surface_destroy.link);

	struct wlr_surface_state *state = state_by_seq(surface, lock->seq);
	--state->cached_state_locks;
	// Use the saved `surface` pointer here: lock->surface has been cleared
	// above, so `&lock->surface->pending` would be a null-based dereference.
	if (state == &surface->pending) {
		// The locked state hasn't been committed yet, nothing to flush.
		return;
	}
	if (state->cached_state_link.prev != &surface->cached) {
		// This isn't the first cached state. This means we're blocked on a
		// previous cached state.
		return;
	}

	// TODO: consider merging all committed states together
	struct wlr_surface_state *next, *tmp;
	wl_list_for_each_safe(next, tmp, &surface->cached, cached_state_link) {
		if (next->cached_state_locks > 0) {
			break;
		}
		surface_commit_state(surface, next);
		surface_state_destroy_cached(next, surface);
	}
}
// Reports whether the lock is currently holding a surface state.
bool wlr_surface_state_lock_locked(struct wlr_surface_state_lock *lock) {
	return !(lock->surface == NULL);
}
struct wlr_surface *wlr_surface_get_root_surface(struct wlr_surface *surface) {
struct wlr_subsurface *subsurface;
while ((subsurface = wlr_subsurface_try_from_wlr_surface(surface))) {

View file

@ -25,9 +25,7 @@ static bool subsurface_is_synchronized(struct wlr_subsurface *subsurface) {
static const struct wl_subsurface_interface subsurface_implementation;
static void subsurface_destroy(struct wlr_subsurface *subsurface) {
if (subsurface->has_cache) {
wlr_surface_unlock_cached(subsurface->surface, subsurface->cached_seq);
}
wlr_surface_state_lock_release(&subsurface->cached_lock);
wlr_surface_unmap(subsurface->surface);
@ -170,11 +168,8 @@ static void subsurface_handle_set_desync(struct wl_client *client,
if (subsurface->synchronized) {
subsurface->synchronized = false;
if (!subsurface_is_synchronized(subsurface) &&
subsurface->has_cache) {
wlr_surface_unlock_cached(subsurface->surface,
subsurface->cached_seq);
subsurface->has_cache = false;
if (!subsurface_is_synchronized(subsurface)) {
wlr_surface_state_lock_release(&subsurface->cached_lock);
}
}
}
@ -264,23 +259,20 @@ static void subsurface_handle_surface_client_commit(
struct wlr_surface *surface = subsurface->surface;
if (subsurface_is_synchronized(subsurface)) {
if (subsurface->has_cache) {
if (wlr_surface_state_lock_locked(&subsurface->cached_lock)) {
// We already lock a previous commit. This prevents any future
// commit from being applied before we release the previous commit.
return;
}
subsurface->has_cache = true;
subsurface->cached_seq = wlr_surface_lock_pending(surface);
} else if (subsurface->has_cache) {
wlr_surface_unlock_cached(surface, subsurface->cached_seq);
subsurface->has_cache = false;
wlr_surface_state_lock_acquire(&subsurface->cached_lock, surface);
} else {
wlr_surface_state_lock_release(&subsurface->cached_lock);
}
}
void subsurface_handle_parent_commit(struct wlr_subsurface *subsurface) {
if (subsurface->synchronized && subsurface->has_cache) {
wlr_surface_unlock_cached(subsurface->surface, subsurface->cached_seq);
subsurface->has_cache = false;
if (subsurface->synchronized) {
wlr_surface_state_lock_release(&subsurface->cached_lock);
}
if (!subsurface->added) {