Mirror of https://gitlab.freedesktop.org/wlroots/wlroots.git (synced 2026-04-17 06:46:39 -04:00)
compositor: add wlr_surface_state_lock
parent dd8f4913a4
commit a3af71b996

4 changed files with 77 additions and 51 deletions
include/wlr/types/wlr_compositor.h

@@ -74,6 +74,13 @@ struct wlr_surface_state {
 	struct wl_array synced; // void *
 };
 
+struct wlr_surface_state_lock {
+	// private state
+	struct wlr_surface *surface;
+	uint32_t seq;
+	struct wl_listener surface_destroy;
+};
+
 struct wlr_surface_role {
 	const char *name;
 	/**
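All fields of the new struct are private, so a zero-initialized lock starts out unlocked: wlr_surface_state_lock_locked() only checks whether lock->surface is set. A minimal sketch of embedding one in a consumer (the owner struct name is hypothetical, not part of this commit):

struct example_owner {
	// Zero-initialize; the lock stays unlocked until
	// wlr_surface_state_lock_acquire() is called on it.
	struct wlr_surface_state_lock lock;
};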
@@ -443,23 +450,30 @@ void wlr_surface_get_buffer_source_box(struct wlr_surface *surface,
 	struct wlr_fbox *box);
 
 /**
- * Acquire a lock for the pending surface state.
+ * Acquire a lock for the pending surface state. The lock must be unlocked.
  *
  * The state won't be committed before the caller releases the lock. Instead,
- * the state becomes cached. The caller needs to use wlr_surface_unlock_cached()
- * to release the lock.
- *
- * Returns a surface commit sequence number for the cached state.
+ * the state becomes cached. The caller needs to use
+ * wlr_surface_state_lock_release() to release the lock.
  */
-uint32_t wlr_surface_lock_pending(struct wlr_surface *surface);
+void wlr_surface_state_lock_acquire(struct wlr_surface_state_lock *lock,
+	struct wlr_surface *surface);
 
 /**
- * Release a lock for a cached state.
+ * Release a lock for a surface state. If the lock is already unlocked,
+ * this is a no-op.
  *
  * Callers should not assume that the cached state will immediately be
  * committed. Another caller may still have an active lock.
  */
-void wlr_surface_unlock_cached(struct wlr_surface *surface, uint32_t seq);
+void wlr_surface_state_lock_release(struct wlr_surface_state_lock *lock);
+
+/**
+ * Get the status of a lock.
+ *
+ * Returns true if the lock is locked, false otherwise.
+ */
+bool wlr_surface_state_lock_locked(struct wlr_surface_state_lock *lock);
 
 /**
  * Set the preferred buffer scale for the surface.
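Taken together, these declarations replace the sequence-number pair wlr_surface_lock_pending()/wlr_surface_unlock_cached() with a lock object. A hedged usage sketch, assuming a long-lived owner struct as above (the function names here are illustrative, not part of wlroots):

static void example_freeze(struct example_owner *owner,
		struct wlr_surface *surface) {
	// Hold back the surface's next commit; the committed state will be
	// cached instead of applied.
	if (!wlr_surface_state_lock_locked(&owner->lock)) {
		wlr_surface_state_lock_acquire(&owner->lock, surface);
	}
}

static void example_thaw(struct example_owner *owner) {
	// Safe to call unconditionally: releasing an unlocked lock is a
	// no-op, and the lock auto-releases if the surface is destroyed
	// first (see the destroy listener in the implementation below).
	wlr_surface_state_lock_release(&owner->lock);
}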
include/wlr/types/wlr_subcompositor.h

@@ -35,9 +35,6 @@ struct wlr_subsurface {
 
 	struct wlr_subsurface_parent_state current, pending;
 
-	uint32_t cached_seq;
-	bool has_cache;
-
 	bool synchronized;
 	bool added;
 
@@ -52,6 +49,8 @@ struct wlr_subsurface {
 
 	// private state
 
+	struct wlr_surface_state_lock cached_lock;
+
 	struct wlr_surface_synced parent_synced;
 };
types/wlr_compositor.c

@@ -928,53 +928,74 @@ static void surface_destroy_role_object(struct wlr_surface *surface) {
 	wl_list_init(&surface->role_resource_destroy.link);
 }
 
-uint32_t wlr_surface_lock_pending(struct wlr_surface *surface) {
-	surface->pending.cached_state_locks++;
-	return surface->pending.seq;
-}
-
-void wlr_surface_unlock_cached(struct wlr_surface *surface, uint32_t seq) {
+static struct wlr_surface_state *state_by_seq(struct wlr_surface *surface, uint32_t seq) {
 	if (surface->pending.seq == seq) {
-		assert(surface->pending.cached_state_locks > 0);
-		surface->pending.cached_state_locks--;
-		return;
+		return &surface->pending;
 	}
 
-	bool found = false;
 	struct wlr_surface_state *cached;
 	wl_list_for_each(cached, &surface->cached, cached_state_link) {
 		if (cached->seq == seq) {
-			found = true;
-			break;
+			return cached;
 		}
 	}
-	assert(found);
 
-	assert(cached->cached_state_locks > 0);
-	cached->cached_state_locks--;
-
-	if (cached->cached_state_locks != 0) {
-		return;
-	}
-
-	if (cached->cached_state_link.prev != &surface->cached) {
-		// This isn't the first cached state. This means we're blocked on a
-		// previous cached state.
-		return;
-	}
-
-	// TODO: consider merging all committed states together
-	struct wlr_surface_state *next, *tmp;
-	wl_list_for_each_safe(next, tmp, &surface->cached, cached_state_link) {
-		if (next->cached_state_locks > 0) {
-			break;
-		}
-
-		surface_commit_state(surface, next);
-		surface_state_destroy_cached(next, surface);
-	}
+	abort(); // Invalid seq
+}
+
+static void state_lock_handle_surface_destroy(struct wl_listener *listener, void *data) {
+	struct wlr_surface_state_lock *lock = wl_container_of(listener, lock, surface_destroy);
+	wlr_surface_state_lock_release(lock);
+}
+
+void wlr_surface_state_lock_acquire(struct wlr_surface_state_lock *lock,
+		struct wlr_surface *surface) {
+	assert(lock->surface == NULL && "Tried to acquire a locked lock");
+	lock->surface = surface;
+	lock->seq = surface->pending.seq;
+	++surface->pending.cached_state_locks;
+	lock->surface_destroy.notify = state_lock_handle_surface_destroy;
+	wl_signal_add(&surface->events.destroy, &lock->surface_destroy);
+}
+
+void wlr_surface_state_lock_release(struct wlr_surface_state_lock *lock) {
+	struct wlr_surface *surface = lock->surface;
+	if (surface == NULL) {
+		// Already unlocked
+		return;
+	}
+
+	lock->surface = NULL;
+	wl_list_remove(&lock->surface_destroy.link);
+
+	struct wlr_surface_state *state = state_by_seq(surface, lock->seq);
+	--state->cached_state_locks;
+	if (state == &surface->pending) {
+		return;
+	}
+
+	if (state->cached_state_link.prev != &surface->cached) {
+		// This isn't the first cached state. This means we're blocked on a
+		// previous cached state.
+		return;
+	}
+
+	// TODO: consider merging all committed states together
+	struct wlr_surface_state *tmp;
+	wl_list_for_each_safe(state, tmp, &surface->cached, cached_state_link) {
+		if (state->cached_state_locks > 0) {
+			break;
+		}
+
+		surface_commit_state(surface, state);
+		surface_state_destroy_cached(state, surface);
+	}
+}
+
+bool wlr_surface_state_lock_locked(struct wlr_surface_state_lock *lock) {
+	return lock->surface != NULL;
+}
 
 struct wlr_surface *wlr_surface_get_root_surface(struct wlr_surface *surface) {
 	struct wlr_subsurface *subsurface;
 	while ((subsurface = wlr_subsurface_try_from_wlr_surface(surface))) {
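The release path only commits cached states from the head of the list, stopping at the first state that still holds locks, so states are always applied in commit order regardless of release order. An illustrative sequence (surface setup omitted; the behaviour follows from the code above):

struct wlr_surface_state_lock lock_a = {0}, lock_b = {0};

wlr_surface_state_lock_acquire(&lock_a, surface);
// ... client commit: state A is cached, held by lock_a
wlr_surface_state_lock_acquire(&lock_b, surface);
// ... client commit: state B is cached behind A, held by lock_b

wlr_surface_state_lock_release(&lock_b);
// Nothing is applied yet: B sits behind the still-locked A.
wlr_surface_state_lock_release(&lock_a);
// A and B are now both committed, in order.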
types/wlr_subcompositor.c

@@ -25,9 +25,7 @@ static bool subsurface_is_synchronized(struct wlr_subsurface *subsurface) {
 static const struct wl_subsurface_interface subsurface_implementation;
 
 static void subsurface_destroy(struct wlr_subsurface *subsurface) {
-	if (subsurface->has_cache) {
-		wlr_surface_unlock_cached(subsurface->surface, subsurface->cached_seq);
-	}
+	wlr_surface_state_lock_release(&subsurface->cached_lock);
 
 	wlr_surface_unmap(subsurface->surface);
 
@@ -170,11 +168,8 @@ static void subsurface_handle_set_desync(struct wl_client *client,
 	if (subsurface->synchronized) {
 		subsurface->synchronized = false;
 
-		if (!subsurface_is_synchronized(subsurface) &&
-				subsurface->has_cache) {
-			wlr_surface_unlock_cached(subsurface->surface,
-				subsurface->cached_seq);
-			subsurface->has_cache = false;
+		if (!subsurface_is_synchronized(subsurface)) {
+			wlr_surface_state_lock_release(&subsurface->cached_lock);
 		}
 	}
 
@@ -264,23 +259,20 @@ static void subsurface_handle_surface_client_commit(
 	struct wlr_surface *surface = subsurface->surface;
 
 	if (subsurface_is_synchronized(subsurface)) {
-		if (subsurface->has_cache) {
+		if (wlr_surface_state_lock_locked(&subsurface->cached_lock)) {
 			// We already locked a previous commit. This prevents any future
 			// commit from being applied before we release the previous one.
 			return;
 		}
-		subsurface->has_cache = true;
-		subsurface->cached_seq = wlr_surface_lock_pending(surface);
-	} else if (subsurface->has_cache) {
-		wlr_surface_unlock_cached(surface, subsurface->cached_seq);
-		subsurface->has_cache = false;
+		wlr_surface_state_lock_acquire(&subsurface->cached_lock, surface);
+	} else {
+		wlr_surface_state_lock_release(&subsurface->cached_lock);
 	}
 }
 
 void subsurface_handle_parent_commit(struct wlr_subsurface *subsurface) {
-	if (subsurface->synchronized && subsurface->has_cache) {
-		wlr_surface_unlock_cached(subsurface->surface, subsurface->cached_seq);
-		subsurface->has_cache = false;
+	if (subsurface->synchronized) {
+		wlr_surface_state_lock_release(&subsurface->cached_lock);
 	}
 
 	if (!subsurface->added) {
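The subsurface changes above double as the migration template for out-of-tree users of the old API; a hedged before/after sketch:

// Before: track a commit sequence number plus a has-cache flag.
uint32_t seq = wlr_surface_lock_pending(surface);
// ...
wlr_surface_unlock_cached(surface, seq);

// After: a lock object tracks the state and the surface lifetime.
struct wlr_surface_state_lock lock = {0};
wlr_surface_state_lock_acquire(&lock, surface);
// ...
wlr_surface_state_lock_release(&lock);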