Merge branch 'cached-lock' into 'master'

compositor: wrap cached lock seq in struct

See merge request wlroots/wlroots!4501
This commit is contained in:
Simon Ser 2024-02-01 06:41:19 +00:00
commit b37404a42e
4 changed files with 22 additions and 16 deletions

View file

@@ -33,9 +33,8 @@ enum wlr_surface_state_field {
struct wlr_surface_state {
uint32_t committed; // enum wlr_surface_state_field
// Sequence number of the surface state. Incremented on each commit, may
// overflow.
uint32_t seq;
// Sequence number of the surface state. Incremented on each commit.
uint64_t seq;
struct wlr_buffer *buffer;
int32_t dx, dy; // relative to previous position
@@ -423,6 +422,13 @@ void wlr_surface_get_effective_damage(struct wlr_surface *surface,
void wlr_surface_get_buffer_source_box(struct wlr_surface *surface,
struct wlr_fbox *box);
/**
* A lock preventing cached state from being applied.
*/
struct wlr_surface_cached_lock {
uint64_t seq;
};
/**
* Acquire a lock for the pending surface state.
*
@@ -432,7 +438,7 @@ void wlr_surface_get_buffer_source_box(struct wlr_surface *surface,
*
* Returns a surface commit sequence number for the cached state.
*/
uint32_t wlr_surface_lock_pending(struct wlr_surface *surface);
struct wlr_surface_cached_lock wlr_surface_lock_pending(struct wlr_surface *surface);
/**
* Release a lock for a cached state.
@@ -440,7 +446,7 @@ uint32_t wlr_surface_lock_pending(struct wlr_surface *surface);
* Callers should not assume that the cached state will immediately be
* committed. Another caller may still have an active lock.
*/
void wlr_surface_unlock_cached(struct wlr_surface *surface, uint32_t seq);
void wlr_surface_unlock_cached(struct wlr_surface *surface, struct wlr_surface_cached_lock lock);
/**
* Set the preferred buffer scale for the surface.

View file

@@ -35,7 +35,7 @@ struct wlr_subsurface {
struct wlr_subsurface_parent_state current, pending;
uint32_t cached_seq;
struct wlr_surface_cached_lock cached_lock;
bool has_cache;
bool synchronized;

View file

@@ -957,13 +957,13 @@ static void surface_destroy_role_object(struct wlr_surface *surface) {
wl_list_init(&surface->role_resource_destroy.link);
}
uint32_t wlr_surface_lock_pending(struct wlr_surface *surface) {
struct wlr_surface_cached_lock wlr_surface_lock_pending(struct wlr_surface *surface) {
surface->pending.cached_state_locks++;
return surface->pending.seq;
return (struct wlr_surface_cached_lock){surface->pending.seq};
}
void wlr_surface_unlock_cached(struct wlr_surface *surface, uint32_t seq) {
if (surface->pending.seq == seq) {
void wlr_surface_unlock_cached(struct wlr_surface *surface, struct wlr_surface_cached_lock lock) {
if (surface->pending.seq == lock.seq) {
assert(surface->pending.cached_state_locks > 0);
surface->pending.cached_state_locks--;
return;
@@ -972,7 +972,7 @@ void wlr_surface_unlock_cached(struct wlr_surface *surface, uint32_t seq) {
bool found = false;
struct wlr_surface_state *cached;
wl_list_for_each(cached, &surface->cached, cached_state_link) {
if (cached->seq == seq) {
if (cached->seq == lock.seq) {
found = true;
break;
}

View file

@@ -26,7 +26,7 @@ static const struct wl_subsurface_interface subsurface_implementation;
static void subsurface_destroy(struct wlr_subsurface *subsurface) {
if (subsurface->has_cache) {
wlr_surface_unlock_cached(subsurface->surface, subsurface->cached_seq);
wlr_surface_unlock_cached(subsurface->surface, subsurface->cached_lock);
}
wlr_surface_unmap(subsurface->surface);
@@ -177,7 +177,7 @@ static void subsurface_handle_set_desync(struct wl_client *client,
if (!subsurface_is_synchronized(subsurface) &&
subsurface->has_cache) {
wlr_surface_unlock_cached(subsurface->surface,
subsurface->cached_seq);
subsurface->cached_lock);
subsurface->has_cache = false;
}
}
@@ -274,9 +274,9 @@ static void subsurface_handle_surface_client_commit(
return;
}
subsurface->has_cache = true;
subsurface->cached_seq = wlr_surface_lock_pending(surface);
subsurface->cached_lock = wlr_surface_lock_pending(surface);
} else if (subsurface->has_cache) {
wlr_surface_unlock_cached(surface, subsurface->cached_seq);
wlr_surface_unlock_cached(surface, subsurface->cached_lock);
subsurface->has_cache = false;
}
}
@@ -302,7 +302,7 @@ void subsurface_handle_parent_commit(struct wlr_subsurface *subsurface) {
}
if (subsurface->synchronized && subsurface->has_cache) {
wlr_surface_unlock_cached(surface, subsurface->cached_seq);
wlr_surface_unlock_cached(surface, subsurface->cached_lock);
subsurface->has_cache = false;
}