compositor: replace wlr_surface.cached with states

This commit replaces the list of cached states with a list of all
states, including the current and pending ones, and renames
cached_state_locks to n_locks.

This is a prerequisite for the next commits.
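To illustrate the new layout, a minimal sketch (not code from this commit; for_each_cached_state is a hypothetical name, and <wlr/types/wlr_surface.h> is assumed to be the header that declares these types):

#include <wayland-util.h>
#include <wlr/types/wlr_surface.h>

/*
 * surface->states now holds every state in order:
 *
 *   current -> cached (oldest) -> ... -> cached (newest) -> pending
 *
 * A hypothetical walk over just the cached states skips the two fixed
 * endpoints.
 */
static void for_each_cached_state(struct wlr_surface *surface,
		void (*fn)(struct wlr_surface_state *state, void *data), void *data) {
	struct wlr_surface_state *state;
	wl_list_for_each(state, &surface->states, link) {
		if (state == &surface->current || state == &surface->pending) {
			continue; // the endpoints are not cached states
		}
		fn(state, data);
	}
}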
Kirill Primak 2022-02-04 09:23:44 +03:00
parent c1a2c09ade
commit 81f35f6d98
2 changed files with 44 additions and 25 deletions

Changes to the header file:

@@ -66,8 +66,8 @@ struct wlr_surface_state {
 	} viewport;
 
 	// Number of locks that prevent this surface state from being committed.
-	size_t cached_state_locks;
-	struct wl_list cached_state_link; // wlr_surface.cached
+	size_t n_locks;
+	struct wl_list link; // wlr_surface.states
 };
 
 struct wlr_surface_role {
@@ -135,8 +135,6 @@ struct wlr_surface {
 	 */
 	struct wlr_surface_state current, pending;
 
-	struct wl_list cached; // wlr_surface_state.cached_link
-
 	const struct wlr_surface_role *role; // the lifetime-bound role or NULL
 	void *role_data; // role-specific data
@@ -154,6 +152,12 @@ struct wlr_surface {
 	// private state
 
+	/**
+	 * The queue of all states the surface has. The current state is always
+	 * the first and the pending state is always the last.
+	 */
+	struct wl_list states; // wlr_surface_state.link
+
 	struct wl_listener renderer_destroy;
 
 	struct {
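This invariant is what the implementation below relies on: with current always first and pending always last, a surface has cached states exactly when something sits between the two. A minimal sketch of that check, assuming the header above (surface_has_cached_states is a hypothetical name, not part of this commit):

#include <stdbool.h>
#include <wlr/types/wlr_surface.h>

// Hypothetical helper: pending's predecessor in surface->states is
// current iff there are no cached states in between.
static bool surface_has_cached_states(struct wlr_surface *surface) {
	return surface->pending.link.prev != &surface->current.link;
}

This is the same condition surface_handle_commit uses below in place of the old !wl_list_empty(&surface->cached) test.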

Changes to the implementation file:

@@ -316,8 +316,8 @@ static void surface_state_move(struct wlr_surface_state *state,
 	state->seq = next->seq;
-	state->cached_state_locks = next->cached_state_locks;
-	next->cached_state_locks = 0;
+	state->n_locks = next->n_locks;
+	next->n_locks = 0;
 }
 
 static void surface_apply_damage(struct wlr_surface *surface) {
@@ -394,14 +394,15 @@ static void surface_cache_pending(struct wlr_surface *surface) {
 	surface_state_init(cached);
 	surface_state_move(cached, &surface->pending);
 
-	wl_list_insert(surface->cached.prev, &cached->cached_state_link);
+	// Insert before the pending state
+	wl_list_insert(surface->pending.link.prev, &cached->link);
 
 	surface->pending.seq++;
 }
 
 static void surface_commit_state(struct wlr_surface *surface,
 		struct wlr_surface_state *next) {
-	assert(next->cached_state_locks == 0);
+	assert(next->n_locks == 0);
 
 	if (surface->role && surface->role->precommit) {
 		surface->role->precommit(surface, next);
@@ -517,7 +518,8 @@ static void surface_handle_commit(struct wl_client *client,
 	wlr_signal_emit_safe(&surface->events.client_commit, NULL);
 
-	if (surface->pending.cached_state_locks > 0 || !wl_list_empty(&surface->cached)) {
+	if (surface->pending.n_locks > 0 ||
+			surface->pending.link.prev != &surface->current.link) {
 		surface_cache_pending(surface);
 	} else {
 		surface_commit_state(surface, &surface->pending);
@@ -614,7 +616,7 @@ static void surface_state_finish(struct wlr_surface_state *state) {
 static void surface_state_destroy_cached(struct wlr_surface_state *state) {
 	surface_state_finish(state);
-	wl_list_remove(&state->cached_state_link);
+	wl_list_remove(&state->link);
 	free(state);
 }
@@ -633,8 +635,11 @@ static void surface_handle_resource_destroy(struct wl_resource *resource) {
 	wlr_addon_set_finish(&surface->addons);
 
+	wl_list_remove(&surface->current.link);
+	wl_list_remove(&surface->pending.link);
+
 	struct wlr_surface_state *cached, *cached_tmp;
-	wl_list_for_each_safe(cached, cached_tmp, &surface->cached, cached_state_link) {
+	wl_list_for_each_safe(cached, cached_tmp, &surface->states, link) {
 		surface_state_destroy_cached(cached);
 	}
@@ -683,12 +688,15 @@ static struct wlr_surface *surface_create(struct wl_client *client,
 	surface_state_init(&surface->pending);
 	surface->pending.seq = 1;
 
+	wl_list_init(&surface->states);
+	wl_list_insert(&surface->states, &surface->current.link);
+	wl_list_insert(surface->states.prev, &surface->pending.link);
+
 	wl_signal_init(&surface->events.client_commit);
 	wl_signal_init(&surface->events.commit);
 	wl_signal_init(&surface->events.destroy);
 	wl_signal_init(&surface->events.new_subsurface);
 	wl_list_init(&surface->current_outputs);
-	wl_list_init(&surface->cached);
 	pixman_region32_init(&surface->buffer_damage);
 	pixman_region32_init(&surface->external_damage);
 	pixman_region32_init(&surface->opaque_region);
@@ -740,20 +748,24 @@ bool wlr_surface_set_role(struct wlr_surface *surface,
 }
 
 uint32_t wlr_surface_lock_pending(struct wlr_surface *surface) {
-	surface->pending.cached_state_locks++;
+	surface->pending.n_locks++;
 	return surface->pending.seq;
 }
 
 void wlr_surface_unlock_cached(struct wlr_surface *surface, uint32_t seq) {
 	if (surface->pending.seq == seq) {
-		assert(surface->pending.cached_state_locks > 0);
-		surface->pending.cached_state_locks--;
+		assert(surface->pending.n_locks > 0);
+		surface->pending.n_locks--;
 		return;
 	}
 
 	bool found = false;
 	struct wlr_surface_state *cached;
-	wl_list_for_each(cached, &surface->cached, cached_state_link) {
+	wl_list_for_each(cached, &surface->states, link) {
+		if (cached == &surface->current || cached == &surface->pending) {
+			continue;
+		}
+
 		if (cached->seq == seq) {
 			found = true;
 			break;
@@ -761,28 +773,31 @@ void wlr_surface_unlock_cached(struct wlr_surface *surface, uint32_t seq) {
 	}
 
 	assert(found);
-	assert(cached->cached_state_locks > 0);
-	cached->cached_state_locks--;
-	if (cached->cached_state_locks != 0) {
+	assert(cached->n_locks > 0);
+	cached->n_locks--;
+	if (cached->n_locks != 0) {
 		return;
 	}
 
-	if (cached->cached_state_link.prev != &surface->cached) {
+	if (cached->link.prev != &surface->current.link) {
 		// This isn't the first cached state. This means we're blocked on a
 		// previous cached state.
 		return;
 	}
 
 	// TODO: consider merging all committed states together
-	struct wlr_surface_state *next, *tmp;
-	wl_list_for_each_safe(next, tmp, &surface->cached, cached_state_link) {
-		if (next->cached_state_locks > 0) {
+	struct wlr_surface_state *tmp;
+	wl_list_for_each_safe(cached, tmp, &surface->states, link) {
+		if (cached == &surface->current) {
+			continue;
+		}
+		if (cached == &surface->pending || cached->n_locks > 0) {
 			break;
 		}
-		surface_commit_state(surface, next);
-		surface_state_destroy_cached(next);
+		surface_commit_state(surface, cached);
+		surface_state_destroy_cached(cached);
 	}
 }
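For reference, a rough sketch of how the locking API touched above is typically driven by code that needs to delay a commit (delayed_commit and both function names are hypothetical, not part of wlroots or of this commit):

#include <stdint.h>
#include <wlr/types/wlr_surface.h>

struct delayed_commit {
	struct wlr_surface *surface;
	uint32_t seq; // sequence number of the pending state we locked
};

// Lock the pending state so the client's next commit is cached instead of
// applied immediately.
static void delayed_commit_begin(struct delayed_commit *dc,
		struct wlr_surface *surface) {
	dc->surface = surface;
	dc->seq = wlr_surface_lock_pending(surface);
}

// Drop the lock; the cached state is applied once all of its locks are
// released and every older cached state has been committed, which is what
// the loop at the end of wlr_surface_unlock_cached handles.
static void delayed_commit_finish(struct delayed_commit *dc) {
	wlr_surface_unlock_cached(dc->surface, dc->seq);
}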