From 48874c9e86a3b82ddf06f68a9dfa21ce28466719 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sat, 14 Sep 2024 17:42:40 -0400 Subject: [PATCH 01/29] wlr_raster: Introduce new abstraction --- include/wlr/types/wlr_raster.h | 86 +++++++++++++++++++++++ types/meson.build | 1 + types/wlr_raster.c | 122 +++++++++++++++++++++++++++++++++ 3 files changed, 209 insertions(+) create mode 100644 include/wlr/types/wlr_raster.h create mode 100644 types/wlr_raster.c diff --git a/include/wlr/types/wlr_raster.h b/include/wlr/types/wlr_raster.h new file mode 100644 index 000000000..c63cb8ee0 --- /dev/null +++ b/include/wlr/types/wlr_raster.h @@ -0,0 +1,86 @@ +/* + * This an unstable interface of wlroots. No guarantees are made regarding the + * future consistency of this API. + */ +#ifndef WLR_USE_UNSTABLE +#error "Add -DWLR_USE_UNSTABLE to enable unstable wlroots features" +#endif + +#ifndef WLR_TYPES_WLR_RASTER_H +#define WLR_TYPES_WLR_RASTER_H + +#include +#include +#include + +struct wlr_buffer; +struct wlr_texture; +struct wlr_renderer; +struct wlr_drm_syncobj_timeline; + +struct wlr_raster { + // May be NULL + struct wlr_buffer *buffer; + + uint32_t width, height; + bool opaque; + + struct wlr_drm_syncobj_timeline *wait_timeline; + uint64_t wait_point; + + struct { + struct wl_signal destroy; + } events; + + // private state + + size_t n_locks; + + struct wl_listener buffer_release; + + struct wlr_texture *texture; + struct wl_listener renderer_destroy; +}; + +struct wlr_raster_create_options { + struct wlr_drm_syncobj_timeline *wait_timeline; + uint64_t wait_point; +}; + +/** + * Creates a new wlr_raster being backed by the given buffer. The raster will + * not lock the given buffer meaning that once it's released, the raster will + * NULL its buffer reference and potentially become invalid. 
+ * The creation function is referenced: once the creator is done with the raster, + * wlr_raster_unlock must be called as the reference count will start at 1 + * from creation. + * + * Options can be NULL. + */ +struct wlr_raster *wlr_raster_create(struct wlr_buffer *buffer, + const struct wlr_raster_create_options *options); + +/** + * Lock the raster for use. As long as the raster has at least one lock, it + * will not be destroyed. + */ +struct wlr_raster *wlr_raster_lock(struct wlr_raster *raster); + +/** + * Unlock the raster. This must be called after wlr_raster_lock once the raster + * has been finished being used or after creation from wlr_raster_create. + */ +void wlr_raster_unlock(struct wlr_raster *raster); + +/** + * Returns the texture allocated for this renderer. If there is none, + * a new texture will be created and attached to this wlr_raster. Users do not + * own the texture returned by this function and can only be used for read-only + * purposes. + * + * Will return NULL if the creation was unsuccessful. 
+ */ +struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster *raster, + struct wlr_renderer *renderer); + +#endif diff --git a/types/meson.build b/types/meson.build index ec70d4b7c..032143db6 100644 --- a/types/meson.build +++ b/types/meson.build @@ -68,6 +68,7 @@ wlr_files += files( 'wlr_presentation_time.c', 'wlr_primary_selection_v1.c', 'wlr_primary_selection.c', + 'wlr_raster.c', 'wlr_region.c', 'wlr_relative_pointer_v1.c', 'wlr_screencopy_v1.c', diff --git a/types/wlr_raster.c b/types/wlr_raster.c new file mode 100644 index 000000000..c1ce68438 --- /dev/null +++ b/types/wlr_raster.c @@ -0,0 +1,122 @@ +#include +#include +#include +#include +#include +#include +#include +#include "types/wlr_buffer.h" + +static void raster_handle_buffer_release(struct wl_listener *listener, void *data) { + struct wlr_raster *raster = wl_container_of(listener, raster, buffer_release); + raster->buffer = NULL; + wl_list_remove(&raster->buffer_release.link); + wl_list_init(&raster->buffer_release.link); +} + +struct wlr_raster *wlr_raster_create(struct wlr_buffer *buffer, + const struct wlr_raster_create_options *options) { + struct wlr_raster *raster = calloc(1, sizeof(*raster)); + if (!raster) { + return NULL; + } + + wl_signal_init(&raster->events.destroy); + + assert(buffer); + raster->opaque = buffer_is_opaque(buffer); + raster->width = buffer->width; + raster->height = buffer->height; + raster->buffer = buffer; + + raster->n_locks = 1; + + raster->buffer_release.notify = raster_handle_buffer_release; + wl_signal_add(&raster->buffer->events.release, &raster->buffer_release); + + if (options && options->wait_timeline) { + raster->wait_timeline = wlr_drm_syncobj_timeline_ref(options->wait_timeline); + raster->wait_point = options->wait_point; + } + + return raster; +} + +static void raster_consider_destroy(struct wlr_raster *raster) { + if (raster->n_locks > 0) { + return; + } + + wl_signal_emit_mutable(&raster->events.destroy, NULL); + + if (raster->texture) { + 
wl_list_remove(&raster->renderer_destroy.link); + wlr_texture_destroy(raster->texture); + } + + wl_list_remove(&raster->buffer_release.link); + wlr_drm_syncobj_timeline_unref(raster->wait_timeline); + free(raster); +} + +struct wlr_raster *wlr_raster_lock(struct wlr_raster *raster) { + raster->n_locks++; + return raster; +} + +void wlr_raster_unlock(struct wlr_raster *raster) { + if (!raster) { + return; + } + + assert(raster->n_locks > 0); + + raster->n_locks--; + raster_consider_destroy(raster); +} + +static void raster_detach(struct wlr_raster *raster, struct wlr_texture *texture) { + assert(texture); + assert(raster->texture == texture); + + wl_list_remove(&raster->renderer_destroy.link); + raster->texture = NULL; +} + +static void handle_renderer_destroy(struct wl_listener *listener, void *data) { + struct wlr_raster *raster = wl_container_of(listener, raster, renderer_destroy); + raster_detach(raster, raster->texture); +} + +static void raster_attach(struct wlr_raster *raster, struct wlr_texture *texture) { + assert(texture->width == raster->width && texture->height == raster->height); + assert(!raster->texture); + + raster->renderer_destroy.notify = handle_renderer_destroy; + wl_signal_add(&texture->renderer->events.destroy, &raster->renderer_destroy); + + raster->texture = texture; +} + +struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster *raster, + struct wlr_renderer *renderer) { + if (raster->texture) { + assert(raster->texture->renderer == renderer); + return raster->texture; + } + + assert(raster->buffer); + + struct wlr_client_buffer *client_buffer = + wlr_client_buffer_get(raster->buffer); + if (client_buffer != NULL) { + return client_buffer->texture; + } + + struct wlr_texture *texture = wlr_texture_from_buffer(renderer, raster->buffer); + if (texture) { + raster_attach(raster, texture); + } + + return texture; +} From d9f6ec080fd21adab4318899ed1c89fb7cc3a709 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Fri, 21 Apr 2023 
20:56:23 +0200 Subject: [PATCH 02/29] wlr_buffer: Introduce prerelease The prerelease signal lets users do things things at the last moment that would be inappropriate to do on the release signal. Inside the prerelease signal, it is allowed to lock the buffer and also upload/import the contents of the buffer to a texture. --- include/wlr/types/wlr_buffer.h | 1 + types/buffer/buffer.c | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/include/wlr/types/wlr_buffer.h b/include/wlr/types/wlr_buffer.h index 9a655dd2b..91543b5ea 100644 --- a/include/wlr/types/wlr_buffer.h +++ b/include/wlr/types/wlr_buffer.h @@ -56,6 +56,7 @@ struct wlr_buffer { struct { struct wl_signal destroy; struct wl_signal release; + struct wl_signal prerelease; } events; struct wlr_addon_set addons; diff --git a/types/buffer/buffer.c b/types/buffer/buffer.c index 953207a2c..ef744f1b5 100644 --- a/types/buffer/buffer.c +++ b/types/buffer/buffer.c @@ -19,6 +19,7 @@ void wlr_buffer_init(struct wlr_buffer *buffer, }; wl_signal_init(&buffer->events.destroy); wl_signal_init(&buffer->events.release); + wl_signal_init(&buffer->events.prerelease); wlr_addon_set_init(&buffer->addons); } @@ -58,6 +59,10 @@ void wlr_buffer_unlock(struct wlr_buffer *buffer) { assert(buffer->n_locks > 0); buffer->n_locks--; + if (buffer->n_locks == 0) { + wl_signal_emit_mutable(&buffer->events.prerelease, NULL); + } + if (buffer->n_locks == 0) { wl_signal_emit_mutable(&buffer->events.release, NULL); } From 46b0ba0da69eb8e47f453973bc67f2fd69fb16cd Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sat, 14 Sep 2024 17:16:28 -0400 Subject: [PATCH 03/29] wlr_raster: Add surface helper --- include/wlr/types/wlr_raster.h | 9 +++ types/wlr_raster.c | 141 +++++++++++++++++++++++++++++++++ 2 files changed, 150 insertions(+) diff --git a/include/wlr/types/wlr_raster.h b/include/wlr/types/wlr_raster.h index c63cb8ee0..6e654bb21 100644 --- a/include/wlr/types/wlr_raster.h +++ b/include/wlr/types/wlr_raster.h @@ -17,6 +17,7 @@ 
struct wlr_buffer; struct wlr_texture; struct wlr_renderer; struct wlr_drm_syncobj_timeline; +struct wlr_surface; struct wlr_raster { // May be NULL @@ -83,4 +84,12 @@ void wlr_raster_unlock(struct wlr_raster *raster); struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster *raster, struct wlr_renderer *renderer); +/** + * Creates a wlr_raster from a surface. This will automatically deduplicate + * rasters if multiple are consumed from the same surface so that redundant + * uploads are not performed. The raster returned will automatically be locked. + * Users are required to call wlr_raster_unlock() after invoking this function. + */ +struct wlr_raster *wlr_raster_from_surface(struct wlr_surface *surface); + #endif diff --git a/types/wlr_raster.c b/types/wlr_raster.c index c1ce68438..bb1efa6d5 100644 --- a/types/wlr_raster.c +++ b/types/wlr_raster.c @@ -1,10 +1,13 @@ #include +#include #include +#include #include #include #include #include #include +#include #include "types/wlr_buffer.h" static void raster_handle_buffer_release(struct wl_listener *listener, void *data) { @@ -120,3 +123,141 @@ struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster *raster, return texture; } + +struct surface_raster { + struct wlr_raster *raster; + struct wlr_surface *surface; + + struct wlr_addon addon; + + struct wl_listener buffer_prerelease; + + bool locking_buffer; +}; + +static void surface_raster_drop_raster(struct surface_raster *surface_raster) { + if (surface_raster->locking_buffer) { + wlr_buffer_unlock(surface_raster->raster->buffer); + surface_raster->locking_buffer = false; + } + + wlr_raster_unlock(surface_raster->raster); + surface_raster->raster = NULL; +} + +static void surface_raster_destroy(struct surface_raster *surface_raster) { + surface_raster_drop_raster(surface_raster); + + wl_list_remove(&surface_raster->buffer_prerelease.link); + wlr_addon_finish(&surface_raster->addon); + free(surface_raster); +} + +static void 
surface_raster_handle_addon_destroy(struct wlr_addon *addon) { + struct surface_raster *surface_raster = wl_container_of(addon, surface_raster, addon); + surface_raster_destroy(surface_raster); +} + +static void surface_raster_handle_buffer_prerelease(struct wl_listener *listener, void *data) { + struct surface_raster *surface_raster = + wl_container_of(listener, surface_raster, buffer_prerelease); + struct wlr_raster *raster = surface_raster->raster; + + struct wlr_surface_output *output; + wl_list_for_each(output, &surface_raster->surface->current_outputs, link) { + wlr_raster_obtain_texture(raster, output->output->renderer); + } + + // if there was a failed texture upload, keep on locking the buffer + if (!raster->texture) { + wlr_buffer_lock(raster->buffer); + surface_raster->locking_buffer = true; + } + + wl_list_remove(&surface_raster->buffer_prerelease.link); + wl_list_init(&surface_raster->buffer_prerelease.link); +} + +const struct wlr_addon_interface surface_raster_addon_impl = { + .name = "wlr_raster_surface", + .destroy = surface_raster_handle_addon_destroy, +}; + +static struct surface_raster *get_surface_raster(struct wlr_surface *surface) { + struct wlr_addon *addon = wlr_addon_find(&surface->addons, NULL, + &surface_raster_addon_impl); + if (!addon) { + return NULL; + } + + struct surface_raster *surface_raster = wl_container_of(addon, surface_raster, addon); + return surface_raster; +} + +struct wlr_raster *wlr_raster_from_surface(struct wlr_surface *surface) { + struct wlr_linux_drm_syncobj_surface_v1_state *syncobj_surface_state = + wlr_linux_drm_syncobj_v1_get_surface_state(surface); + + struct wlr_raster_create_options options = {0}; + if (syncobj_surface_state) { + options.wait_timeline = syncobj_surface_state->acquire_timeline; + options.wait_point = syncobj_surface_state->acquire_point; + } + + struct surface_raster *surface_raster = get_surface_raster(surface); + if (!surface_raster) { + surface_raster = calloc(1, sizeof(*surface_raster)); 
+ if (!surface_raster) { + return NULL; + } + + surface_raster->surface = surface; + + wlr_addon_init(&surface_raster->addon, &surface->addons, NULL, + &surface_raster_addon_impl); + + surface_raster->buffer_prerelease.notify = surface_raster_handle_buffer_prerelease; + wl_list_init(&surface_raster->buffer_prerelease.link); + } + + if (!surface->current.buffer) { + // surface is mapped but it hasn't committed a new buffer. We need to keep + // using the old one + if (wlr_surface_has_buffer(surface)) { + if (surface_raster->raster) { + return wlr_raster_lock(surface_raster->raster); + } else { + return NULL; + } + } + + wl_list_remove(&surface_raster->buffer_prerelease.link); + wl_list_init(&surface_raster->buffer_prerelease.link); + + surface_raster_drop_raster(surface_raster); + + return NULL; + } + + struct wlr_raster *raster; + if (surface_raster->raster) { + // make sure we haven't already seen this buffer + if (surface_raster->raster->buffer == surface->current.buffer) { + return wlr_raster_lock(surface_raster->raster); + } + } + + raster = wlr_raster_create(surface->current.buffer, &options); + + if (!raster) { + return NULL; + } + + surface_raster_drop_raster(surface_raster); + surface_raster->raster = wlr_raster_lock(raster); + + wl_list_remove(&surface_raster->buffer_prerelease.link); + wl_signal_add(&surface->current.buffer->events.prerelease, &surface_raster->buffer_prerelease); + + return raster; +} From ceff483764cb2a862bcb8df1e9cd9b91f81d397f Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sat, 14 Sep 2024 16:42:24 -0400 Subject: [PATCH 04/29] wlr_raster: Add partial texture uploads to surface helper --- types/wlr_raster.c | 92 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 90 insertions(+), 2 deletions(-) diff --git a/types/wlr_raster.c b/types/wlr_raster.c index bb1efa6d5..a4a95a8d2 100644 --- a/types/wlr_raster.c +++ b/types/wlr_raster.c @@ -124,6 +124,91 @@ struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster 
*raster, return texture; } +struct raster_update_state { + struct wlr_buffer *buffer; + pixman_region32_t damage; + + struct wlr_raster *new_raster; + struct wlr_raster *old_raster; + + struct wl_listener old_raster_destroy; + struct wl_listener new_raster_destroy; + struct wl_listener buffer_release; +}; + +static void destroy_raster_update_state(struct raster_update_state *state) { + wl_list_remove(&state->old_raster_destroy.link); + wl_list_remove(&state->new_raster_destroy.link); + wl_list_remove(&state->buffer_release.link); + pixman_region32_fini(&state->damage); + free(state); +} + +static void raster_update_handle_new_raster_destroy(struct wl_listener *listener, void *data) { + struct raster_update_state *state = wl_container_of(listener, state, new_raster_destroy); + destroy_raster_update_state(state); +} + +static void raster_update_handle_old_raster_destroy(struct wl_listener *listener, void *data) { + struct raster_update_state *state = wl_container_of(listener, state, old_raster_destroy); + + // if the new raster already has a texture, there's nothing we can do to help. 
+ if (state->new_raster->texture) { + assert(state->new_raster->texture->renderer == state->old_raster->texture->renderer); + destroy_raster_update_state(state); + return; + } + + struct wlr_texture *texture = state->old_raster->texture; + if (!texture) { + destroy_raster_update_state(state); + return; + } + + if (wlr_texture_update_from_buffer(texture, state->buffer, &state->damage)) { + raster_detach(state->old_raster, texture); + raster_attach(state->new_raster, texture); + } + + destroy_raster_update_state(state); +} + +static void raster_update_handle_buffer_release(struct wl_listener *listener, void *data) { + struct raster_update_state *state = wl_container_of(listener, state, buffer_release); + destroy_raster_update_state(state); +} + +static struct wlr_raster *raster_update(struct wlr_raster *raster, + struct wlr_buffer *buffer, const pixman_region32_t *damage, + const struct wlr_raster_create_options *options) { + struct raster_update_state *state = calloc(1, sizeof(*state)); + if (!state) { + return NULL; + } + + struct wlr_raster *new_raster = wlr_raster_create(buffer, options); + if (!new_raster) { + free(state); + return NULL; + } + + state->old_raster_destroy.notify = raster_update_handle_old_raster_destroy; + wl_signal_add(&raster->events.destroy, &state->old_raster_destroy); + state->new_raster_destroy.notify = raster_update_handle_new_raster_destroy; + wl_signal_add(&new_raster->events.destroy, &state->new_raster_destroy); + state->buffer_release.notify = raster_update_handle_buffer_release; + wl_signal_add(&buffer->events.release, &state->buffer_release); + + state->new_raster = new_raster; + state->old_raster = raster; + state->buffer = buffer; + + pixman_region32_init(&state->damage); + pixman_region32_copy(&state->damage, damage); + + return new_raster; +} + struct surface_raster { struct wlr_raster *raster; struct wlr_surface *surface; @@ -245,9 +330,12 @@ struct wlr_raster *wlr_raster_from_surface(struct wlr_surface *surface) { if 
(surface_raster->raster->buffer == surface->current.buffer) { return wlr_raster_lock(surface_raster->raster); } - } - raster = wlr_raster_create(surface->current.buffer, &options); + raster = raster_update(surface_raster->raster, + surface->current.buffer, &surface->buffer_damage, &options); + } else { + raster = wlr_raster_create(surface->current.buffer, &options); + } if (!raster) { return NULL; From 7a108f988357e5cc847d63ee349132be184a5491 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Wed, 10 Apr 2024 11:54:56 -0400 Subject: [PATCH 05/29] wlr_raster: Add backwards compatibility with wlr_client_buffer --- types/wlr_raster.c | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/types/wlr_raster.c b/types/wlr_raster.c index a4a95a8d2..9248ca471 100644 --- a/types/wlr_raster.c +++ b/types/wlr_raster.c @@ -279,6 +279,21 @@ static struct surface_raster *get_surface_raster(struct wlr_surface *surface) { return surface_raster; } +// Because wlr_raster doesn't lock the buffer itself, we need something extra +// to keep client buffer locked when operating in legacy mode. 
+struct client_buffer_compat { + struct wlr_client_buffer *buffer; + struct wl_listener destroy; +}; + +static void client_buffer_compat_raster_destroy(struct wl_listener *listener, void *data) { + struct client_buffer_compat *compat = wl_container_of(listener, compat, destroy); + + wlr_buffer_unlock(&compat->buffer->base); + wl_list_remove(&compat->destroy.link); + free(compat); +} + struct wlr_raster *wlr_raster_from_surface(struct wlr_surface *surface) { struct wlr_linux_drm_syncobj_surface_v1_state *syncobj_surface_state = wlr_linux_drm_syncobj_v1_get_surface_state(surface); @@ -289,6 +304,32 @@ struct wlr_raster *wlr_raster_from_surface(struct wlr_surface *surface) { options.wait_point = syncobj_surface_state->acquire_point; } + if (surface->compositor->renderer) { + // use legacy wlr_client_buffer + if (!surface->buffer) { + return NULL; + } + + struct client_buffer_compat *compat = calloc(1, sizeof(*compat)); + if (!compat) { + return NULL; + } + + struct wlr_raster *raster = wlr_raster_create(&surface->buffer->base, &options); + if (!raster) { + free(compat); + return NULL; + } + + compat->destroy.notify = client_buffer_compat_raster_destroy; + wl_signal_add(&raster->events.destroy, &compat->destroy); + + compat->buffer = surface->buffer; + wlr_buffer_lock(&surface->buffer->base); + + return raster; + } + struct surface_raster *surface_raster = get_surface_raster(surface); if (!surface_raster) { surface_raster = calloc(1, sizeof(*surface_raster)); From a9458c5bc75cb6b6581cb77b3c5145281a351688 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Thu, 11 Apr 2024 14:30:22 -0400 Subject: [PATCH 06/29] wlr_cursor_set_surface: Use wlr_raster to generate texture from surface Since wlr_raster supports wlr_compositor usage with and without a renderer, use it for wlr_curosr so cursors support running on a surface without a renderer. 
--- types/wlr_cursor.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/types/wlr_cursor.c b/types/wlr_cursor.c index a2489fbd1..d85893afc 100644 --- a/types/wlr_cursor.c +++ b/types/wlr_cursor.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -539,7 +540,13 @@ static void cursor_output_cursor_update(struct wlr_cursor_output_cursor *output_ } else if (cur->state->surface != NULL) { struct wlr_surface *surface = cur->state->surface; - struct wlr_texture *texture = wlr_surface_get_texture(surface); + struct wlr_texture *texture = NULL; + struct wlr_raster *raster = wlr_raster_from_surface(surface); + if (raster) { + texture = wlr_raster_obtain_texture(raster, output_cursor->output_cursor->output->renderer); + } + wlr_raster_unlock(raster); + int32_t hotspot_x = cur->state->surface_hotspot.x; int32_t hotspot_y = cur->state->surface_hotspot.y; From c218683adfeaadcf8337e7e87edb6b1bb8226ea3 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sat, 14 Sep 2024 16:44:57 -0400 Subject: [PATCH 07/29] wlr_scene: Manage textures through a raster --- include/wlr/types/wlr_scene.h | 12 ++-- types/scene/wlr_scene.c | 118 ++++++++++------------------------ 2 files changed, 38 insertions(+), 92 deletions(-) diff --git a/include/wlr/types/wlr_scene.h b/include/wlr/types/wlr_scene.h index bcfbc72f9..2fa1d6b46 100644 --- a/include/wlr/types/wlr_scene.h +++ b/include/wlr/types/wlr_scene.h @@ -35,6 +35,7 @@ struct wlr_xdg_surface; struct wlr_layer_surface_v1; struct wlr_drag_icon; struct wlr_surface; +struct wlr_raster; struct wlr_scene_node; struct wlr_scene_buffer; @@ -158,6 +159,7 @@ struct wlr_scene_buffer { // May be NULL struct wlr_buffer *buffer; + struct wlr_raster *raster; struct { struct wl_signal outputs_update; // struct wlr_scene_outputs_update_event @@ -188,15 +190,9 @@ struct wlr_scene_buffer { // private state uint64_t active_outputs; - struct wlr_texture *texture; struct 
wlr_linux_dmabuf_feedback_v1_init_options prev_feedback_options; bool own_buffer; - int buffer_width, buffer_height; - bool buffer_is_opaque; - - struct wlr_drm_syncobj_timeline *wait_timeline; - uint64_t wait_point; struct wl_listener buffer_release; struct wl_listener renderer_destroy; @@ -427,7 +423,7 @@ struct wlr_scene_buffer *wlr_scene_buffer_create(struct wlr_scene_tree *parent, struct wlr_buffer *buffer); /** - * Sets the buffer's backing buffer. + * Sets the buffer's backing raster. * * If the buffer is NULL, the buffer node will not be displayed. */ @@ -435,7 +431,7 @@ void wlr_scene_buffer_set_buffer(struct wlr_scene_buffer *scene_buffer, struct wlr_buffer *buffer); /** - * Sets the buffer's backing buffer with a custom damage region. + * Sets the buffer's backing raster with a custom damage region. * * The damage region is in buffer-local coordinates. If the region is NULL, * the whole buffer node will be damaged. diff --git a/types/scene/wlr_scene.c b/types/scene/wlr_scene.c index 713bfdfd0..3116518cd 100644 --- a/types/scene/wlr_scene.c +++ b/types/scene/wlr_scene.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -91,8 +92,6 @@ struct highlight_region { static void scene_buffer_set_buffer(struct wlr_scene_buffer *scene_buffer, struct wlr_buffer *buffer); -static void scene_buffer_set_texture(struct wlr_scene_buffer *scene_buffer, - struct wlr_texture *texture); void wlr_scene_node_destroy(struct wlr_scene_node *node) { if (node == NULL) { @@ -123,9 +122,8 @@ void wlr_scene_node_destroy(struct wlr_scene_node *node) { } scene_buffer_set_buffer(scene_buffer, NULL); - scene_buffer_set_texture(scene_buffer, NULL); + wlr_raster_unlock(scene_buffer->raster); pixman_region32_fini(&scene_buffer->opaque_region); - wlr_drm_syncobj_timeline_unref(scene_buffer->wait_timeline); } else if (node->type == WLR_SCENE_NODE_TREE) { struct wlr_scene_tree *scene_tree = wlr_scene_tree_from_node(node); @@ -259,7 +257,7 @@ static void 
scene_node_opaque_region(struct wlr_scene_node *node, int x, int y, } else if (node->type == WLR_SCENE_NODE_BUFFER) { struct wlr_scene_buffer *scene_buffer = wlr_scene_buffer_from_node(node); - if (!scene_buffer->buffer) { + if (!scene_buffer->raster) { return; } @@ -267,7 +265,7 @@ static void scene_node_opaque_region(struct wlr_scene_node *node, int x, int y, return; } - if (!scene_buffer->buffer_is_opaque) { + if (!scene_buffer->raster->opaque) { pixman_region32_copy(opaque, &scene_buffer->opaque_region); pixman_region32_intersect_rect(opaque, opaque, 0, 0, width, height); pixman_region32_translate(opaque, x, y); @@ -748,9 +746,6 @@ static void scene_buffer_set_buffer(struct wlr_scene_buffer *scene_buffer, wlr_buffer_unlock(scene_buffer->buffer); } scene_buffer->buffer = NULL; - scene_buffer->own_buffer = false; - scene_buffer->buffer_width = scene_buffer->buffer_height = 0; - scene_buffer->buffer_is_opaque = false; if (!buffer) { return; @@ -758,46 +753,11 @@ static void scene_buffer_set_buffer(struct wlr_scene_buffer *scene_buffer, scene_buffer->own_buffer = true; scene_buffer->buffer = wlr_buffer_lock(buffer); - scene_buffer->buffer_width = buffer->width; - scene_buffer->buffer_height = buffer->height; - scene_buffer->buffer_is_opaque = buffer_is_opaque(buffer); scene_buffer->buffer_release.notify = scene_buffer_handle_buffer_release; wl_signal_add(&buffer->events.release, &scene_buffer->buffer_release); } -static void scene_buffer_handle_renderer_destroy(struct wl_listener *listener, - void *data) { - struct wlr_scene_buffer *scene_buffer = wl_container_of(listener, scene_buffer, renderer_destroy); - scene_buffer_set_texture(scene_buffer, NULL); -} - -static void scene_buffer_set_texture(struct wlr_scene_buffer *scene_buffer, - struct wlr_texture *texture) { - wl_list_remove(&scene_buffer->renderer_destroy.link); - wlr_texture_destroy(scene_buffer->texture); - scene_buffer->texture = texture; - - if (texture != NULL) { - scene_buffer->renderer_destroy.notify 
= scene_buffer_handle_renderer_destroy; - wl_signal_add(&texture->renderer->events.destroy, &scene_buffer->renderer_destroy); - } else { - wl_list_init(&scene_buffer->renderer_destroy.link); - } -} - -static void scene_buffer_set_wait_timeline(struct wlr_scene_buffer *scene_buffer, - struct wlr_drm_syncobj_timeline *timeline, uint64_t point) { - wlr_drm_syncobj_timeline_unref(scene_buffer->wait_timeline); - if (timeline != NULL) { - scene_buffer->wait_timeline = wlr_drm_syncobj_timeline_ref(timeline); - scene_buffer->wait_point = point; - } else { - scene_buffer->wait_timeline = NULL; - scene_buffer->wait_point = 0; - } -} - struct wlr_scene_buffer *wlr_scene_buffer_create(struct wlr_scene_tree *parent, struct wlr_buffer *buffer) { struct wlr_scene_buffer *scene_buffer = calloc(1, sizeof(*scene_buffer)); @@ -816,6 +776,7 @@ struct wlr_scene_buffer *wlr_scene_buffer_create(struct wlr_scene_tree *parent, wl_list_init(&scene_buffer->buffer_release.link); wl_list_init(&scene_buffer->renderer_destroy.link); scene_buffer->opacity = 1; + scene_buffer->raster = buffer ? wlr_raster_create(buffer, NULL) : NULL; scene_buffer_set_buffer(scene_buffer, buffer); scene_node_update(&scene_buffer->node, NULL); @@ -836,7 +797,7 @@ void wlr_scene_buffer_set_buffer_with_options(struct wlr_scene_buffer *scene_buf assert(buffer || !options->damage); bool mapped = buffer != NULL; - bool prev_mapped = scene_buffer->buffer != NULL || scene_buffer->texture != NULL; + bool prev_mapped = scene_buffer->raster != NULL; if (!mapped && !prev_mapped) { // unmapping already unmapped buffer - noop @@ -848,14 +809,20 @@ void wlr_scene_buffer_set_buffer_with_options(struct wlr_scene_buffer *scene_buf // produce we need to update the node. 
bool update = mapped != prev_mapped; if (buffer != NULL && scene_buffer->dst_width == 0 && scene_buffer->dst_height == 0) { - update = update || scene_buffer->buffer_width != buffer->width || - scene_buffer->buffer_height != buffer->height; + update = update || (int)scene_buffer->raster->width != buffer->width || + (int)scene_buffer->raster->height != buffer->height; } + wlr_raster_unlock(scene_buffer->raster); + scene_buffer->raster = NULL; + scene_buffer_set_buffer(scene_buffer, buffer); - scene_buffer_set_texture(scene_buffer, NULL); - scene_buffer_set_wait_timeline(scene_buffer, - options->wait_timeline, options->wait_point); + if (buffer) { + scene_buffer->raster = wlr_raster_create(buffer, &(struct wlr_raster_create_options) { + .wait_timeline = options->wait_timeline, + .wait_point = options->wait_point, + }); + } if (update) { scene_node_update(&scene_buffer->node, NULL); @@ -1049,28 +1016,6 @@ void wlr_scene_buffer_set_filter_mode(struct wlr_scene_buffer *scene_buffer, scene_node_update(&scene_buffer->node, NULL); } -static struct wlr_texture *scene_buffer_get_texture( - struct wlr_scene_buffer *scene_buffer, struct wlr_renderer *renderer) { - if (scene_buffer->buffer == NULL || scene_buffer->texture != NULL) { - return scene_buffer->texture; - } - - struct wlr_client_buffer *client_buffer = - wlr_client_buffer_get(scene_buffer->buffer); - if (client_buffer != NULL) { - return client_buffer->texture; - } - - struct wlr_texture *texture = - wlr_texture_from_buffer(renderer, scene_buffer->buffer); - if (texture != NULL && scene_buffer->own_buffer) { - scene_buffer->own_buffer = false; - wlr_buffer_unlock(scene_buffer->buffer); - } - scene_buffer_set_texture(scene_buffer, texture); - return texture; -} - static void scene_node_get_size(struct wlr_scene_node *node, int *width, int *height) { *width = 0; @@ -1089,9 +1034,9 @@ static void scene_node_get_size(struct wlr_scene_node *node, if (scene_buffer->dst_width > 0 && scene_buffer->dst_height > 0) { *width = 
scene_buffer->dst_width; *height = scene_buffer->dst_height; - } else { - *width = scene_buffer->buffer_width; - *height = scene_buffer->buffer_height; + } else if (scene_buffer->raster) { + *width = scene_buffer->raster->width; + *height = scene_buffer->raster->height; wlr_output_transform_coords(scene_buffer->transform, width, height); } break; @@ -1374,8 +1319,12 @@ static void scene_entry_render(struct render_list_entry *entry, const struct ren case WLR_SCENE_NODE_BUFFER:; struct wlr_scene_buffer *scene_buffer = wlr_scene_buffer_from_node(node); - struct wlr_texture *texture = scene_buffer_get_texture(scene_buffer, - data->output->output->renderer); + struct wlr_texture *texture = NULL; + if (scene_buffer->raster) { + texture = wlr_raster_obtain_texture(scene_buffer->raster, + data->output->output->renderer); + } + if (texture == NULL) { scene_output_damage(data->output, &render_region); break; @@ -1396,8 +1345,8 @@ static void scene_entry_render(struct render_list_entry *entry, const struct ren .blend_mode = !data->output->scene->calculate_visibility || pixman_region32_not_empty(&opaque) ? 
WLR_RENDER_BLEND_MODE_PREMULTIPLIED : WLR_RENDER_BLEND_MODE_NONE, - .wait_timeline = scene_buffer->wait_timeline, - .wait_point = scene_buffer->wait_point, + .wait_timeline = scene_buffer->raster->wait_timeline, + .wait_point = scene_buffer->raster->wait_point, }); struct wlr_scene_output_sample_event sample_event = { @@ -1700,7 +1649,7 @@ static bool scene_node_invisible(struct wlr_scene_node *node) { } else if (node->type == WLR_SCENE_NODE_BUFFER) { struct wlr_scene_buffer *buffer = wlr_scene_buffer_from_node(node); - return buffer->buffer == NULL && buffer->texture == NULL; + return buffer->raster == NULL; } return false; @@ -1823,8 +1772,8 @@ static bool scene_entry_try_direct_scanout(struct render_list_entry *entry, return false; } - int default_width = buffer->buffer->width; - int default_height = buffer->buffer->height; + int default_width = buffer->raster->width; + int default_height = buffer->raster->height; wlr_output_transform_coords(buffer->transform, &default_width, &default_height); struct wlr_fbox default_box = { .width = default_width, @@ -1864,8 +1813,9 @@ static bool scene_entry_try_direct_scanout(struct render_list_entry *entry, } wlr_output_state_set_buffer(&pending, buffer->buffer); - if (buffer->wait_timeline != NULL) { - wlr_output_state_set_wait_timeline(&pending, buffer->wait_timeline, buffer->wait_point); + if (buffer->raster->wait_timeline != NULL) { + wlr_output_state_set_wait_timeline(&pending, + buffer->raster->wait_timeline, buffer->raster->wait_point); } if (!wlr_output_test_state(scene_output->output, &pending)) { From ea69cb66e64e12a1164c9fd2685ca52342def358 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sat, 14 Sep 2024 17:05:32 -0400 Subject: [PATCH 08/29] wlr_scene: Introduce wlr_scene_buffer_set_raster_with_damage --- include/wlr/types/wlr_scene.h | 9 ++++ types/scene/wlr_scene.c | 81 ++++++++++++++++++++--------------- 2 files changed, 55 insertions(+), 35 deletions(-) diff --git a/include/wlr/types/wlr_scene.h 
b/include/wlr/types/wlr_scene.h index 2fa1d6b46..c3571e6f0 100644 --- a/include/wlr/types/wlr_scene.h +++ b/include/wlr/types/wlr_scene.h @@ -461,6 +461,15 @@ struct wlr_scene_buffer_set_buffer_options { void wlr_scene_buffer_set_buffer_with_options(struct wlr_scene_buffer *scene_buffer, struct wlr_buffer *buffer, const struct wlr_scene_buffer_set_buffer_options *options); +/* + * Sets the buffer's backing raster with a custom damage region. + * + * The damage region is in buffer-local coordinates. If the region is NULL, + * the whole buffer node will be damaged. + */ +void wlr_scene_buffer_set_raster_with_damage(struct wlr_scene_buffer *scene_buffer, + struct wlr_raster *raster, const pixman_region32_t *damage); + /** * Sets the buffer's opaque region. This is an optimization hint used to * determine if buffers which reside under this one need to be rendered or not. diff --git a/types/scene/wlr_scene.c b/types/scene/wlr_scene.c index 3116518cd..3c86c3f98 100644 --- a/types/scene/wlr_scene.c +++ b/types/scene/wlr_scene.c @@ -745,15 +745,13 @@ static void scene_buffer_set_buffer(struct wlr_scene_buffer *scene_buffer, if (scene_buffer->own_buffer) { wlr_buffer_unlock(scene_buffer->buffer); } - scene_buffer->buffer = NULL; + scene_buffer->buffer = buffer; + scene_buffer->own_buffer = false; if (!buffer) { return; } - scene_buffer->own_buffer = true; - scene_buffer->buffer = wlr_buffer_lock(buffer); - scene_buffer->buffer_release.notify = scene_buffer_handle_buffer_release; wl_signal_add(&buffer->events.release, &scene_buffer->buffer_release); } @@ -776,9 +774,14 @@ struct wlr_scene_buffer *wlr_scene_buffer_create(struct wlr_scene_tree *parent, wl_list_init(&scene_buffer->buffer_release.link); wl_list_init(&scene_buffer->renderer_destroy.link); scene_buffer->opacity = 1; - scene_buffer->raster = buffer ? 
wlr_raster_create(buffer, NULL) : NULL; - scene_buffer_set_buffer(scene_buffer, buffer); + + if (buffer) { + scene_buffer->raster = wlr_raster_create(buffer, NULL); + wlr_buffer_lock(buffer); + scene_buffer->own_buffer = true; + } + scene_node_update(&scene_buffer->node, NULL); return scene_buffer; @@ -791,38 +794,47 @@ void wlr_scene_buffer_set_buffer_with_options(struct wlr_scene_buffer *scene_buf options = &default_options; } - // specifying a region for a NULL buffer doesn't make sense. We need to know - // about the buffer to scale the buffer local coordinates down to scene + struct wlr_raster *raster = NULL; + if (buffer) { + raster = wlr_raster_create(buffer, &(struct wlr_raster_create_options) { + .wait_timeline = options->wait_timeline, + .wait_point = options->wait_point, + }); + } + + wlr_scene_buffer_set_raster_with_damage(scene_buffer, raster, options->damage); + + if (raster) { + wlr_buffer_lock(buffer); + scene_buffer->own_buffer = true; + } + + wlr_raster_unlock(raster); +} + +void wlr_scene_buffer_set_raster_with_damage(struct wlr_scene_buffer *scene_buffer, + struct wlr_raster *raster, const pixman_region32_t *damage) { + // specifying a region for a NULL raster doesn't make sense. We need to know + // about the raster to scale the raster local coordinates down to scene // coordinates. - assert(buffer || !options->damage); + assert(raster || !damage); - bool mapped = buffer != NULL; - bool prev_mapped = scene_buffer->raster != NULL; - - if (!mapped && !prev_mapped) { - // unmapping already unmapped buffer - noop + if (raster == scene_buffer->raster) { return; } // if this node used to not be mapped or its previous displayed // buffer region will be different from what the new buffer would // produce we need to update the node. 
- bool update = mapped != prev_mapped; - if (buffer != NULL && scene_buffer->dst_width == 0 && scene_buffer->dst_height == 0) { - update = update || (int)scene_buffer->raster->width != buffer->width || - (int)scene_buffer->raster->height != buffer->height; + bool update = !raster != !scene_buffer->raster; + if (raster != NULL && scene_buffer->dst_width == 0 && scene_buffer->dst_height == 0) { + update = update || scene_buffer->raster->width != raster->width || + scene_buffer->raster->height != raster->height; } wlr_raster_unlock(scene_buffer->raster); - scene_buffer->raster = NULL; - - scene_buffer_set_buffer(scene_buffer, buffer); - if (buffer) { - scene_buffer->raster = wlr_raster_create(buffer, &(struct wlr_raster_create_options) { - .wait_timeline = options->wait_timeline, - .wait_point = options->wait_point, - }); - } + scene_buffer_set_buffer(scene_buffer, raster ? raster->buffer : NULL); + scene_buffer->raster = raster ? wlr_raster_lock(raster) : NULL; if (update) { scene_node_update(&scene_buffer->node, NULL); @@ -837,8 +849,7 @@ void wlr_scene_buffer_set_buffer_with_options(struct wlr_scene_buffer *scene_buf } pixman_region32_t fallback_damage; - pixman_region32_init_rect(&fallback_damage, 0, 0, buffer->width, buffer->height); - const pixman_region32_t *damage = options->damage; + pixman_region32_init_rect(&fallback_damage, 0, 0, raster->width, raster->height); if (!damage) { damage = &fallback_damage; } @@ -847,26 +858,26 @@ void wlr_scene_buffer_set_buffer_with_options(struct wlr_scene_buffer *scene_buf if (wlr_fbox_empty(&box)) { box.x = 0; box.y = 0; - box.width = buffer->width; - box.height = buffer->height; + box.width = raster->width; + box.height = raster->height; } wlr_fbox_transform(&box, &box, scene_buffer->transform, - buffer->width, buffer->height); + raster->width, raster->height); float scale_x, scale_y; if (scene_buffer->dst_width || scene_buffer->dst_height) { scale_x = scene_buffer->dst_width / box.width; scale_y = 
scene_buffer->dst_height / box.height; } else { - scale_x = buffer->width / box.width; - scale_y = buffer->height / box.height; + scale_x = raster->width / box.width; + scale_y = raster->height / box.height; } pixman_region32_t trans_damage; pixman_region32_init(&trans_damage); wlr_region_transform(&trans_damage, damage, - scene_buffer->transform, buffer->width, buffer->height); + scene_buffer->transform, raster->width, raster->height); pixman_region32_intersect_rect(&trans_damage, &trans_damage, box.x, box.y, box.width, box.height); pixman_region32_translate(&trans_damage, -box.x, -box.y); From d74b6fb41fa566a30e6256dc3a7b53b678e7f240 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sat, 14 Sep 2024 17:40:05 -0400 Subject: [PATCH 09/29] wlr_scene: Remove usage of wlr_client_buffer --- types/scene/surface.c | 50 ++++++------------------------------------- 1 file changed, 6 insertions(+), 44 deletions(-) diff --git a/types/scene/surface.c b/types/scene/surface.c index 2aff5af37..edcbbb0db 100644 --- a/types/scene/surface.c +++ b/types/scene/surface.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include "types/wlr_scene.h" @@ -76,28 +77,6 @@ static void scene_surface_handle_surface_destroy( wlr_scene_node_destroy(&surface->buffer->node); } -// This is used for wlr_scene where it unconditionally locks buffers preventing -// reuse of the existing texture for shm clients. With the usage pattern of -// wlr_scene surface handling, we can mark its locked buffer as safe -// for mutation. 
-static void client_buffer_mark_next_can_damage(struct wlr_client_buffer *buffer) { - buffer->n_ignore_locks++; -} - -static void scene_buffer_unmark_client_buffer(struct wlr_scene_buffer *scene_buffer) { - if (!scene_buffer->buffer) { - return; - } - - struct wlr_client_buffer *buffer = wlr_client_buffer_get(scene_buffer->buffer); - if (!buffer) { - return; - } - - assert(buffer->n_ignore_locks > 0); - buffer->n_ignore_locks--; -} - static int min(int a, int b) { return a < b ? a : b; } @@ -160,29 +139,13 @@ static void surface_reconfigure(struct wlr_scene_surface *scene_surface) { wlr_scene_buffer_set_transform(scene_buffer, state->transform); wlr_scene_buffer_set_opacity(scene_buffer, opacity); - scene_buffer_unmark_client_buffer(scene_buffer); - - if (surface->buffer) { - client_buffer_mark_next_can_damage(surface->buffer); + struct wlr_raster *raster = wlr_raster_from_surface(surface); + if (raster) { + wlr_scene_buffer_set_raster_with_damage(scene_buffer, + raster, &surface->buffer_damage); struct wlr_linux_drm_syncobj_surface_v1_state *syncobj_surface_state = wlr_linux_drm_syncobj_v1_get_surface_state(surface); - - struct wlr_drm_syncobj_timeline *wait_timeline = NULL; - uint64_t wait_point = 0; - if (syncobj_surface_state != NULL) { - wait_timeline = syncobj_surface_state->acquire_timeline; - wait_point = syncobj_surface_state->acquire_point; - } - - struct wlr_scene_buffer_set_buffer_options options = { - .damage = &surface->buffer_damage, - .wait_timeline = wait_timeline, - .wait_point = wait_point, - }; - wlr_scene_buffer_set_buffer_with_options(scene_buffer, - &surface->buffer->base, &options); - if (syncobj_surface_state != NULL && (surface->current.committed & WLR_SURFACE_STATE_BUFFER)) { wlr_linux_drm_syncobj_v1_state_signal_release_with_buffer(syncobj_surface_state, @@ -192,6 +155,7 @@ static void surface_reconfigure(struct wlr_scene_surface *scene_surface) { wlr_scene_buffer_set_buffer(scene_buffer, NULL); } + wlr_raster_unlock(raster); 
pixman_region32_fini(&opaque); } @@ -231,8 +195,6 @@ static bool scene_buffer_point_accepts_input(struct wlr_scene_buffer *scene_buff static void surface_addon_destroy(struct wlr_addon *addon) { struct wlr_scene_surface *surface = wl_container_of(addon, surface, addon); - scene_buffer_unmark_client_buffer(surface->buffer); - wlr_addon_finish(&surface->addon); wl_list_remove(&surface->outputs_update.link); From a96efda6c538ed2b6d8884f16378955f71de0f32 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sat, 14 Sep 2024 17:32:39 -0400 Subject: [PATCH 10/29] tinywl: Use wlr_raster --- tinywl/tinywl.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 62 insertions(+), 1 deletion(-) diff --git a/tinywl/tinywl.c b/tinywl/tinywl.c index edc6269b3..c2be8f7fc 100644 --- a/tinywl/tinywl.c +++ b/tinywl/tinywl.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -54,6 +55,7 @@ struct tinywl_server { struct wl_listener cursor_frame; struct wlr_seat *seat; + struct wl_listener new_surface; struct wl_listener new_input; struct wl_listener request_cursor; struct wl_listener request_set_selection; @@ -109,6 +111,12 @@ struct tinywl_keyboard { struct wl_listener destroy; }; +struct tinywl_surface { + struct wlr_surface *surface; + struct wl_listener commit; + struct wl_listener destroy; +}; + static void focus_toplevel(struct tinywl_toplevel *toplevel, struct wlr_surface *surface) { /* Note: this function only deals with keyboard focus. */ if (toplevel == NULL) { @@ -876,6 +884,48 @@ static void server_new_xdg_popup(struct wl_listener *listener, void *data) { wl_signal_add(&xdg_popup->events.destroy, &popup->destroy); } +static void surface_handle_commit(struct wl_listener *listener, void *data) { + struct tinywl_surface *surface = wl_container_of(listener, surface, commit); + + /* + * wlr_raster_from_surface will not automatically latch onto a surface and + * update itself as the surface commits new buffers. 
We have to handle that + * ourselves. Every time a surface is committed, we have to make sure to + * read from the surface buffer before it is unlocked at the end of the + * commit. Since wlr_raster_from_surface will de-duplicate rasters created + * from the same surface, wlr_scene will consume rasters that are created + * here. + */ + struct wlr_raster *raster = wlr_raster_from_surface(surface->surface); + + // unlock the raster immediately as we're only prepping the surface + wlr_raster_unlock(raster); +} + +static void surface_handle_destroy(struct wl_listener *listener, void *data) { + struct tinywl_surface *surface = wl_container_of(listener, surface, destroy); + wl_list_remove(&surface->commit.link); + wl_list_remove(&surface->destroy.link); + free(surface); +} + +static void handle_new_surface(struct wl_listener *listener, void *data) { + struct wlr_surface *wlr_surface = data; + + struct tinywl_surface *surface = calloc(1, sizeof(*surface)); + if (!surface) { + return; + } + + surface->surface = wlr_surface; + + surface->commit.notify = surface_handle_commit; + wl_signal_add(&wlr_surface->events.commit, &surface->commit); + + surface->destroy.notify = surface_handle_destroy; + wl_signal_add(&wlr_surface->events.destroy, &surface->destroy); +} + int main(int argc, char *argv[]) { wlr_log_init(WLR_DEBUG, NULL); char *startup_cmd = NULL; @@ -940,7 +990,18 @@ int main(int argc, char *argv[]) { * to dig your fingers in and play with their behavior if you want. Note that * the clients cannot set the selection directly without compositor approval, * see the handling of the request_set_selection event below.*/ - wlr_compositor_create(server.wl_display, 5, server.renderer); + struct wlr_compositor *compositor = wlr_compositor_create(server.wl_display, 5, NULL); + + /* + * Surfaces act as a container for state that come from a wayland surface + * of a client. Surfaces provide buffers that act as the pixel data that the + * client wants to show. 
However, buffers aren't immetiately useful for us. + * We need to upload them to the GPU and for this, we'll use wlr_raster to + * help us do that. + */ + server.new_surface.notify = handle_new_surface; + wl_signal_add(&compositor->events.new_surface, &server.new_surface); + wlr_subcompositor_create(server.wl_display); wlr_data_device_manager_create(server.wl_display); From ce918e81390de0a3734bc310ca8f095a4b7e0bd1 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Thu, 4 May 2023 13:50:27 -0400 Subject: [PATCH 11/29] wlr_compositor: Move buffer damage clear to end of commit It makes sense to do it here because it is where we null the buffer. The buffer damage is only useful to us as long as we have the buffer. --- types/wlr_compositor.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/types/wlr_compositor.c b/types/wlr_compositor.c index 081af3539..4b3e71554 100644 --- a/types/wlr_compositor.c +++ b/types/wlr_compositor.c @@ -564,6 +564,8 @@ static void surface_commit_state(struct wlr_surface *surface, // released immediately on commit when they are uploaded to the GPU. wlr_buffer_unlock(surface->current.buffer); surface->current.buffer = NULL; + + pixman_region32_clear(&surface->buffer_damage); } static void surface_handle_commit(struct wl_client *client, From 4f0058f85c1dc10d977711f101fd2c748bb0fb8c Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sun, 5 May 2024 12:57:29 -0400 Subject: [PATCH 12/29] wlr_compositor: release state on buffer release wlr_compositor will now wait for the current buffer to be released before clearing relevant state. For now, this will always happen at the end of the commit so there should be no functional change here. 
--- include/wlr/types/wlr_compositor.h | 2 ++ types/wlr_compositor.c | 43 ++++++++++++++++++++++++++---- 2 files changed, 40 insertions(+), 5 deletions(-) diff --git a/include/wlr/types/wlr_compositor.h b/include/wlr/types/wlr_compositor.h index 6ba8d8a07..6524aea25 100644 --- a/include/wlr/types/wlr_compositor.h +++ b/include/wlr/types/wlr_compositor.h @@ -239,6 +239,7 @@ struct wlr_surface { // private state struct wl_listener role_resource_destroy; + struct wl_listener current_buffer_release; struct { int32_t scale; @@ -250,6 +251,7 @@ struct wlr_surface { bool unmap_commit; bool opaque; + bool consumed; bool handling_commit; bool pending_rejected; diff --git a/types/wlr_compositor.c b/types/wlr_compositor.c index 4b3e71554..24bdaa8fc 100644 --- a/types/wlr_compositor.c +++ b/types/wlr_compositor.c @@ -405,8 +405,12 @@ static void surface_state_move(struct wlr_surface_state *state, } static void surface_apply_damage(struct wlr_surface *surface) { + wl_list_remove(&surface->current_buffer_release.link); + if (surface->current.buffer == NULL) { // NULL commit + wl_list_init(&surface->current_buffer_release.link); + if (surface->buffer != NULL) { wlr_buffer_unlock(&surface->buffer->base); } @@ -415,13 +419,14 @@ static void surface_apply_damage(struct wlr_surface *surface) { return; } + wl_signal_add(&surface->current.buffer->events.release, + &surface->current_buffer_release); + surface->opaque = buffer_is_opaque(surface->current.buffer); if (surface->buffer != NULL) { if (wlr_client_buffer_apply_damage(surface->buffer, surface->current.buffer, &surface->buffer_damage)) { - wlr_buffer_unlock(surface->current.buffer); - surface->current.buffer = NULL; return; } } @@ -508,10 +513,26 @@ error: wl_resource_post_no_memory(surface->resource); } +static void surface_clean_state(struct wlr_surface *surface) { + assert(surface->consumed); + + wl_list_remove(&surface->current_buffer_release.link); + wl_list_init(&surface->current_buffer_release.link); + 
pixman_region32_clear(&surface->buffer_damage); + surface->current.buffer = NULL; + surface->consumed = false; +} + static void surface_commit_state(struct wlr_surface *surface, struct wlr_surface_state *next) { assert(next->cached_state_locks == 0); + // if the surface was consumed that means we don't own the current buffer + // anymore. + if (surface->consumed) { + surface_clean_state(surface); + } + bool invalid_buffer = next->committed & WLR_SURFACE_STATE_BUFFER; if (invalid_buffer && next->buffer == NULL) { @@ -562,10 +583,8 @@ static void surface_commit_state(struct wlr_surface *surface, // Release the buffer after emitting the commit event, so that listeners can // access it. Don't leave the buffer locked so that wl_shm buffers can be // released immediately on commit when they are uploaded to the GPU. + surface->consumed = true; wlr_buffer_unlock(surface->current.buffer); - surface->current.buffer = NULL; - - pixman_region32_clear(&surface->buffer_damage); } static void surface_handle_commit(struct wl_client *client, @@ -720,6 +739,10 @@ static void surface_destroy_role_object(struct wlr_surface *surface); static void surface_handle_resource_destroy(struct wl_resource *resource) { struct wlr_surface *surface = wlr_surface_from_resource(resource); + if (surface->consumed) { + surface_clean_state(surface); + } + struct wlr_surface_output *surface_output, *surface_output_tmp; wl_list_for_each_safe(surface_output, surface_output_tmp, &surface->current_outputs, link) { @@ -738,6 +761,7 @@ static void surface_handle_resource_destroy(struct wl_resource *resource) { surface_state_destroy_cached(cached, surface); } + wl_list_remove(&surface->current_buffer_release.link); wl_list_remove(&surface->role_resource_destroy.link); wl_list_remove(&surface->pending_buffer_resource_destroy.link); @@ -753,6 +777,12 @@ static void surface_handle_resource_destroy(struct wl_resource *resource) { free(surface); } +static void surface_handle_current_buffer_release(struct 
wl_listener *listener, + void *data) { + struct wlr_surface *surface = wl_container_of(listener, surface, current_buffer_release); + surface_clean_state(surface); +} + static struct wlr_surface *surface_create(struct wl_client *client, uint32_t version, uint32_t id, struct wlr_compositor *compositor) { struct wlr_surface *surface = calloc(1, sizeof(*surface)); @@ -797,6 +827,9 @@ static struct wlr_surface *surface_create(struct wl_client *client, surface->pending_buffer_resource_destroy.notify = pending_buffer_resource_handle_destroy; wl_list_init(&surface->pending_buffer_resource_destroy.link); + surface->current_buffer_release.notify = surface_handle_current_buffer_release; + wl_list_init(&surface->current_buffer_release.link); + return surface; } From 7ba7f774ca13f29084b37ecd1fa1acd9c1314f7a Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Wed, 3 May 2023 22:58:39 -0400 Subject: [PATCH 13/29] wlr_compositor: Introduce wlr_surface_consume If the compositor is running without a renderer, that means that the compositor must be driven by something external that may or may not be there. So we have two scenarios: 1. This compositor is currently being watched and driven by some external source that is consuming buffers. This is okay, because during the commit handler `surface->current.buffer` and `surface->buffer_damage` will be usable and things will be handled like normal. 2. Things break however if the compositor is not currently driven. This however is commonly temporary. Something may not be interested right now, but later it can be. In this case we have to accumulate state until this external consumer is ready. Here, we have to accumulate the `buffer_damage` and keep the buffer locked until the consumer is ready. `wlr_surface_consume` needs to be called when the state of this surface was consumed so that it is safe to release these resources. 
--- include/wlr/types/wlr_compositor.h | 6 ++++++ types/wlr_compositor.c | 15 +++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/include/wlr/types/wlr_compositor.h b/include/wlr/types/wlr_compositor.h index 6524aea25..dc54a5f12 100644 --- a/include/wlr/types/wlr_compositor.h +++ b/include/wlr/types/wlr_compositor.h @@ -538,6 +538,12 @@ void wlr_surface_synced_finish(struct wlr_surface_synced *synced); void *wlr_surface_synced_get_state(struct wlr_surface_synced *synced, const struct wlr_surface_state *state); +/* + * Consumes buffer and damage state of the buffer so that the compositor may + * drop references to any of these resources. + */ +void wlr_surface_consume(struct wlr_surface *surface); + /** * Get a Pixman region from a wl_region resource. */ diff --git a/types/wlr_compositor.c b/types/wlr_compositor.c index 24bdaa8fc..6f7c5061f 100644 --- a/types/wlr_compositor.c +++ b/types/wlr_compositor.c @@ -419,6 +419,10 @@ static void surface_apply_damage(struct wlr_surface *surface) { return; } + // lock the buffer during the commit so that everything watching the surface + // can have a chance to take a look at the buffer. 
+ wlr_buffer_lock(surface->current.buffer); + wl_signal_add(&surface->current.buffer->events.release, &surface->current_buffer_release); @@ -427,6 +431,7 @@ static void surface_apply_damage(struct wlr_surface *surface) { if (surface->buffer != NULL) { if (wlr_client_buffer_apply_damage(surface->buffer, surface->current.buffer, &surface->buffer_damage)) { + wlr_surface_consume(surface); return; } } @@ -437,6 +442,7 @@ static void surface_apply_damage(struct wlr_surface *surface) { struct wlr_client_buffer *buffer = wlr_client_buffer_create( surface->current.buffer, surface->compositor->renderer); + wlr_surface_consume(surface); if (buffer == NULL) { wlr_log(WLR_ERROR, "Failed to upload buffer"); @@ -783,6 +789,15 @@ static void surface_handle_current_buffer_release(struct wl_listener *listener, surface_clean_state(surface); } +void wlr_surface_consume(struct wlr_surface *surface) { + if (surface->consumed || !surface->current.buffer) { + return; + } + + surface->consumed = true; + wlr_buffer_unlock(surface->current.buffer); +} + static struct wlr_surface *surface_create(struct wl_client *client, uint32_t version, uint32_t id, struct wlr_compositor *compositor) { struct wlr_surface *surface = calloc(1, sizeof(*surface)); From f1c5184ec3466982ed8f29fc564e13a03a505b30 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sat, 8 Jun 2024 16:11:25 -0400 Subject: [PATCH 14/29] wlr_raster: Use wlr_surface_consume() --- tinywl/tinywl.c | 63 +--------------------------------------------- types/wlr_raster.c | 1 + 2 files changed, 2 insertions(+), 62 deletions(-) diff --git a/tinywl/tinywl.c b/tinywl/tinywl.c index c2be8f7fc..2c844006b 100644 --- a/tinywl/tinywl.c +++ b/tinywl/tinywl.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include @@ -55,7 +54,6 @@ struct tinywl_server { struct wl_listener cursor_frame; struct wlr_seat *seat; - struct wl_listener new_surface; struct wl_listener new_input; struct wl_listener request_cursor; struct 
wl_listener request_set_selection; @@ -111,12 +109,6 @@ struct tinywl_keyboard { struct wl_listener destroy; }; -struct tinywl_surface { - struct wlr_surface *surface; - struct wl_listener commit; - struct wl_listener destroy; -}; - static void focus_toplevel(struct tinywl_toplevel *toplevel, struct wlr_surface *surface) { /* Note: this function only deals with keyboard focus. */ if (toplevel == NULL) { @@ -884,48 +876,6 @@ static void server_new_xdg_popup(struct wl_listener *listener, void *data) { wl_signal_add(&xdg_popup->events.destroy, &popup->destroy); } -static void surface_handle_commit(struct wl_listener *listener, void *data) { - struct tinywl_surface *surface = wl_container_of(listener, surface, commit); - - /* - * wlr_raster_from_surface will not automatically latch onto a surface and - * update itself as the surface commits new buffers. We have to handle that - * ourselves. Every time a surface is committed, we have to make sure to - * read from the surface buffer before it is unlocked at the end of the - * commit. Since wlr_raster_from_surface will de-duplicate rasters created - * from the same surface, wlr_scene will consume rasters that are created - * here. 
- */ - struct wlr_raster *raster = wlr_raster_from_surface(surface->surface); - - // unlock the raster immediately as we're only prepping the surface - wlr_raster_unlock(raster); -} - -static void surface_handle_destroy(struct wl_listener *listener, void *data) { - struct tinywl_surface *surface = wl_container_of(listener, surface, destroy); - wl_list_remove(&surface->commit.link); - wl_list_remove(&surface->destroy.link); - free(surface); -} - -static void handle_new_surface(struct wl_listener *listener, void *data) { - struct wlr_surface *wlr_surface = data; - - struct tinywl_surface *surface = calloc(1, sizeof(*surface)); - if (!surface) { - return; - } - - surface->surface = wlr_surface; - - surface->commit.notify = surface_handle_commit; - wl_signal_add(&wlr_surface->events.commit, &surface->commit); - - surface->destroy.notify = surface_handle_destroy; - wl_signal_add(&wlr_surface->events.destroy, &surface->destroy); -} - int main(int argc, char *argv[]) { wlr_log_init(WLR_DEBUG, NULL); char *startup_cmd = NULL; @@ -990,18 +940,7 @@ int main(int argc, char *argv[]) { * to dig your fingers in and play with their behavior if you want. Note that * the clients cannot set the selection directly without compositor approval, * see the handling of the request_set_selection event below.*/ - struct wlr_compositor *compositor = wlr_compositor_create(server.wl_display, 5, NULL); - - /* - * Surfaces act as a container for state that come from a wayland surface - * of a client. Surfaces provide buffers that act as the pixel data that the - * client wants to show. However, buffers aren't immetiately useful for us. - * We need to upload them to the GPU and for this, we'll use wlr_raster to - * help us do that. 
- */ - server.new_surface.notify = handle_new_surface; - wl_signal_add(&compositor->events.new_surface, &server.new_surface); - + wlr_compositor_create(server.wl_display, 5, NULL); wlr_subcompositor_create(server.wl_display); wlr_data_device_manager_create(server.wl_display); diff --git a/types/wlr_raster.c b/types/wlr_raster.c index 9248ca471..703722a9a 100644 --- a/types/wlr_raster.c +++ b/types/wlr_raster.c @@ -388,5 +388,6 @@ struct wlr_raster *wlr_raster_from_surface(struct wlr_surface *surface) { wl_list_remove(&surface_raster->buffer_prerelease.link); wl_signal_add(&surface->current.buffer->events.prerelease, &surface_raster->buffer_prerelease); + wlr_surface_consume(surface); return raster; } From 38d761c837dcf0a978a4e377bd27a219d3ade344 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Fri, 21 Apr 2023 01:14:35 +0200 Subject: [PATCH 15/29] wlr_raster: Support multiple renderers --- include/wlr/types/wlr_raster.h | 10 ++- types/wlr_raster.c | 113 ++++++++++++++++++++++++--------- 2 files changed, 93 insertions(+), 30 deletions(-) diff --git a/include/wlr/types/wlr_raster.h b/include/wlr/types/wlr_raster.h index 6e654bb21..39295c6e8 100644 --- a/include/wlr/types/wlr_raster.h +++ b/include/wlr/types/wlr_raster.h @@ -19,10 +19,19 @@ struct wlr_renderer; struct wlr_drm_syncobj_timeline; struct wlr_surface; +struct wlr_raster_source { + struct wlr_texture *texture; + struct wl_list link; + + struct wl_listener renderer_destroy; +}; + struct wlr_raster { // May be NULL struct wlr_buffer *buffer; + struct wl_list sources; + uint32_t width, height; bool opaque; @@ -39,7 +48,6 @@ struct wlr_raster { struct wl_listener buffer_release; - struct wlr_texture *texture; struct wl_listener renderer_destroy; }; diff --git a/types/wlr_raster.c b/types/wlr_raster.c index 703722a9a..68f05c821 100644 --- a/types/wlr_raster.c +++ b/types/wlr_raster.c @@ -24,6 +24,7 @@ struct wlr_raster *wlr_raster_create(struct wlr_buffer *buffer, return NULL; } + 
wl_list_init(&raster->sources); wl_signal_init(&raster->events.destroy); assert(buffer); @@ -45,6 +46,12 @@ struct wlr_raster *wlr_raster_create(struct wlr_buffer *buffer, return raster; } +static void raster_source_destroy(struct wlr_raster_source *source) { + wl_list_remove(&source->link); + wl_list_remove(&source->renderer_destroy.link); + free(source); +} + static void raster_consider_destroy(struct wlr_raster *raster) { if (raster->n_locks > 0) { return; @@ -52,9 +59,10 @@ static void raster_consider_destroy(struct wlr_raster *raster) { wl_signal_emit_mutable(&raster->events.destroy, NULL); - if (raster->texture) { - wl_list_remove(&raster->renderer_destroy.link); - wlr_texture_destroy(raster->texture); + struct wlr_raster_source *source, *source_tmp; + wl_list_for_each_safe(source, source_tmp, &raster->sources, link) { + wlr_texture_destroy(source->texture); + raster_source_destroy(source); } wl_list_remove(&raster->buffer_release.link); @@ -79,33 +87,63 @@ void wlr_raster_unlock(struct wlr_raster *raster) { } static void raster_detach(struct wlr_raster *raster, struct wlr_texture *texture) { - assert(texture); - assert(raster->texture == texture); + if (!texture) { + return; + } - wl_list_remove(&raster->renderer_destroy.link); - raster->texture = NULL; + struct wlr_raster_source *source; + wl_list_for_each(source, &raster->sources, link) { + if (source->texture == texture) { + raster_source_destroy(source); + return; + } + } + + assert(false); } static void handle_renderer_destroy(struct wl_listener *listener, void *data) { - struct wlr_raster *raster = wl_container_of(listener, raster, renderer_destroy); - raster_detach(raster, raster->texture); + struct wlr_raster_source *source = wl_container_of(listener, source, renderer_destroy); + raster_source_destroy(source); } static void raster_attach(struct wlr_raster *raster, struct wlr_texture *texture) { assert(texture->width == raster->width && texture->height == raster->height); - assert(!raster->texture); - 
raster->renderer_destroy.notify = handle_renderer_destroy; - wl_signal_add(&texture->renderer->events.destroy, &raster->renderer_destroy); + struct wlr_raster_source *source; + wl_list_for_each(source, &raster->sources, link) { + assert(source->texture != texture); + } - raster->texture = texture; + source = calloc(1, sizeof(*source)); + if (!source) { + return; + } + + source->renderer_destroy.notify = handle_renderer_destroy; + wl_signal_add(&texture->renderer->events.destroy, &source->renderer_destroy); + + wl_list_insert(&raster->sources, &source->link); + source->texture = texture; +} + +static struct wlr_texture *wlr_raster_get_texture(struct wlr_raster *raster, + struct wlr_renderer *renderer) { + struct wlr_raster_source *source; + wl_list_for_each(source, &raster->sources, link) { + if (source->texture->renderer == renderer) { + return source->texture; + } + } + + return NULL; } struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster *raster, struct wlr_renderer *renderer) { - if (raster->texture) { - assert(raster->texture->renderer == renderer); - return raster->texture; + struct wlr_texture *texture = wlr_raster_get_texture(raster, renderer); + if (texture) { + return texture; } assert(raster->buffer); @@ -116,7 +154,7 @@ struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster *raster, return client_buffer->texture; } - struct wlr_texture *texture = wlr_texture_from_buffer(renderer, raster->buffer); + texture = wlr_texture_from_buffer(renderer, raster->buffer); if (texture) { raster_attach(raster, texture); } @@ -153,21 +191,18 @@ static void raster_update_handle_old_raster_destroy(struct wl_listener *listener struct raster_update_state *state = wl_container_of(listener, state, old_raster_destroy); // if the new raster already has a texture, there's nothing we can do to help. 
- if (state->new_raster->texture) { - assert(state->new_raster->texture->renderer == state->old_raster->texture->renderer); + if (!wl_list_empty(&state->new_raster->sources)) { destroy_raster_update_state(state); return; } - struct wlr_texture *texture = state->old_raster->texture; - if (!texture) { - destroy_raster_update_state(state); - return; - } - - if (wlr_texture_update_from_buffer(texture, state->buffer, &state->damage)) { - raster_detach(state->old_raster, texture); - raster_attach(state->new_raster, texture); + struct wlr_raster_source *source, *tmp_source; + wl_list_for_each_safe(source, tmp_source, &state->old_raster->sources, link) { + struct wlr_texture *texture = source->texture; + if (wlr_texture_update_from_buffer(texture, state->buffer, &state->damage)) { + raster_detach(state->old_raster, texture); + raster_attach(state->new_raster, texture); + } } destroy_raster_update_state(state); @@ -254,7 +289,7 @@ static void surface_raster_handle_buffer_prerelease(struct wl_listener *listener } // if there was a failed texture upload, keep on locking the buffer - if (!raster->texture) { + if (wl_list_empty(&raster->sources)) { wlr_buffer_lock(raster->buffer); surface_raster->locking_buffer = true; } @@ -372,6 +407,26 @@ struct wlr_raster *wlr_raster_from_surface(struct wlr_surface *surface) { return wlr_raster_lock(surface_raster->raster); } + // before we try to update the old raster, remove obsolete textures + struct wlr_raster_source *source, *tmp_source; + wl_list_for_each_safe(source, tmp_source, &surface_raster->raster->sources, link) { + struct wlr_texture *texture = source->texture; + + bool found = false; + struct wlr_surface_output *output; + wl_list_for_each(output, &surface->current_outputs, link) { + if (output->output->renderer == texture->renderer) { + found = true; + break; + } + } + + if (!found) { + raster_detach(surface_raster->raster, texture); + wlr_texture_destroy(texture); + } + } + raster = raster_update(surface_raster->raster, 
surface->current.buffer, &surface->buffer_damage, &options); } else { From dc7855f674fc02637b3f4328ed5a16a3fdeee551 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sun, 5 May 2024 13:36:33 -0400 Subject: [PATCH 16/29] backend/drm: Drop parent drm device Compositors should instead blit to secondary drm devices themselves. --- backend/backend.c | 2 +- backend/drm/atomic.c | 6 +- backend/drm/backend.c | 64 +---------------- backend/drm/drm.c | 122 ++------------------------------- backend/drm/monitor.c | 3 +- backend/drm/renderer.c | 85 ----------------------- include/backend/drm/drm.h | 10 --- include/backend/drm/renderer.h | 8 --- include/wlr/backend/drm.h | 11 +-- types/wlr_linux_dmabuf_v1.c | 12 +--- 10 files changed, 13 insertions(+), 310 deletions(-) diff --git a/backend/backend.c b/backend/backend.c index e4e8c8d8e..9d1b9b754 100644 --- a/backend/backend.c +++ b/backend/backend.c @@ -258,7 +258,7 @@ static struct wlr_backend *attempt_drm_backend(struct wlr_backend *backend, stru struct wlr_backend *primary_drm = NULL; for (size_t i = 0; i < (size_t)num_gpus; ++i) { - struct wlr_backend *drm = wlr_drm_backend_create(session, gpus[i], primary_drm); + struct wlr_backend *drm = wlr_drm_backend_create(session, gpus[i]); if (!drm) { wlr_log(WLR_ERROR, "Failed to create DRM backend"); continue; diff --git a/backend/drm/atomic.c b/backend/drm/atomic.c index 16b08f7a1..d3a91efbe 100644 --- a/backend/drm/atomic.c +++ b/backend/drm/atomic.c @@ -275,9 +275,9 @@ bool drm_atomic_connector_prepare(struct wlr_drm_connector_state *state, bool mo } int in_fence_fd = -1; - if (state->wait_timeline != NULL) { - in_fence_fd = wlr_drm_syncobj_timeline_export_sync_file(state->wait_timeline, - state->wait_point); + if (state->base->committed & WLR_OUTPUT_STATE_WAIT_TIMELINE) { + in_fence_fd = wlr_drm_syncobj_timeline_export_sync_file(state->base->wait_timeline, + state->base->wait_point); if (in_fence_fd < 0) { return false; } diff --git a/backend/drm/backend.c 
b/backend/drm/backend.c index d166f4672..b66907892 100644 --- a/backend/drm/backend.c +++ b/backend/drm/backend.c @@ -49,14 +49,9 @@ static void backend_destroy(struct wlr_backend *backend) { wl_list_remove(&drm->session_destroy.link); wl_list_remove(&drm->session_active.link); - wl_list_remove(&drm->parent_destroy.link); wl_list_remove(&drm->dev_change.link); wl_list_remove(&drm->dev_remove.link); - if (drm->parent) { - finish_drm_renderer(&drm->mgpu_renderer); - } - finish_drm_resources(drm); struct wlr_drm_fb *fb, *fb_tmp; @@ -104,11 +99,6 @@ bool wlr_backend_is_drm(struct wlr_backend *b) { return b->impl == &backend_impl; } -struct wlr_backend *wlr_drm_backend_get_parent(struct wlr_backend *backend) { - struct wlr_drm_backend *drm = get_drm_backend_from_backend(backend); - return drm->parent ? &drm->parent->backend : NULL; -} - static void handle_session_active(struct wl_listener *listener, void *data) { struct wlr_drm_backend *drm = wl_container_of(listener, drm, session_active); @@ -159,16 +149,8 @@ static void handle_session_destroy(struct wl_listener *listener, void *data) { backend_destroy(&drm->backend); } -static void handle_parent_destroy(struct wl_listener *listener, void *data) { - struct wlr_drm_backend *drm = - wl_container_of(listener, drm, parent_destroy); - backend_destroy(&drm->backend); -} - -struct wlr_backend *wlr_drm_backend_create(struct wlr_session *session, - struct wlr_device *dev, struct wlr_backend *parent) { +struct wlr_backend *wlr_drm_backend_create(struct wlr_session *session, struct wlr_device *dev) { assert(session && dev); - assert(!parent || wlr_backend_is_drm(parent)); char *name = drmGetDeviceNameFromFd2(dev->fd); if (name == NULL) { @@ -201,15 +183,6 @@ struct wlr_backend *wlr_drm_backend_create(struct wlr_session *session, drm->fd = dev->fd; drm->name = name; - if (parent != NULL) { - drm->parent = get_drm_backend_from_backend(parent); - - drm->parent_destroy.notify = handle_parent_destroy; - 
wl_signal_add(&parent->events.destroy, &drm->parent_destroy); - } else { - wl_list_init(&drm->parent_destroy.link); - } - drm->dev_change.notify = handle_dev_change; wl_signal_add(&dev->events.change, &drm->dev_change); @@ -234,52 +207,17 @@ struct wlr_backend *wlr_drm_backend_create(struct wlr_session *session, goto error_event; } - if (drm->parent) { - if (!init_drm_renderer(drm, &drm->mgpu_renderer)) { - wlr_log(WLR_ERROR, "Failed to initialize renderer"); - goto error_resources; - } - - // We'll perform a multi-GPU copy for all submitted buffers, we need - // to be able to texture from them - struct wlr_renderer *renderer = drm->mgpu_renderer.wlr_rend; - const struct wlr_drm_format_set *texture_formats = - wlr_renderer_get_texture_formats(renderer, WLR_BUFFER_CAP_DMABUF); - if (texture_formats == NULL) { - wlr_log(WLR_ERROR, "Failed to query renderer texture formats"); - goto error_mgpu_renderer; - } - - // Forbid implicit modifiers, because their meaning changes from one - // GPU to another. 
- for (size_t i = 0; i < texture_formats->len; i++) { - const struct wlr_drm_format *fmt = &texture_formats->formats[i]; - for (size_t j = 0; j < fmt->len; j++) { - uint64_t mod = fmt->modifiers[j]; - if (mod == DRM_FORMAT_MOD_INVALID) { - continue; - } - wlr_drm_format_set_add(&drm->mgpu_formats, fmt->format, mod); - } - } - } - drm->session_destroy.notify = handle_session_destroy; wl_signal_add(&session->events.destroy, &drm->session_destroy); return &drm->backend; -error_mgpu_renderer: - finish_drm_renderer(&drm->mgpu_renderer); -error_resources: - finish_drm_resources(drm); error_event: wl_list_remove(&drm->session_active.link); wl_event_source_remove(drm->drm_event); error_fd: wl_list_remove(&drm->dev_remove.link); wl_list_remove(&drm->dev_change.link); - wl_list_remove(&drm->parent_destroy.link); wlr_session_close_file(drm->session, dev); free(drm->name); free(drm); diff --git a/backend/drm/drm.c b/backend/drm/drm.c index a20442d40..4d6178248 100644 --- a/backend/drm/drm.c +++ b/backend/drm/drm.c @@ -65,15 +65,6 @@ bool check_drm_features(struct wlr_drm_backend *drm) { return false; } - if (drm->parent) { - if (drmGetCap(drm->parent->fd, DRM_CAP_PRIME, &cap) || - !(cap & DRM_PRIME_CAP_EXPORT)) { - wlr_log(WLR_ERROR, - "PRIME export not supported on primary GPU"); - return false; - } - } - if (drmSetClientCap(drm->fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1)) { wlr_log(WLR_ERROR, "DRM universal planes unsupported"); return false; @@ -369,8 +360,6 @@ static void drm_plane_finish_surface(struct wlr_drm_plane *plane) { drm_fb_clear(&plane->queued_fb); drm_fb_clear(&plane->current_fb); - - finish_drm_surface(&plane->mgpu_surf); } void finish_drm_resources(struct wlr_drm_backend *drm) { @@ -709,52 +698,8 @@ static bool drm_connector_state_update_primary_fb(struct wlr_drm_connector *conn struct wlr_drm_plane *plane = crtc->primary; struct wlr_buffer *source_buf = state->base->buffer; - struct wlr_drm_syncobj_timeline *wait_timeline = NULL; - uint64_t wait_point = 0; - if 
(state->base->committed & WLR_OUTPUT_STATE_WAIT_TIMELINE) { - wait_timeline = state->base->wait_timeline; - wait_point = state->base->wait_point; - } - assert(state->wait_timeline == NULL); - - struct wlr_buffer *local_buf; - if (drm->parent) { - struct wlr_drm_format format = {0}; - if (!drm_plane_pick_render_format(plane, &format, &drm->mgpu_renderer)) { - wlr_log(WLR_ERROR, "Failed to pick primary plane format"); - return false; - } - - // TODO: fallback to modifier-less buffer allocation - bool ok = init_drm_surface(&plane->mgpu_surf, &drm->mgpu_renderer, - source_buf->width, source_buf->height, &format); - wlr_drm_format_finish(&format); - if (!ok) { - return false; - } - - local_buf = drm_surface_blit(&plane->mgpu_surf, source_buf, - wait_timeline, wait_point); - if (local_buf == NULL) { - return false; - } - - if (plane->mgpu_surf.timeline != NULL) { - state->wait_timeline = wlr_drm_syncobj_timeline_ref(plane->mgpu_surf.timeline); - state->wait_point = plane->mgpu_surf.point; - } - } else { - local_buf = wlr_buffer_lock(source_buf); - - if (wait_timeline != NULL) { - state->wait_timeline = wlr_drm_syncobj_timeline_ref(wait_timeline); - state->wait_point = wait_point; - } - } - - bool ok = drm_fb_import(&state->primary_fb, drm, local_buf, + bool ok = drm_fb_import(&state->primary_fb, drm, source_buf, &plane->formats); - wlr_buffer_unlock(local_buf); if (!ok) { wlr_drm_conn_log(conn, WLR_DEBUG, "Failed to import buffer for scan-out"); @@ -769,7 +714,7 @@ static bool drm_connector_set_pending_layer_fbs(struct wlr_drm_connector *conn, struct wlr_drm_backend *drm = conn->backend; struct wlr_drm_crtc *crtc = conn->crtc; - if (!crtc || drm->parent) { + if (!crtc) { return false; } @@ -828,12 +773,6 @@ static bool drm_connector_prepare(struct wlr_drm_connector_state *conn_state, bo return false; } - if (test_only && conn->backend->parent) { - // If we're running as a secondary GPU, we can't perform an atomic - // commit without blitting a buffer. 
- return true; - } - if (state->committed & WLR_OUTPUT_STATE_BUFFER) { if (!drm_connector_state_update_primary_fb(conn, conn_state)) { return false; @@ -898,13 +837,6 @@ static bool drm_connector_commit_state(struct wlr_drm_connector *conn, goto out; } - if (test_only && conn->backend->parent) { - // If we're running as a secondary GPU, we can't perform an atomic - // commit without blitting a buffer. - ok = true; - goto out; - } - if (!pending.active && conn->crtc == NULL) { // Disabling an already-disabled connector ok = true; @@ -1094,28 +1026,7 @@ static bool drm_connector_set_cursor(struct wlr_output *output, return false; } - struct wlr_buffer *local_buf; - if (drm->parent) { - struct wlr_drm_format format = {0}; - if (!drm_plane_pick_render_format(plane, &format, &drm->mgpu_renderer)) { - wlr_log(WLR_ERROR, "Failed to pick cursor plane format"); - return false; - } - - bool ok = init_drm_surface(&plane->mgpu_surf, &drm->mgpu_renderer, - buffer->width, buffer->height, &format); - wlr_drm_format_finish(&format); - if (!ok) { - return false; - } - - local_buf = drm_surface_blit(&plane->mgpu_surf, buffer, NULL, 0); - if (local_buf == NULL) { - return false; - } - } else { - local_buf = wlr_buffer_lock(buffer); - } + struct wlr_buffer *local_buf = wlr_buffer_lock(buffer); bool ok = drm_fb_import(&conn->cursor_pending_fb, drm, local_buf, &plane->formats); @@ -1209,9 +1120,6 @@ static const struct wlr_drm_format_set *drm_connector_get_cursor_formats( if (!plane) { return NULL; } - if (conn->backend->parent) { - return &conn->backend->mgpu_formats; - } return &plane->formats; } @@ -1238,9 +1146,6 @@ static const struct wlr_drm_format_set *drm_connector_get_primary_formats( if (!drm_connector_alloc_crtc(conn)) { return NULL; } - if (conn->backend->parent) { - return &conn->backend->mgpu_formats; - } return &conn->crtc->primary->formats; } @@ -1647,9 +1552,6 @@ static bool connect_drm_connector(struct wlr_drm_connector *wlr_conn, } output->timeline = drm->iface != 
&legacy_iface; - if (drm->parent) { - output->timeline = output->timeline && drm->mgpu_renderer.wlr_rend->features.timeline; - } memset(wlr_conn->max_bpc_bounds, 0, sizeof(wlr_conn->max_bpc_bounds)); if (wlr_conn->props.max_bpc != 0) { @@ -2000,13 +1902,6 @@ bool commit_drm_device(struct wlr_drm_backend *drm, modeset |= output_state->base.allow_reconfiguration; } - if (test_only && drm->parent) { - // If we're running as a secondary GPU, we can't perform an atomic - // commit without blitting a buffer. - ok = true; - goto out; - } - uint32_t flags = 0; if (!test_only) { flags |= DRM_MODE_PAGE_FLIP_EVENT; @@ -2039,7 +1934,8 @@ static void handle_page_flip(int fd, unsigned seq, conn->pending_page_flip = NULL; } - uint32_t present_flags = WLR_OUTPUT_PRESENT_HW_CLOCK | WLR_OUTPUT_PRESENT_HW_COMPLETION; + uint32_t present_flags = WLR_OUTPUT_PRESENT_HW_CLOCK | WLR_OUTPUT_PRESENT_HW_COMPLETION | + WLR_OUTPUT_PRESENT_ZERO_COPY; if (!page_flip->async) { present_flags |= WLR_OUTPUT_PRESENT_VSYNC; } @@ -2074,14 +1970,6 @@ static void handle_page_flip(int fd, unsigned seq, drm_fb_move(&layer->current_fb, &layer->queued_fb); } - /* Don't report ZERO_COPY in multi-gpu situations, because we had to copy - * data between the GPUs, even if we were using the direct scanout - * interface. - */ - if (!drm->parent) { - present_flags |= WLR_OUTPUT_PRESENT_ZERO_COPY; - } - struct wlr_output_event_present present_event = { /* The DRM backend guarantees that the presentation event will be for * the last submitted frame. 
*/ diff --git a/backend/drm/monitor.c b/backend/drm/monitor.c index efd853747..c01f9ad57 100644 --- a/backend/drm/monitor.c +++ b/backend/drm/monitor.c @@ -25,8 +25,7 @@ static void handle_add_drm_card(struct wl_listener *listener, void *data) { } wlr_log(WLR_DEBUG, "Creating DRM backend for %s after hotplug", event->path); - struct wlr_backend *child_drm = wlr_drm_backend_create(backend_monitor->session, - dev, backend_monitor->primary_drm); + struct wlr_backend *child_drm = wlr_drm_backend_create(backend_monitor->session, dev); if (!child_drm) { wlr_log(WLR_ERROR, "Failed to create DRM backend after hotplug"); return; diff --git a/backend/drm/renderer.c b/backend/drm/renderer.c index 60acc57b9..1b3399889 100644 --- a/backend/drm/renderer.c +++ b/backend/drm/renderer.c @@ -13,35 +13,6 @@ #include "render/pixel_format.h" #include "render/wlr_renderer.h" -bool init_drm_renderer(struct wlr_drm_backend *drm, - struct wlr_drm_renderer *renderer) { - renderer->wlr_rend = renderer_autocreate_with_drm_fd(drm->fd); - if (!renderer->wlr_rend) { - wlr_log(WLR_ERROR, "Failed to create renderer"); - return false; - } - - uint32_t backend_caps = backend_get_buffer_caps(&drm->backend); - renderer->allocator = allocator_autocreate_with_drm_fd(backend_caps, - renderer->wlr_rend, drm->fd); - if (renderer->allocator == NULL) { - wlr_log(WLR_ERROR, "Failed to create allocator"); - wlr_renderer_destroy(renderer->wlr_rend); - return false; - } - - return true; -} - -void finish_drm_renderer(struct wlr_drm_renderer *renderer) { - if (!renderer) { - return; - } - - wlr_allocator_destroy(renderer->allocator); - wlr_renderer_destroy(renderer->wlr_rend); -} - void finish_drm_surface(struct wlr_drm_surface *surf) { if (!surf || !surf->renderer) { return; @@ -85,62 +56,6 @@ bool init_drm_surface(struct wlr_drm_surface *surf, return true; } -struct wlr_buffer *drm_surface_blit(struct wlr_drm_surface *surf, - struct wlr_buffer *buffer, - struct wlr_drm_syncobj_timeline *wait_timeline, uint64_t 
wait_point) { - struct wlr_renderer *renderer = surf->renderer->wlr_rend; - - if (surf->swapchain->width != buffer->width || - surf->swapchain->height != buffer->height) { - wlr_log(WLR_ERROR, "Surface size doesn't match buffer size"); - return NULL; - } - - struct wlr_texture *tex = wlr_texture_from_buffer(renderer, buffer); - if (tex == NULL) { - wlr_log(WLR_ERROR, "Failed to import source buffer into multi-GPU renderer"); - return NULL; - } - - struct wlr_buffer *dst = wlr_swapchain_acquire(surf->swapchain); - if (!dst) { - wlr_log(WLR_ERROR, "Failed to acquire multi-GPU swapchain buffer"); - goto error_tex; - } - - surf->point++; - const struct wlr_buffer_pass_options pass_options = { - .signal_timeline = surf->timeline, - .signal_point = surf->point, - }; - struct wlr_render_pass *pass = wlr_renderer_begin_buffer_pass(renderer, dst, &pass_options); - if (pass == NULL) { - wlr_log(WLR_ERROR, "Failed to begin render pass with multi-GPU destination buffer"); - goto error_dst; - } - - wlr_render_pass_add_texture(pass, &(struct wlr_render_texture_options){ - .texture = tex, - .blend_mode = WLR_RENDER_BLEND_MODE_NONE, - .wait_timeline = wait_timeline, - .wait_point = wait_point, - }); - if (!wlr_render_pass_submit(pass)) { - wlr_log(WLR_ERROR, "Failed to submit multi-GPU render pass"); - goto error_dst; - } - - wlr_texture_destroy(tex); - - return dst; - -error_dst: - wlr_buffer_unlock(dst); -error_tex: - wlr_texture_destroy(tex); - return NULL; -} - bool drm_plane_pick_render_format(struct wlr_drm_plane *plane, struct wlr_drm_format *fmt, struct wlr_drm_renderer *renderer) { const struct wlr_drm_format_set *render_formats = diff --git a/include/backend/drm/drm.h b/include/backend/drm/drm.h index 5b239a18e..d5e5d292c 100644 --- a/include/backend/drm/drm.h +++ b/include/backend/drm/drm.h @@ -19,9 +19,6 @@ struct wlr_drm_plane { uint32_t type; uint32_t id; - /* Only initialized on multi-GPU setups */ - struct wlr_drm_surface mgpu_surf; - /* Buffer submitted to the 
kernel, will be presented on next vblank */ struct wlr_drm_fb *queued_fb; /* Buffer currently displayed on screen */ @@ -80,7 +77,6 @@ struct wlr_drm_crtc { struct wlr_drm_backend { struct wlr_backend backend; - struct wlr_drm_backend *parent; const struct wlr_drm_interface *iface; bool addfb2_modifiers; @@ -99,7 +95,6 @@ struct wlr_drm_backend { struct wl_listener session_destroy; struct wl_listener session_active; - struct wl_listener parent_destroy; struct wl_listener dev_change; struct wl_listener dev_remove; @@ -108,15 +103,10 @@ struct wlr_drm_backend { struct wl_list page_flips; // wlr_drm_page_flip.link - /* Only initialized on multi-GPU setups */ - struct wlr_drm_renderer mgpu_renderer; - struct wlr_session *session; uint64_t cursor_width, cursor_height; - struct wlr_drm_format_set mgpu_formats; - bool supports_tearing_page_flips; }; diff --git a/include/backend/drm/renderer.h b/include/backend/drm/renderer.h index 2cf98fdb9..9831a86ff 100644 --- a/include/backend/drm/renderer.h +++ b/include/backend/drm/renderer.h @@ -25,19 +25,11 @@ struct wlr_drm_surface { uint64_t point; }; -bool init_drm_renderer(struct wlr_drm_backend *drm, - struct wlr_drm_renderer *renderer); -void finish_drm_renderer(struct wlr_drm_renderer *renderer); - bool init_drm_surface(struct wlr_drm_surface *surf, struct wlr_drm_renderer *renderer, int width, int height, const struct wlr_drm_format *drm_format); void finish_drm_surface(struct wlr_drm_surface *surf); -struct wlr_buffer *drm_surface_blit(struct wlr_drm_surface *surf, - struct wlr_buffer *buffer, - struct wlr_drm_syncobj_timeline *wait_timeline, uint64_t wait_point); - bool drm_plane_pick_render_format(struct wlr_drm_plane *plane, struct wlr_drm_format *fmt, struct wlr_drm_renderer *renderer); diff --git a/include/wlr/backend/drm.h b/include/wlr/backend/drm.h index 3ca6390ab..1f2651c68 100644 --- a/include/wlr/backend/drm.h +++ b/include/wlr/backend/drm.h @@ -32,21 +32,12 @@ struct wlr_drm_lease { /** * Creates a DRM backend 
using the specified GPU file descriptor (typically from * a device node in /dev/dri). - * - * To slave this to another DRM backend, pass it as the parent (which _must_ be - * a DRM backend, other kinds of backends raise SIGABRT). */ -struct wlr_backend *wlr_drm_backend_create(struct wlr_session *session, - struct wlr_device *dev, struct wlr_backend *parent); +struct wlr_backend *wlr_drm_backend_create(struct wlr_session *session, struct wlr_device *dev); bool wlr_backend_is_drm(struct wlr_backend *backend); bool wlr_output_is_drm(struct wlr_output *output); -/** - * Get the parent DRM backend, if any. - */ -struct wlr_backend *wlr_drm_backend_get_parent(struct wlr_backend *backend); - /** * Get the KMS connector object ID. */ diff --git a/types/wlr_linux_dmabuf_v1.c b/types/wlr_linux_dmabuf_v1.c index bfd97637a..4a2c88bad 100644 --- a/types/wlr_linux_dmabuf_v1.c +++ b/types/wlr_linux_dmabuf_v1.c @@ -1070,15 +1070,6 @@ static bool devid_from_fd(int fd, dev_t *devid) { return true; } -static bool is_secondary_drm_backend(struct wlr_backend *backend) { -#if WLR_HAS_DRM_BACKEND - return wlr_backend_is_drm(backend) && - wlr_drm_backend_get_parent(backend) != NULL; -#else - return false; -#endif -} - bool wlr_linux_dmabuf_feedback_v1_init_with_options(struct wlr_linux_dmabuf_feedback_v1 *feedback, const struct wlr_linux_dmabuf_feedback_v1_init_options *options) { assert(options->main_renderer != NULL); @@ -1121,8 +1112,7 @@ bool wlr_linux_dmabuf_feedback_v1_init_with_options(struct wlr_linux_dmabuf_feed wlr_log(WLR_ERROR, "Failed to intersect renderer and scanout formats"); goto error; } - } else if (options->scanout_primary_output != NULL && - !is_secondary_drm_backend(options->scanout_primary_output->backend)) { + } else if (options->scanout_primary_output != NULL) { int backend_drm_fd = wlr_backend_get_drm_fd(options->scanout_primary_output->backend); if (backend_drm_fd < 0) { wlr_log(WLR_ERROR, "Failed to get backend DRM FD"); From 
873ce330a7c606bfa806cdd5bf5f2a59ab5da05a Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sun, 28 Jan 2024 17:50:18 -0500 Subject: [PATCH 17/29] drm/monitor: Remove primary_drm This field is not used anymore. This also gives us the opportunity to clean up backend autocreation. --- backend/backend.c | 31 ++++++++++++------------------- backend/drm/monitor.c | 14 +------------- include/backend/drm/monitor.h | 5 +---- 3 files changed, 14 insertions(+), 36 deletions(-) diff --git a/backend/backend.c b/backend/backend.c index 9d1b9b754..7decc4ed6 100644 --- a/backend/backend.c +++ b/backend/backend.c @@ -240,23 +240,23 @@ static struct wlr_backend *attempt_headless_backend(struct wl_event_loop *loop) return backend; } -static struct wlr_backend *attempt_drm_backend(struct wlr_backend *backend, struct wlr_session *session) { +static bool attempt_drm_backend(struct wlr_backend *backend, struct wlr_session *session) { #if WLR_HAS_DRM_BACKEND struct wlr_device *gpus[8]; ssize_t num_gpus = wlr_session_find_gpus(session, 8, gpus); if (num_gpus < 0) { wlr_log(WLR_ERROR, "Failed to find GPUs"); - return NULL; + return false; } if (num_gpus == 0) { wlr_log(WLR_ERROR, "Found 0 GPUs, cannot create backend"); - return NULL; + return false; } else { wlr_log(WLR_INFO, "Found %zu GPUs", num_gpus); } - struct wlr_backend *primary_drm = NULL; + bool ok = false; for (size_t i = 0; i < (size_t)num_gpus; ++i) { struct wlr_backend *drm = wlr_drm_backend_create(session, gpus[i]); if (!drm) { @@ -264,22 +264,20 @@ static struct wlr_backend *attempt_drm_backend(struct wlr_backend *backend, stru continue; } - if (!primary_drm) { - primary_drm = drm; - } - wlr_multi_backend_add(backend, drm); + ok = true; } - if (!primary_drm) { + + if (!ok) { wlr_log(WLR_ERROR, "Could not successfully create backend on any GPU"); - return NULL; + return false; } if (getenv("WLR_DRM_DEVICES") == NULL) { - drm_backend_monitor_create(backend, primary_drm, session); + drm_backend_monitor_create(backend, 
session); } - return primary_drm; + return true; #else wlr_log(WLR_ERROR, "Cannot create DRM backend: disabled at compile-time"); return NULL; @@ -319,7 +317,7 @@ static bool attempt_backend_by_name(struct wl_event_loop *loop, backend = attempt_libinput_backend(*session_ptr); } else { // attempt_drm_backend() adds the multi drm backends itself - return attempt_drm_backend(multi, *session_ptr) != NULL; + return attempt_drm_backend(multi, *session_ptr); } } else { wlr_log(WLR_ERROR, "unrecognized backend '%s'", name); @@ -423,16 +421,11 @@ struct wlr_backend *wlr_backend_autocreate(struct wl_event_loop *loop, goto error; } - struct wlr_backend *primary_drm = attempt_drm_backend(multi, session); - if (primary_drm == NULL) { + if (!attempt_drm_backend(multi, session)) { wlr_log(WLR_ERROR, "Failed to open any DRM device"); goto error; } - if (!auto_backend_monitor_create(multi, primary_drm)) { - goto error; - } - success: if (session_ptr != NULL) { *session_ptr = session; diff --git a/backend/drm/monitor.c b/backend/drm/monitor.c index c01f9ad57..6e5a6376d 100644 --- a/backend/drm/monitor.c +++ b/backend/drm/monitor.c @@ -7,7 +7,6 @@ static void drm_backend_monitor_destroy(struct wlr_drm_backend_monitor* monitor) { wl_list_remove(&monitor->session_add_drm_card.link); wl_list_remove(&monitor->session_destroy.link); - wl_list_remove(&monitor->primary_drm_destroy.link); wl_list_remove(&monitor->multi_destroy.link); free(monitor); } @@ -49,12 +48,6 @@ static void handle_session_destroy(struct wl_listener *listener, void *data) { drm_backend_monitor_destroy(backend_monitor); } -static void handle_primary_drm_destroy(struct wl_listener *listener, void *data) { - struct wlr_drm_backend_monitor *backend_monitor = - wl_container_of(listener, backend_monitor, primary_drm_destroy); - drm_backend_monitor_destroy(backend_monitor); -} - static void handle_multi_destroy(struct wl_listener *listener, void *data) { struct wlr_drm_backend_monitor *backend_monitor = 
wl_container_of(listener, backend_monitor, multi_destroy); @@ -62,8 +55,7 @@ static void handle_multi_destroy(struct wl_listener *listener, void *data) { } struct wlr_drm_backend_monitor *drm_backend_monitor_create( - struct wlr_backend *multi, struct wlr_backend *primary_drm, - struct wlr_session *session) { + struct wlr_backend *multi, struct wlr_session *session) { struct wlr_drm_backend_monitor *monitor = calloc(1, sizeof(*monitor)); if (!monitor) { wlr_log_errno(WLR_ERROR, "Allocation failed"); @@ -71,7 +63,6 @@ struct wlr_drm_backend_monitor *drm_backend_monitor_create( } monitor->multi = multi; - monitor->primary_drm = primary_drm; monitor->session = session; monitor->session_add_drm_card.notify = handle_add_drm_card; @@ -80,9 +71,6 @@ struct wlr_drm_backend_monitor *drm_backend_monitor_create( monitor->session_destroy.notify = handle_session_destroy; wl_signal_add(&session->events.destroy, &monitor->session_destroy); - monitor->primary_drm_destroy.notify = handle_primary_drm_destroy; - wl_signal_add(&primary_drm->events.destroy, &monitor->primary_drm_destroy); - monitor->multi_destroy.notify = handle_multi_destroy; wl_signal_add(&multi->events.destroy, &monitor->multi_destroy); diff --git a/include/backend/drm/monitor.h b/include/backend/drm/monitor.h index 518171932..06b182a9f 100644 --- a/include/backend/drm/monitor.h +++ b/include/backend/drm/monitor.h @@ -8,17 +8,14 @@ */ struct wlr_drm_backend_monitor { struct wlr_backend *multi; - struct wlr_backend *primary_drm; struct wlr_session *session; struct wl_listener multi_destroy; - struct wl_listener primary_drm_destroy; struct wl_listener session_destroy; struct wl_listener session_add_drm_card; }; struct wlr_drm_backend_monitor *drm_backend_monitor_create( - struct wlr_backend *multi, struct wlr_backend *primary_drm, - struct wlr_session *session); + struct wlr_backend *multi, struct wlr_session *session); #endif From 61c4ba5f7039fa420827f897058307c604547dc1 Mon Sep 17 00:00:00 2001 From: Alexander 
Orzechowski Date: Sat, 1 Jul 2023 15:31:18 -0400 Subject: [PATCH 18/29] wlr_scene: Precompute raster textures before rendering --- types/scene/wlr_scene.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/types/scene/wlr_scene.c b/types/scene/wlr_scene.c index 3c86c3f98..485591d98 100644 --- a/types/scene/wlr_scene.c +++ b/types/scene/wlr_scene.c @@ -2087,6 +2087,19 @@ bool wlr_scene_output_build_state(struct wlr_scene_output *scene_output, return false; } + // upload all the textures that will be used within this pass before we start + // rendering. We need to do this because some of those textures might be + // created as part of a multirender blit. + for (int i = list_len - 1; i >= 0; i--) { + struct render_list_entry *entry = &list_data[i]; + if (entry->node->type != WLR_SCENE_NODE_BUFFER) { + continue; + } + + struct wlr_scene_buffer *buffer = wlr_scene_buffer_from_node(entry->node); + wlr_raster_create_texture(buffer->raster, output->renderer); + } + render_data.render_pass = render_pass; pixman_region32_init(&render_data.damage); From a28efaafa9a4a4de1735304f2b98859f51cbc102 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sat, 14 Sep 2024 18:02:46 -0400 Subject: [PATCH 19/29] wlr_raster: Implement multi-renderer blits We also need to introduce allocator variants to wlr_raster_upload_texture and wlr_raster_attach so that the given allocator can be used to allocate a staging buffer to aid in blitting --- include/wlr/types/wlr_raster.h | 19 +++- types/wlr_raster.c | 178 +++++++++++++++++++++++++++++++-- 2 files changed, 185 insertions(+), 12 deletions(-) diff --git a/include/wlr/types/wlr_raster.h b/include/wlr/types/wlr_raster.h index 39295c6e8..2857dbe3f 100644 --- a/include/wlr/types/wlr_raster.h +++ b/include/wlr/types/wlr_raster.h @@ -18,12 +18,15 @@ struct wlr_texture; struct wlr_renderer; struct wlr_drm_syncobj_timeline; struct wlr_surface; +struct wlr_allocator; struct wlr_raster_source { struct wlr_texture *texture; + struct 
wlr_allocator *allocator; // may be NULL struct wl_list link; struct wl_listener renderer_destroy; + struct wl_listener allocator_destroy; }; struct wlr_raster { @@ -47,8 +50,6 @@ struct wlr_raster { size_t n_locks; struct wl_listener buffer_release; - - struct wl_listener renderer_destroy; }; struct wlr_raster_create_options { @@ -92,6 +93,20 @@ void wlr_raster_unlock(struct wlr_raster *raster); struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster *raster, struct wlr_renderer *renderer); +/** + * Returns the texture allocated for this renderer. If there is none, + * a new texture will be created and attached to this wlr_raster. Users do not + * own the texture returned by this function and can only be used for read-only + * purposes. + * + * An optional allocator can be provided which will be used to allocate staging + * buffers to blit between graphics devices if needed. + * + * Will return NULL if the creation was unsuccessful. + */ +struct wlr_texture *wlr_raster_obtain_texture_with_allocator(struct wlr_raster *raster, + struct wlr_renderer *renderer, struct wlr_allocator *allocator); + /** * Creates a wlr_raster from a surface. 
This will automatically deduplicate * rasters if multiple are consumed from the same surface so that redundant diff --git a/types/wlr_raster.c b/types/wlr_raster.c index 68f05c821..5c073dc27 100644 --- a/types/wlr_raster.c +++ b/types/wlr_raster.c @@ -1,13 +1,21 @@ #include +#include #include +#include +#include +#include #include #include #include +#include #include #include #include #include #include +#include +#include "render/drm_format_set.h" +#include "render/wlr_renderer.h" #include "types/wlr_buffer.h" static void raster_handle_buffer_release(struct wl_listener *listener, void *data) { @@ -49,6 +57,7 @@ struct wlr_raster *wlr_raster_create(struct wlr_buffer *buffer, static void raster_source_destroy(struct wlr_raster_source *source) { wl_list_remove(&source->link); wl_list_remove(&source->renderer_destroy.link); + wl_list_remove(&source->allocator_destroy.link); free(source); } @@ -107,7 +116,15 @@ static void handle_renderer_destroy(struct wl_listener *listener, void *data) { raster_source_destroy(source); } -static void raster_attach(struct wlr_raster *raster, struct wlr_texture *texture) { +static void handle_allocator_destroy(struct wl_listener *listener, void *data) { + struct wlr_raster_source *source = wl_container_of(listener, source, allocator_destroy); + source->allocator = NULL; + wl_list_remove(&source->allocator_destroy.link); + wl_list_init(&source->allocator_destroy.link); +} + +static void raster_attach_with_allocator(struct wlr_raster *raster, + struct wlr_texture *texture, struct wlr_allocator *allocator) { assert(texture->width == raster->width && texture->height == raster->height); struct wlr_raster_source *source; @@ -123,8 +140,20 @@ static void raster_attach(struct wlr_raster *raster, struct wlr_texture *texture source->renderer_destroy.notify = handle_renderer_destroy; wl_signal_add(&texture->renderer->events.destroy, &source->renderer_destroy); + if (allocator) { + source->allocator_destroy.notify = handle_allocator_destroy; + 
wl_signal_add(&allocator->events.destroy, &source->allocator_destroy); + } else { + wl_list_init(&source->allocator_destroy.link); + } + wl_list_insert(&raster->sources, &source->link); source->texture = texture; + source->allocator = allocator; +} + +static void raster_attach(struct wlr_raster *raster, struct wlr_texture *texture) { + raster_attach_with_allocator(raster, texture, NULL); } static struct wlr_texture *wlr_raster_get_texture(struct wlr_raster *raster, @@ -139,27 +168,156 @@ static struct wlr_texture *wlr_raster_get_texture(struct wlr_raster *raster, return NULL; } -struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster *raster, +static bool compute_import_buffer_format(struct wlr_raster *raster, struct wlr_drm_format *drm_fmt, + struct wlr_renderer *dst) { + const struct wlr_drm_format_set *texture_formats = + wlr_renderer_get_texture_formats(dst, WLR_BUFFER_CAP_DMABUF); + if (!texture_formats) { + wlr_log(WLR_ERROR, "Failed to get texture_formats"); + return NULL; + } + + // For now, let's only use XRGB + uint32_t fmt = raster->opaque ? 
DRM_FORMAT_XRGB8888 : DRM_FORMAT_ARGB8888; + const struct wlr_drm_format *drm_fmt_inv = + wlr_drm_format_set_get(texture_formats, fmt); + + if (!wlr_drm_format_copy(drm_fmt, drm_fmt_inv)) { + return false; + } + + for (size_t i = 0; i < drm_fmt->len; i++) { + uint64_t mod = drm_fmt->modifiers[i]; + if (mod != DRM_FORMAT_MOD_INVALID) { + continue; + } + + for (size_t j = i; j + 1 < drm_fmt->len; j++) { + drm_fmt->modifiers[j] = drm_fmt->modifiers[j + 1]; + } + + drm_fmt->len--; + break; + } + + return true; +} + +static struct wlr_buffer *raster_try_blit(struct wlr_raster *raster, + struct wlr_raster_source *source, struct wlr_renderer *dst) { + if (!source->allocator) { + return NULL; + } + + wlr_log(WLR_DEBUG, "Attempting a multigpu blit through a GPU"); + + struct wlr_renderer *src = source->texture->renderer; + + // The src needs to be able to render into this format + const struct wlr_drm_format_set *render_formats = + wlr_renderer_get_render_formats(src); + if (!render_formats) { + wlr_log(WLR_ERROR, "Failed to get render_formats"); + return NULL; + } + + struct wlr_drm_format fmt = {0}; + if (!compute_import_buffer_format(raster, &fmt, dst)) { + wlr_log(WLR_ERROR, "Could not find common format modifiers for all GPUs"); + return NULL; + } + + if (!wlr_drm_format_intersect(&fmt, &fmt, + wlr_drm_format_set_get(render_formats, fmt.format))) { + wlr_drm_format_finish(&fmt); + return NULL; + } + + struct wlr_buffer *buffer = wlr_allocator_create_buffer( + source->allocator, raster->width, raster->height, &fmt); + wlr_drm_format_finish(&fmt); + if (!buffer) { + wlr_log(WLR_ERROR, "Failed to allocate multirenderer blit buffer"); + return NULL; + } + + struct wlr_render_pass *pass = wlr_renderer_begin_buffer_pass(src, buffer, NULL); + if (!pass) { + wlr_log(WLR_ERROR, "Failed to create a render pass"); + wlr_buffer_drop(buffer); + return NULL; + } + + wlr_render_pass_add_texture(pass, &(struct wlr_render_texture_options) { + .texture = source->texture, + .blend_mode 
= WLR_RENDER_BLEND_MODE_NONE, + }); + + if (!wlr_render_pass_submit(pass)) { + wlr_log(WLR_ERROR, "Failed to renedr to a multigpu blit buffer"); + wlr_buffer_drop(buffer); + return NULL; + } + + return buffer; +} + +static struct wlr_texture *raster_try_texture_from_blit(struct wlr_raster *raster, struct wlr_renderer *renderer) { + struct wlr_buffer *imported = NULL; + + struct wlr_raster_source *source; + wl_list_for_each(source, &raster->sources, link) { + imported = raster_try_blit(raster, source, renderer); + if (imported) { + break; + } + } + + if (!imported) { + return NULL; + } + + wlr_buffer_drop(imported); + + return wlr_texture_from_buffer(renderer, imported); +} + +struct wlr_texture *wlr_raster_obtain_texture_with_allocator(struct wlr_raster *raster, + struct wlr_renderer *renderer, struct wlr_allocator *allocator) { struct wlr_texture *texture = wlr_raster_get_texture(raster, renderer); if (texture) { return texture; } - assert(raster->buffer); + if (raster->buffer) { + struct wlr_client_buffer *client_buffer = + wlr_client_buffer_get(raster->buffer); + if (client_buffer != NULL) { + return client_buffer->texture; + } - struct wlr_client_buffer *client_buffer = - wlr_client_buffer_get(raster->buffer); - if (client_buffer != NULL) { - return client_buffer->texture; + // if we have a buffer, try and import that + texture = wlr_texture_from_buffer(renderer, raster->buffer); + if (texture) { + raster_attach_with_allocator(raster, texture, allocator); + return texture; + } } - texture = wlr_texture_from_buffer(renderer, raster->buffer); + // try to blit using the textures already available to us + texture = raster_try_texture_from_blit(raster, renderer); if (texture) { - raster_attach(raster, texture); + raster_attach_with_allocator(raster, texture, allocator); + return texture; } - return texture; + return NULL; +} + +struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster *raster, + struct wlr_renderer *renderer) { + return 
wlr_raster_create_texture_with_allocator(raster, renderer, NULL); } struct raster_update_state { From 99d543379eec67e4bf2b8f916c109e99308a0eaa Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sat, 14 Sep 2024 18:03:04 -0400 Subject: [PATCH 20/29] wlr_raster: Implement explicit sync for multigpu blits --- types/wlr_raster.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/types/wlr_raster.c b/types/wlr_raster.c index 5c073dc27..651fd2f1f 100644 --- a/types/wlr_raster.c +++ b/types/wlr_raster.c @@ -241,7 +241,18 @@ static struct wlr_buffer *raster_try_blit(struct wlr_raster *raster, return NULL; } - struct wlr_render_pass *pass = wlr_renderer_begin_buffer_pass(src, buffer, NULL); + struct wlr_drm_syncobj_timeline *timeline = NULL; + int drm_fd = wlr_renderer_get_drm_fd(src); + if (src->features.timeline && drm_fd >= 0) { + timeline = wlr_drm_syncobj_timeline_create(drm_fd); + } + + const struct wlr_buffer_pass_options pass_options = { + .signal_timeline = timeline, + .signal_point = 1, + }; + + struct wlr_render_pass *pass = wlr_renderer_begin_buffer_pass(src, buffer, &pass_options); if (!pass) { wlr_log(WLR_ERROR, "Failed to create a render pass"); wlr_buffer_drop(buffer); @@ -251,8 +262,12 @@ static struct wlr_buffer *raster_try_blit(struct wlr_raster *raster, wlr_render_pass_add_texture(pass, &(struct wlr_render_texture_options) { .texture = source->texture, .blend_mode = WLR_RENDER_BLEND_MODE_NONE, + .wait_timeline = raster->wait_timeline, + .wait_point = raster->wait_point, }); + + wlr_drm_syncobj_timeline_unref(timeline); + if (!wlr_render_pass_submit(pass)) { wlr_log(WLR_ERROR, "Failed to renedr to a multigpu blit buffer"); wlr_buffer_drop(buffer); From 5eb9a2ea10c7af1addbb12167c36a207d1d36d6c Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Tue, 27 Feb 2024 14:47:38 -0500 Subject: [PATCH 21/29] wlr_raster: Implement CPU blits We need this really slow path if the user is using GPUs that don't have common compatible modifiers. 
One example of a vendor that doesn't support rendering to a LINEAR modifier (which otherwise should always exist) is NVIDIA. --- types/wlr_raster.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/types/wlr_raster.c b/types/wlr_raster.c index 651fd2f1f..6347e3ef1 100644 --- a/types/wlr_raster.c +++ b/types/wlr_raster.c @@ -298,6 +298,47 @@ static struct wlr_texture *raster_try_texture_from_blit(struct wlr_raster *raste return wlr_texture_from_buffer(renderer, imported); } +static struct wlr_texture *raster_try_cpu_copy(struct wlr_raster *raster, + struct wlr_renderer *dst) { + if (wl_list_empty(&raster->sources)) { + return NULL; + } + + wlr_log(WLR_DEBUG, "Performing multigpu blit through the CPU"); + struct wlr_texture *texture = NULL; + + uint32_t format = DRM_FORMAT_ARGB8888; + uint32_t stride = raster->width * 4; + void *data = malloc(stride * raster->height); + if (!data) { + return NULL; + } + + struct wlr_raster_source *source; + wl_list_for_each(source, &raster->sources, link) { + if (!wlr_texture_read_pixels(source->texture, &(struct wlr_texture_read_pixels_options){ + .format = format, + .stride = stride, + .data = data, + })) { + wlr_log(WLR_ERROR, "Failed to read pixels"); + continue; + } + + texture = wlr_texture_from_pixels(dst, format, + stride, raster->width, raster->height, data); + if (!texture) { + wlr_log(WLR_ERROR, "Failed to upload texture from cpu data"); + continue; + } + + break; + } + + free(data); + return texture; +} + struct wlr_texture *wlr_raster_obtain_texture_with_allocator(struct wlr_raster *raster, struct wlr_renderer *renderer, struct wlr_allocator *allocator) { struct wlr_texture *texture = wlr_raster_get_texture(raster, renderer); @@ -327,6 +368,13 @@ struct wlr_texture *wlr_raster_obtain_texture_with_allocator(struct wlr_raster * return texture; } + // as a last resort we need to do a copy through the CPU + texture = raster_try_cpu_copy(raster, renderer); + if (texture) { + 
raster_attach_with_allocator(raster, texture, allocator); + return texture; + } + return NULL; } From b0e8e6eae796bb08fa1fffe84d26d525c8d54268 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Fri, 2 Feb 2024 20:38:56 -0500 Subject: [PATCH 22/29] linux_dmabuf_v1: Introduce main device Introduce properties for the main renderer and allocator the compositor wants wlroots to use when it needs to blit to a staging buffer during multigpu. We need to perform an additional blit if the modifier is not compatible with the target GPU. --- include/wlr/types/wlr_linux_dmabuf_v1.h | 30 +++++++++++++ types/wlr_linux_dmabuf_v1.c | 56 +++++++++++++++++++++++-- types/wlr_raster.c | 26 +++++++++++- 3 files changed, 107 insertions(+), 5 deletions(-) diff --git a/include/wlr/types/wlr_linux_dmabuf_v1.h b/include/wlr/types/wlr_linux_dmabuf_v1.h index cf967f952..ebf13364d 100644 --- a/include/wlr/types/wlr_linux_dmabuf_v1.h +++ b/include/wlr/types/wlr_linux_dmabuf_v1.h @@ -17,6 +17,8 @@ #include struct wlr_surface; +struct wlr_renderer; +struct wlr_allocator; struct wlr_dmabuf_v1_buffer { struct wlr_buffer base; @@ -27,6 +29,8 @@ struct wlr_dmabuf_v1_buffer { // private state struct wl_listener release; + + struct wlr_linux_dmabuf_v1 *linux_dmabuf_v1; }; /** @@ -63,7 +67,13 @@ struct wlr_linux_dmabuf_v1 { int main_device_fd; // to sanity check FDs sent by clients, -1 if unavailable + // used for multigpu + struct wlr_renderer *main_renderer; + struct wlr_allocator *main_allocator; + struct wl_listener display_destroy; + struct wl_listener main_renderer_destroy; + struct wl_listener main_allocator_destroy; bool (*check_dmabuf_callback)(struct wlr_dmabuf_attributes *attribs, void *data); void *check_dmabuf_callback_data; @@ -78,6 +88,23 @@ struct wlr_linux_dmabuf_v1 { struct wlr_linux_dmabuf_v1 *wlr_linux_dmabuf_v1_create(struct wl_display *display, uint32_t version, const struct wlr_linux_dmabuf_feedback_v1 *default_feedback); +/** + * Returns the associated dmabuf object from a 
generic buffer. Returnns + * NULL if the generic buffer is not a dmabuf. + */ +struct wlr_dmabuf_v1_buffer *wlr_dmabuf_v1_buffer_try_from_buffer( + struct wlr_buffer *buffer); + +/** + * Sets the main blit device used for multigpu. With multigpu, dmabufs with + * implicit modifiers or just modifiers that aren't supported by other GPUs + * might need to be blitted into a staging buffer with correct modifiers. This + * will be done with this allocator (to allocate the staging buffer) and renderer + * to render into the staging buffer. + */ +void wlr_linux_dmabuf_v1_set_main_blit_device(struct wlr_linux_dmabuf_v1 *linux_dmabuf, + struct wlr_renderer *renderer, struct wlr_allocator *allocator); + /** * Create the linux-dmabuf-v1 global. * @@ -120,6 +147,9 @@ void wlr_linux_dmabuf_feedback_v1_finish(struct wlr_linux_dmabuf_feedback_v1 *fe struct wlr_linux_dmabuf_feedback_v1_init_options { // Main renderer used by the compositor struct wlr_renderer *main_renderer; + // Optional allocator created for the primary GPU used by the default feedback. + // This is used for multi gpu for allocating staging buffers. 
+ struct wlr_allocator *main_allocator; // Output on which direct scan-out is possible on the primary plane, or NULL struct wlr_output *scanout_primary_output; // Output layer feedback event, or NULL diff --git a/types/wlr_linux_dmabuf_v1.c b/types/wlr_linux_dmabuf_v1.c index 4a2c88bad..cd22fd02d 100644 --- a/types/wlr_linux_dmabuf_v1.c +++ b/types/wlr_linux_dmabuf_v1.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -92,16 +93,19 @@ struct wlr_dmabuf_v1_buffer *wlr_dmabuf_v1_buffer_try_from_buffer_resource( static const struct wlr_buffer_impl buffer_impl; -static struct wlr_dmabuf_v1_buffer *dmabuf_v1_buffer_from_buffer( +struct wlr_dmabuf_v1_buffer *wlr_dmabuf_v1_buffer_try_from_buffer( struct wlr_buffer *wlr_buffer) { - assert(wlr_buffer->impl == &buffer_impl); + if (wlr_buffer->impl != &buffer_impl) { + return NULL; + } + struct wlr_dmabuf_v1_buffer *buffer = wl_container_of(wlr_buffer, buffer, base); return buffer; } static void buffer_destroy(struct wlr_buffer *wlr_buffer) { struct wlr_dmabuf_v1_buffer *buffer = - dmabuf_v1_buffer_from_buffer(wlr_buffer); + wlr_dmabuf_v1_buffer_try_from_buffer(wlr_buffer); if (buffer->resource != NULL) { wl_resource_set_user_data(buffer->resource, NULL); } @@ -113,7 +117,7 @@ static void buffer_destroy(struct wlr_buffer *wlr_buffer) { static bool buffer_get_dmabuf(struct wlr_buffer *wlr_buffer, struct wlr_dmabuf_attributes *attribs) { struct wlr_dmabuf_v1_buffer *buffer = - dmabuf_v1_buffer_from_buffer(wlr_buffer); + wlr_dmabuf_v1_buffer_try_from_buffer(wlr_buffer); *attribs = buffer->attributes; return true; } @@ -368,6 +372,7 @@ static void params_create_common(struct wl_resource *params_resource, &wl_buffer_impl, buffer, buffer_handle_resource_destroy); buffer->attributes = attribs; + buffer->linux_dmabuf_v1 = linux_dmabuf; buffer->release.notify = buffer_handle_release; wl_signal_add(&buffer->base.events.release, &buffer->release); @@ -872,6 +877,8 @@ static void 
linux_dmabuf_v1_destroy(struct wlr_linux_dmabuf_v1 *linux_dmabuf) { } wl_list_remove(&linux_dmabuf->display_destroy.link); + wl_list_remove(&linux_dmabuf->main_renderer_destroy.link); + wl_list_remove(&linux_dmabuf->main_allocator_destroy.link); wl_global_destroy(linux_dmabuf->global); free(linux_dmabuf); @@ -959,6 +966,8 @@ struct wlr_linux_dmabuf_v1 *wlr_linux_dmabuf_v1_create(struct wl_display *displa linux_dmabuf->main_device_fd = -1; wl_list_init(&linux_dmabuf->surfaces); + wl_list_init(&linux_dmabuf->main_renderer_destroy.link); + wl_list_init(&linux_dmabuf->main_allocator_destroy.link); wl_signal_init(&linux_dmabuf->events.destroy); linux_dmabuf->global = wl_global_create(display, &zwp_linux_dmabuf_v1_interface, @@ -1161,3 +1170,42 @@ error: wlr_linux_dmabuf_feedback_v1_finish(feedback); return false; } + +static void linux_dmabuf_unregister_main_blit_device(struct wlr_linux_dmabuf_v1 *linux_dmabuf) { + wl_list_remove(&linux_dmabuf->main_renderer_destroy.link); + wl_list_remove(&linux_dmabuf->main_allocator_destroy.link); + wl_list_init(&linux_dmabuf->main_renderer_destroy.link); + wl_list_init(&linux_dmabuf->main_allocator_destroy.link); + + linux_dmabuf->main_renderer = NULL; + linux_dmabuf->main_allocator = NULL; +} + +static void linux_dmabuf_handle_main_renderer_destroy(struct wl_listener *listener, void *data) { + struct wlr_linux_dmabuf_v1 *linux_dmabuf = wl_container_of( + listener, linux_dmabuf, main_renderer_destroy); + linux_dmabuf_unregister_main_blit_device(linux_dmabuf); +} + +static void linux_dmabuf_handle_main_allocator_destroy(struct wl_listener *listener, void *data) { + struct wlr_linux_dmabuf_v1 *linux_dmabuf = wl_container_of( + listener, linux_dmabuf, main_allocator_destroy); + linux_dmabuf_unregister_main_blit_device(linux_dmabuf); +} + +void wlr_linux_dmabuf_v1_set_main_blit_device(struct wlr_linux_dmabuf_v1 *linux_dmabuf, + struct wlr_renderer *renderer, struct wlr_allocator *allocator) { + assert(renderer != NULL && allocator != 
NULL); + + wl_list_remove(&linux_dmabuf->main_renderer_destroy.link); + wl_list_remove(&linux_dmabuf->main_allocator_destroy.link); + + linux_dmabuf->main_renderer_destroy.notify = linux_dmabuf_handle_main_renderer_destroy; + wl_signal_add(&renderer->events.destroy, &linux_dmabuf->main_renderer_destroy); + + linux_dmabuf->main_allocator_destroy.notify = linux_dmabuf_handle_main_allocator_destroy; + wl_signal_add(&allocator->events.destroy, &linux_dmabuf->main_allocator_destroy); + + linux_dmabuf->main_renderer = renderer; + linux_dmabuf->main_allocator = allocator; +} diff --git a/types/wlr_raster.c b/types/wlr_raster.c index 6347e3ef1..4cdfe365a 100644 --- a/types/wlr_raster.c +++ b/types/wlr_raster.c @@ -6,8 +6,8 @@ #include #include #include +#include #include -#include #include #include #include @@ -368,6 +368,30 @@ struct wlr_texture *wlr_raster_obtain_texture_with_allocator(struct wlr_raster * return texture; } + // if this is a linux_dmabuf_v1 buffer, then we can try to use the + // main device for blitting which should support all the modifiers we + // advertise. 
+ if (raster->buffer) { + struct wlr_dmabuf_v1_buffer *dmabuf_buffer = + wlr_dmabuf_v1_buffer_try_from_buffer(raster->buffer); + if (dmabuf_buffer && dmabuf_buffer->linux_dmabuf_v1->main_renderer) { + struct wlr_linux_dmabuf_v1 *linux_dmabuf = dmabuf_buffer->linux_dmabuf_v1; + struct wlr_texture *texture = wlr_texture_from_buffer( + linux_dmabuf->main_renderer, raster->buffer); + if (texture) { + wlr_raster_attach_with_allocator(raster, texture, + linux_dmabuf->main_allocator); + + // try to create a blit but this time through the primary device + texture = raster_try_texture_from_blit(raster, renderer); + if (texture) { + wlr_raster_attach_with_allocator(raster, texture, allocator); + return texture; + } + } + } + } + // as a last resort we need to do a copy through the CPU texture = raster_try_cpu_copy(raster, renderer); if (texture) { From 4fe6a8e8575646405439fa2e6c3950f51779170d Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sun, 5 May 2024 13:41:50 -0400 Subject: [PATCH 23/29] Use wlr_raster allocator variants --- types/scene/wlr_scene.c | 3 ++- types/wlr_raster.c | 16 +++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/types/scene/wlr_scene.c b/types/scene/wlr_scene.c index 485591d98..bd6fb33b0 100644 --- a/types/scene/wlr_scene.c +++ b/types/scene/wlr_scene.c @@ -2097,7 +2097,8 @@ bool wlr_scene_output_build_state(struct wlr_scene_output *scene_output, } struct wlr_scene_buffer *buffer = wlr_scene_buffer_from_node(entry->node); - wlr_raster_create_texture(buffer->raster, output->renderer); + wlr_raster_obtain_texture_with_allocator(buffer->raster, + output->renderer, output->allocator); } render_data.render_pass = render_pass; diff --git a/types/wlr_raster.c b/types/wlr_raster.c index 4cdfe365a..86303415a 100644 --- a/types/wlr_raster.c +++ b/types/wlr_raster.c @@ -152,10 +152,6 @@ static void raster_attach_with_allocator(struct wlr_raster *raster, source->allocator = allocator; } -static void raster_attach(struct 
wlr_raster *raster, struct wlr_texture *texture) { - raster_attach_with_allocator(raster, texture, NULL); -} - static struct wlr_texture *wlr_raster_get_texture(struct wlr_raster *raster, struct wlr_renderer *renderer) { struct wlr_raster_source *source; @@ -379,13 +375,13 @@ struct wlr_texture *wlr_raster_obtain_texture_with_allocator(struct wlr_raster * struct wlr_texture *texture = wlr_texture_from_buffer( linux_dmabuf->main_renderer, raster->buffer); if (texture) { - wlr_raster_attach_with_allocator(raster, texture, + raster_attach_with_allocator(raster, texture, linux_dmabuf->main_allocator); // try to create a blit but this time through the primary device texture = raster_try_texture_from_blit(raster, renderer); if (texture) { - wlr_raster_attach_with_allocator(raster, texture, allocator); + raster_attach_with_allocator(raster, texture, allocator); return texture; } } @@ -404,7 +400,7 @@ struct wlr_texture *wlr_raster_obtain_texture_with_allocator(struct wlr_raster * struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster *raster, struct wlr_renderer *renderer) { - return wlr_raster_create_texture_with_allocator(raster, renderer, NULL); + return wlr_raster_obtain_texture_with_allocator(raster, renderer, NULL); } struct raster_update_state { @@ -444,9 +440,10 @@ static void raster_update_handle_old_raster_destroy(struct wl_listener *listener struct wlr_raster_source *source, *tmp_source; wl_list_for_each_safe(source, tmp_source, &state->old_raster->sources, link) { struct wlr_texture *texture = source->texture; + struct wlr_allocator *allocator = source->allocator; if (wlr_texture_update_from_buffer(texture, state->buffer, &state->damage)) { raster_detach(state->old_raster, texture); - raster_attach(state->new_raster, texture); + raster_attach_with_allocator(state->new_raster, texture, allocator); } } @@ -530,7 +527,8 @@ static void surface_raster_handle_buffer_prerelease(struct wl_listener *listener struct wlr_surface_output *output; 
wl_list_for_each(output, &surface_raster->surface->current_outputs, link) { - wlr_raster_obtain_texture(raster, output->output->renderer); + wlr_raster_obtain_texture_with_allocator(raster, + output->output->renderer, output->output->allocator); } // if there was a failed texture upload, keep on locking the buffer From bf3ee9274175cc340d8795f00482b8677f4c330a Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sun, 5 May 2024 13:37:54 -0400 Subject: [PATCH 24/29] wlr_output_manager: Introduce new abstraction --- include/wlr/types/wlr_output_manager.h | 92 ++++++++ types/meson.build | 1 + types/wlr_output_manager.c | 280 +++++++++++++++++++++++++ 3 files changed, 373 insertions(+) create mode 100644 include/wlr/types/wlr_output_manager.h create mode 100644 types/wlr_output_manager.c diff --git a/include/wlr/types/wlr_output_manager.h b/include/wlr/types/wlr_output_manager.h new file mode 100644 index 000000000..fef00a07e --- /dev/null +++ b/include/wlr/types/wlr_output_manager.h @@ -0,0 +1,92 @@ +/* + * This an unstable interface of wlroots. No guarantees are made regarding the + * future consistency of this API. + */ +#ifndef WLR_USE_UNSTABLE +#error "Add -DWLR_USE_UNSTABLE to enable unstable wlroots features" +#endif + +#ifndef WLR_TYPES_OUTPUT_MANAGER_H +#define WLR_TYPES_OUTPUT_MANAGER_H + +#include +#include +#include + +struct wlr_renderer; +struct wlr_allocator; +struct wlr_backend; +struct wlr_output; + +struct wlr_output_manager_backend { + struct wlr_output_manager *manager; + + struct wlr_renderer *renderer; + struct wlr_allocator *allocator; + struct wlr_backend *backend; + + struct wl_list link; // wlr_output_manager.backends + + // private state + + uint32_t locks; + struct wl_listener backend_destroy; +}; + +struct wlr_output_manager { + struct wl_list backends; // wlr_output_manager_backend.link + + struct wlr_output_manager_backend primary; +}; + +/** + * Initializes the output given output manager. 
wlr_output_manager_finish + * must be called to clean up this manager. + */ +bool wlr_output_manager_init(struct wlr_output_manager *manager, + struct wlr_backend *backend); + +/** + * Finishes this output_manager and cleans up all its resources including any + * output manager backends. + */ +void wlr_output_manager_finish(struct wlr_output_manager *manager); + +/** + * This will return a output_manager backend that will be reference counted. + * wlr_output_manager_unlock_backend is required to be called after the usage + * of this is finished. + */ +struct wlr_output_manager_backend *wlr_output_manager_lock_backend( + struct wlr_output_manager *manager, struct wlr_backend *wlr_backend); + +/** + * wlr_output_manager_unlock_backend will unlock any backend returned by + * wlr_output_manager_lock_rendener. The allocator and backend allocated + * may be destroyed when the reference count reaches 0 + */ +void wlr_output_manager_unlock_backend(struct wlr_output_manager_backend *backend); + +/** + * wlr_output_manager_init_output will automatically initialize the given output. 
+ * This is a helder function that will handle unlocking backends automatically + * upon output destroy + */ +bool wlr_output_manager_init_output(struct wlr_output_manager *manager, + struct wlr_output *output); + +/** + * Initializes shm for the given wl_display given the constraints all devices + * on the manager have + */ +bool wlr_output_manager_init_wl_shm(struct wlr_output_manager *manager, + struct wl_display *wl_display); + +/** + * Initializes the given wl_display given the constraints all devices + * on the manager have + */ +bool wlr_output_manager_init_wl_display(struct wlr_output_manager *manager, + struct wl_display *wl_display); + +#endif diff --git a/types/meson.build b/types/meson.build index 032143db6..580b7811f 100644 --- a/types/meson.build +++ b/types/meson.build @@ -60,6 +60,7 @@ wlr_files += files( 'wlr_output_layer.c', 'wlr_output_layout.c', 'wlr_output_management_v1.c', + 'wlr_output_manager.c', 'wlr_output_power_management_v1.c', 'wlr_output_swapchain_manager.c', 'wlr_pointer_constraints_v1.c', diff --git a/types/wlr_output_manager.c b/types/wlr_output_manager.c new file mode 100644 index 000000000..45ced4620 --- /dev/null +++ b/types/wlr_output_manager.c @@ -0,0 +1,280 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void output_manager_backend_finish( + struct wlr_output_manager_backend *backend) { + wlr_allocator_destroy(backend->allocator); + wlr_renderer_destroy(backend->renderer); + wl_list_remove(&backend->backend_destroy.link); + wl_list_remove(&backend->link); +} + +static void output_manager_handle_backend_destroy( + struct wl_listener *listener, void *data) { + struct wlr_output_manager_backend *backend = + wl_container_of(listener, backend, backend_destroy); + + output_manager_backend_finish(backend); + + if (backend == &backend->manager->primary) { + *backend = (struct wlr_output_manager_backend){0}; + } else { + free(backend); + } +} 
+ +static bool output_manager_backend_init(struct wlr_output_manager *manager, + struct wlr_output_manager_backend *backend, struct wlr_backend *wlr_backend) { + backend->renderer = wlr_renderer_autocreate(wlr_backend); + if (!backend->renderer) { + return false; + } + + backend->allocator = wlr_allocator_autocreate(wlr_backend, + manager->primary.renderer); + if (!backend->allocator) { + wlr_renderer_destroy(manager->primary.renderer); + return false; + } + + backend->manager = manager; + backend->backend = wlr_backend; + backend->locks = 1; + + backend->backend_destroy.notify = output_manager_handle_backend_destroy; + wl_signal_add(&wlr_backend->events.destroy, &backend->backend_destroy); + + wl_list_insert(&manager->backends, &backend->link); + return true; +} + +struct multi_backend_iterator_data { + struct wlr_output_manager *manager; + bool primary; +}; + +static void multi_backend_iterator(struct wlr_backend *wlr_backend, void *_data) { + struct multi_backend_iterator_data *data = _data; + + // Use the first device as the primary + if (data->primary) { + if (!output_manager_backend_init(data->manager, &data->manager->primary, wlr_backend)) { + return; + } + data->primary = false; + return; + } + + struct wlr_output_manager_backend *backend = calloc(1, sizeof(*backend)); + if (!backend) { + return; + } + + if (!output_manager_backend_init(data->manager, backend, wlr_backend)) { + free(backend); + return; + } +} + +bool wlr_output_manager_init(struct wlr_output_manager *manager, + struct wlr_backend *backend) { + *manager = (struct wlr_output_manager){0}; + wl_list_init(&manager->backends); + + struct multi_backend_iterator_data iter_data = { + .manager = manager, + .primary = true, + }; + + if (wlr_backend_is_multi(backend)) { + wlr_multi_for_each_backend(backend, multi_backend_iterator, &iter_data); + } else { + multi_backend_iterator(backend, &iter_data); + } + + return !wl_list_empty(&manager->backends); +} + +void wlr_output_manager_finish(struct 
wlr_output_manager *manager) {
+	struct wlr_output_manager_backend *backend, *tmp;
+	wl_list_for_each_safe(backend, tmp, &manager->backends, link) { // finish() removes backend from this list
+		output_manager_backend_finish(backend);
+	}
+}
+
+struct wlr_output_manager_backend *wlr_output_manager_lock_backend(
+		struct wlr_output_manager *manager, struct wlr_backend *wlr_backend) {
+	assert(!wlr_backend_is_multi(wlr_backend));
+
+	struct wlr_output_manager_backend *backend;
+	wl_list_for_each(backend, &manager->backends, link) {
+		if (backend->backend == wlr_backend) {
+			backend->locks++;
+			return backend;
+		}
+	}
+
+	backend = calloc(1, sizeof(*backend));
+	if (!backend) {
+		return NULL;
+	}
+
+	if (!output_manager_backend_init(manager, backend, wlr_backend)) {
+		free(backend);
+		return NULL;
+	}
+
+	return backend;
+}
+
+void wlr_output_manager_unlock_backend(struct wlr_output_manager_backend *backend) {
+	assert(backend->locks > 0);
+	backend->locks--;
+
+	if (backend->locks != 0) {
+		return;
+	}
+
+	output_manager_backend_finish(backend);
+	if (backend != &backend->manager->primary) { free(backend); } // primary is embedded in the manager, not heap-allocated
+}
+
+struct output_manager_output {
+	struct wlr_output_manager_backend *backend;
+	struct wlr_addon addon;
+};
+
+static void manager_output_handle_output_destroy(struct wlr_addon *addon) {
+	struct output_manager_output *manager_output =
+		wl_container_of(addon, manager_output, addon);
+	wlr_addon_finish(&manager_output->addon);
+	wlr_output_manager_unlock_backend(manager_output->backend);
+	free(manager_output);
+}
+
+static const struct wlr_addon_interface output_addon_impl = {
+	.name = "wlr_output_manager_output",
+	.destroy = manager_output_handle_output_destroy,
+};
+
+bool wlr_output_manager_init_output(struct wlr_output_manager *manager,
+		struct wlr_output *output) {
+	struct output_manager_output *manager_output = calloc(1, sizeof(*manager_output));
+	if (!manager_output) {
+		return false;
+	}
+
+	manager_output->backend = wlr_output_manager_lock_backend(
+		manager, output->backend);
+	if (!manager_output->backend) {
+		free(manager_output);
+		return 
false;
+	}
+
+	wlr_addon_init(&manager_output->addon, &output->addons, manager, &output_addon_impl);
+
+	wlr_output_init_render(output, manager_output->backend->allocator,
+		manager_output->backend->renderer);
+
+	return true;
+}
+
+bool wlr_output_manager_init_wl_shm(struct wlr_output_manager *manager,
+		struct wl_display *wl_display) {
+	size_t shm_formats_len = 0;
+	uint32_t *shm_formats = NULL;
+
+	struct wlr_output_manager_backend *backend;
+	wl_list_for_each(backend, &manager->backends, link) {
+		const struct wlr_drm_format_set *format_set = wlr_renderer_get_texture_formats(
+			backend->renderer, WLR_BUFFER_CAP_DATA_PTR);
+		if (format_set == NULL || format_set->len == 0) {
+			wlr_log(WLR_ERROR, "Failed to initialize wl_shm: "
+				"cannot get renderer formats");
+			free(shm_formats); return false; // bool function; don't leak the partially-built list
+		}
+
+		if (!shm_formats) {
+			shm_formats = malloc(format_set->len * sizeof(uint32_t));
+			if (!shm_formats) {
+				wlr_log(WLR_INFO, "Cannot allocate a format set");
+				return false;
+			}
+
+			for (size_t i = 0; i < format_set->len; i++) {
+				shm_formats[i] = format_set->formats[i].format;
+			}
+
+			shm_formats_len = format_set->len;
+			continue;
+		}
+
+		// intersect the format lists - null out any formats from the shm_formats
+		// list when the current renderer doesn't have the format as well.
+ for (size_t i = 0; i < shm_formats_len; i++) { + if (shm_formats[i] == 0) { + continue; + } + + bool found = false; + for (size_t j = 0; j < format_set->len; j++) { + if (format_set->formats[j].format == shm_formats[i]) { + found = true; + break; + } + } + + if (!found) { + shm_formats[i] = 0; + } + } + } + + // clear out all null formats from the format list + size_t j = 0; + for (size_t i = 0; i < shm_formats_len; i++) { + if (shm_formats[i] != 0) { + shm_formats[j++] = shm_formats[i]; + } + } + shm_formats_len = j; + + bool ok = wlr_shm_create(wl_display, 1, shm_formats, shm_formats_len); + free(shm_formats); + return ok; +} + +bool wlr_output_manager_init_wl_display(struct wlr_output_manager *manager, + struct wl_display *wl_display) { + if (!wlr_output_manager_init_wl_shm(manager, wl_display)) { + return false; + } + + struct wlr_renderer *r = manager->primary.renderer; + if (wlr_renderer_get_texture_formats(r, WLR_BUFFER_CAP_DMABUF) != NULL) { + if (wlr_renderer_get_drm_fd(r) >= 0) { + if (wlr_drm_create(wl_display, r) == NULL) { + return false; + } + } else { + wlr_log(WLR_INFO, "Cannot get renderer DRM FD, disabling wl_drm"); + } + + if (wlr_linux_dmabuf_v1_create_with_renderer(wl_display, 4, r) == NULL) { + return false; + } + } + + return true; +} From 5a72ea9ac1a6035b5a9c53b2aa5754c90ce3b010 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sun, 5 May 2024 13:37:29 -0400 Subject: [PATCH 25/29] tinywl: Use wlr_output_manager --- tinywl/tinywl.c | 42 ++++++++++++++---------------------------- 1 file changed, 14 insertions(+), 28 deletions(-) diff --git a/tinywl/tinywl.c b/tinywl/tinywl.c index 2c844006b..5789513a5 100644 --- a/tinywl/tinywl.c +++ b/tinywl/tinywl.c @@ -8,13 +8,13 @@ #include #include #include -#include #include #include #include #include #include #include +#include #include #include #include @@ -35,8 +35,7 @@ enum tinywl_cursor_mode { struct tinywl_server { struct wl_display *wl_display; struct wlr_backend *backend; - struct 
wlr_renderer *renderer;
-	struct wlr_allocator *allocator;
+	struct wlr_output_manager output_manager;
 
 	struct wlr_scene *scene;
 	struct wlr_scene_output_layout *scene_layout;
@@ -599,9 +598,10 @@ static void server_new_output(struct wl_listener *listener, void *data) {
 		wl_container_of(listener, server, new_output);
 	struct wlr_output *wlr_output = data;
 
-	/* Configures the output created by the backend to use our allocator
-	 * and our renderer. Must be done once, before commiting the output */
-	wlr_output_init_render(wlr_output, server->allocator, server->renderer);
+	/* Configures the output created by the backend using the output manager
+	 * to allocate a renderer and an allocator for us. Must be done once,
+	 * before committing the output */
+	wlr_output_manager_init_output(&server->output_manager, wlr_output);
 
 	/* The output may be disabled, switch it on. */
 	struct wlr_output_state state;
@@ -910,28 +910,16 @@ int main(int argc, char *argv[]) {
 		return 1;
 	}
 
-	/* Autocreates a renderer, either Pixman, GLES2 or Vulkan for us. The user
-	 * can also specify a renderer using the WLR_RENDERER env var.
-	 * The renderer is responsible for defining the various pixel formats it
-	 * supports for shared memory, this configures that for clients. */
-	server.renderer = wlr_renderer_autocreate(server.backend);
-	if (server.renderer == NULL) {
-		wlr_log(WLR_ERROR, "failed to create wlr_renderer");
+	/* This is a helper that will automatically create renderers and allocators
+	 * for each output. This serves as the bridge between the output and the
+	 * backend for rendering.
+	 */
+	if (!wlr_output_manager_init(&server.output_manager, server.backend)) {
+		wlr_log(WLR_ERROR, "failed to create wlr_output_manager");
 		return 1;
 	}
 
-	wlr_renderer_init_wl_display(server.renderer, server.wl_display);
-
-	/* Autocreates an allocator for us.
-	 * The allocator is the bridge between the renderer and the backend.
It - * handles the buffer creation, allowing wlroots to render onto the - * screen */ - server.allocator = wlr_allocator_autocreate(server.backend, - server.renderer); - if (server.allocator == NULL) { - wlr_log(WLR_ERROR, "failed to create wlr_allocator"); - return 1; - } + wlr_output_manager_init_wl_display(&server.output_manager, server.wl_display); /* This creates some hands-off wlroots interfaces. The compositor is * necessary for clients to allocate surfaces, the subcompositor allows to @@ -1063,10 +1051,8 @@ int main(int argc, char *argv[]) { wl_display_destroy_clients(server.wl_display); wlr_scene_node_destroy(&server.scene->tree.node); wlr_xcursor_manager_destroy(server.cursor_mgr); - wlr_cursor_destroy(server.cursor); - wlr_allocator_destroy(server.allocator); - wlr_renderer_destroy(server.renderer); wlr_backend_destroy(server.backend); + wlr_output_manager_finish(&server.output_manager); wl_display_destroy(server.wl_display); return 0; } From 941c8261e3fcbbfdf85013591d896e4162eafcf8 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Tue, 27 Feb 2024 19:11:33 -0500 Subject: [PATCH 26/29] wlr_output_manager: Automatically recreate the context on loss --- include/wlr/types/wlr_output_manager.h | 5 +++ types/wlr_output_manager.c | 62 ++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) diff --git a/include/wlr/types/wlr_output_manager.h b/include/wlr/types/wlr_output_manager.h index fef00a07e..6a8d26b4f 100644 --- a/include/wlr/types/wlr_output_manager.h +++ b/include/wlr/types/wlr_output_manager.h @@ -27,10 +27,15 @@ struct wlr_output_manager_backend { struct wl_list link; // wlr_output_manager.backends + struct { + struct wl_signal recovery; + } events; + // private state uint32_t locks; struct wl_listener backend_destroy; + struct wl_listener renderer_lost; }; struct wlr_output_manager { diff --git a/types/wlr_output_manager.c b/types/wlr_output_manager.c index 45ced4620..34942291d 100644 --- a/types/wlr_output_manager.c +++ 
b/types/wlr_output_manager.c
@@ -17,6 +17,7 @@ static void output_manager_backend_finish(
 	wlr_allocator_destroy(backend->allocator);
 	wlr_renderer_destroy(backend->renderer);
 	wl_list_remove(&backend->backend_destroy.link);
+	wl_list_remove(&backend->renderer_lost.link);
 	wl_list_remove(&backend->link);
 }
 
@@ -34,6 +35,44 @@ static void output_manager_handle_backend_destroy(
 	}
 }
 
+static void output_manager_handle_renderer_lost(
+		struct wl_listener *listener, void *data) {
+	struct wlr_output_manager_backend *backend =
+		wl_container_of(listener, backend, renderer_lost);
+
+	wlr_log(WLR_INFO, "Attempting renderer recovery after GPU reset!");
+
+	struct wlr_renderer *renderer = wlr_renderer_autocreate(backend->backend);
+	if (!renderer) {
+		wlr_log(WLR_ERROR, "Could not create a new renderer after GPU reset");
+		return;
+	}
+
+	struct wlr_allocator *allocator =
+		wlr_allocator_autocreate(backend->backend, renderer);
+	if (!allocator) {
+		wlr_log(WLR_ERROR, "Could not create a new allocator after GPU reset");
+		wlr_renderer_destroy(renderer);
+		return;
+	}
+
+	wlr_log(WLR_INFO, "Created new renderer and allocator after reset. Attempting to swap...");
+
+	struct wlr_renderer *old_renderer = backend->renderer;
+	struct wlr_allocator *old_allocator = backend->allocator;
+	backend->renderer = renderer;
+	backend->allocator = allocator;
+	wl_list_remove(&backend->renderer_lost.link); // unlink from the dead renderer's signal before re-adding
+	wl_signal_add(&backend->renderer->events.lost, &backend->renderer_lost);
+
+	// Only destroy the old state once we signal a recovery to avoid the old
+	// state being referenced during its destruction.
+ wlr_allocator_destroy(old_allocator); + wlr_renderer_destroy(old_renderer); + + wl_signal_emit_mutable(&backend->events.recovery, NULL); +} + static bool output_manager_backend_init(struct wlr_output_manager *manager, struct wlr_output_manager_backend *backend, struct wlr_backend *wlr_backend) { backend->renderer = wlr_renderer_autocreate(wlr_backend); @@ -52,9 +91,14 @@ static bool output_manager_backend_init(struct wlr_output_manager *manager, backend->backend = wlr_backend; backend->locks = 1; + wl_signal_init(&backend->events.recovery); + backend->backend_destroy.notify = output_manager_handle_backend_destroy; wl_signal_add(&wlr_backend->events.destroy, &backend->backend_destroy); + backend->renderer_lost.notify = output_manager_handle_renderer_lost; + wl_signal_add(&backend->renderer->events.lost, &backend->renderer_lost); + wl_list_insert(&manager->backends, &backend->link); return true; } @@ -152,7 +196,11 @@ void wlr_output_manager_unlock_backend(struct wlr_output_manager_backend *backen struct output_manager_output { struct wlr_output_manager_backend *backend; + struct wlr_output *output; struct wlr_addon addon; + + // recover from GPU resets + struct wl_listener backend_recovery; }; static void manager_output_handle_output_destroy(struct wlr_addon *addon) { @@ -160,6 +208,7 @@ static void manager_output_handle_output_destroy(struct wlr_addon *addon) { wl_container_of(addon, manager_output, addon); wlr_addon_finish(&manager_output->addon); wlr_output_manager_unlock_backend(manager_output->backend); + wl_list_remove(&manager_output->backend_recovery.link); free(manager_output); } @@ -168,6 +217,14 @@ static const struct wlr_addon_interface output_addon_impl = { .destroy = manager_output_handle_output_destroy, }; +static void output_handle_recovery(struct wl_listener *listener, void *data) { + struct output_manager_output *manager = wl_container_of(listener, manager, backend_recovery); + + // we lost the context, create a new renderer and switch everything 
out. + wlr_output_init_render(manager->output, manager->backend->allocator, + manager->backend->renderer); +} + bool wlr_output_manager_init_output(struct wlr_output_manager *manager, struct wlr_output *output) { struct output_manager_output *manager_output = calloc(1, sizeof(*manager_output)); @@ -175,6 +232,8 @@ bool wlr_output_manager_init_output(struct wlr_output_manager *manager, return false; } + manager_output->output = output; + manager_output->backend = wlr_output_manager_lock_backend( manager, output->backend); if (!manager_output->backend) { @@ -184,6 +243,9 @@ bool wlr_output_manager_init_output(struct wlr_output_manager *manager, wlr_addon_init(&manager_output->addon, &output->addons, manager, &output_addon_impl); + manager_output->backend_recovery.notify = output_handle_recovery; + wl_signal_add(&manager_output->backend->events.recovery, &manager_output->backend_recovery); + wlr_output_init_render(output, manager_output->backend->allocator, manager_output->backend->renderer); From 204a2b733bb4c7bcb6227fef173bbe0e3b80d5f4 Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Fri, 28 Apr 2023 23:27:08 +0200 Subject: [PATCH 27/29] surface_invalidation_v1: New protocol implementation --- .../wlr/types/wlr_surface_invalidation_v1.h | 32 +++ protocol/meson.build | 1 + types/meson.build | 1 + types/wlr_surface_invalidation_v1.c | 204 ++++++++++++++++++ 4 files changed, 238 insertions(+) create mode 100644 include/wlr/types/wlr_surface_invalidation_v1.h create mode 100644 types/wlr_surface_invalidation_v1.c diff --git a/include/wlr/types/wlr_surface_invalidation_v1.h b/include/wlr/types/wlr_surface_invalidation_v1.h new file mode 100644 index 000000000..a0d11dcdd --- /dev/null +++ b/include/wlr/types/wlr_surface_invalidation_v1.h @@ -0,0 +1,32 @@ +/* + * This an unstable interface of wlroots. No guarantees are made regarding the + * future consistency of this API. 
+ */ +#ifndef WLR_USE_UNSTABLE +#error "Add -DWLR_USE_UNSTABLE to enable unstable wlroots features" +#endif + +#ifndef WLR_TYPES_WLR_SURFACE_INVALIDATION_V1_H +#define WLR_TYPES_WLR_SURFACE_INVALIDATION_V1_H + +#include + +struct wlr_surface; + +struct wlr_surface_invalidation_manager_v1 { + struct wl_global *global; + + struct { + struct wl_signal destroy; + } events; + + struct wl_listener display_destroy; +}; + +struct wlr_surface_invalidation_manager_v1 *wlr_surface_invalidation_manager_v1_create( + struct wl_display *display, uint32_t version); + +void wlr_surface_invalidation_manager_v1_send_surface_invalidation( + struct wlr_surface *surface); + +#endif diff --git a/protocol/meson.build b/protocol/meson.build index a4476918b..788b5273f 100644 --- a/protocol/meson.build +++ b/protocol/meson.build @@ -31,6 +31,7 @@ protocols = { 'linux-drm-syncobj-v1': wl_protocol_dir / 'staging/linux-drm-syncobj/linux-drm-syncobj-v1.xml', 'security-context-v1': wl_protocol_dir / 'staging/security-context/security-context-v1.xml', 'single-pixel-buffer-v1': wl_protocol_dir / 'staging/single-pixel-buffer/single-pixel-buffer-v1.xml', + 'surface-invalidation-v1': wl_protocol_dir / 'staging/surface-invalidation/surface-invalidation-v1.xml', 'xdg-activation-v1': wl_protocol_dir / 'staging/xdg-activation/xdg-activation-v1.xml', 'xwayland-shell-v1': wl_protocol_dir / 'staging/xwayland-shell/xwayland-shell-v1.xml', 'tearing-control-v1': wl_protocol_dir / 'staging/tearing-control/tearing-control-v1.xml', diff --git a/types/meson.build b/types/meson.build index 580b7811f..3e9aa2e3e 100644 --- a/types/meson.build +++ b/types/meson.build @@ -79,6 +79,7 @@ wlr_files += files( 'wlr_shm.c', 'wlr_single_pixel_buffer_v1.c', 'wlr_subcompositor.c', + 'wlr_surface_invalidation_v1.c', 'wlr_fractional_scale_v1.c', 'wlr_switch.c', 'wlr_tablet_pad.c', diff --git a/types/wlr_surface_invalidation_v1.c b/types/wlr_surface_invalidation_v1.c new file mode 100644 index 000000000..35fc45b2c --- /dev/null +++ 
b/types/wlr_surface_invalidation_v1.c @@ -0,0 +1,204 @@ +#include +#include +#include +#include +#include +#include "surface-invalidation-v1-protocol.h" + +#define SURFACE_INVALIDATION_MANAGER_VERSION 1 + +struct wlr_surface_invalidation_v1_configure { + struct wl_list link; // struct wlr_surface_invalidation_v1.configures + uint32_t serial; + bool configured; +}; + +struct wlr_surface_invalidation_v1 { + struct wl_resource *resource; + struct wl_list configures; // struct wlr_surface_invalidation_v1_configure.link + struct wlr_addon addon; +}; + +static void wlr_surface_invalidation_v1_configure_destroy( + struct wlr_surface_invalidation_v1_configure *configure) { + wl_list_remove(&configure->link); + free(configure); +} + +static const struct wp_surface_invalidation_v1_interface surface_inval_impl; + +static struct wlr_surface_invalidation_v1 *surface_invalidation_v1_from_resource( + struct wl_resource *resource) { + assert(wl_resource_instance_of(resource, &wp_surface_invalidation_v1_interface, + &surface_inval_impl)); + return wl_resource_get_user_data(resource); +} + +static void surface_handle_resource_destroy(struct wl_resource *resource) { + struct wlr_surface_invalidation_v1 *surface = + surface_invalidation_v1_from_resource(resource); + surface->resource = NULL; + wlr_addon_finish(&surface->addon); + + struct wlr_surface_invalidation_v1_configure *configure, *tmp_configure; + wl_list_for_each_safe(configure, tmp_configure, &surface->configures, link) { + wlr_surface_invalidation_v1_configure_destroy(configure); + } + + free(surface); +} + +static void surface_inval_handle_ack(struct wl_client *client, + struct wl_resource *resource, uint32_t serial) { + struct wlr_surface_invalidation_v1 *surface = + surface_invalidation_v1_from_resource(resource); + + // First find the ack'ed configure + bool found = false; + struct wlr_surface_invalidation_v1_configure *configure, *tmp_configure; + wl_list_for_each(configure, &surface->configures, link) { + if 
(configure->serial == serial) {
+			found = true;
+			break;
+		}
+	}
+	if (!found) {
+		/*
+		TODO: What do we do here?
+		wl_resource_post_error(resource,
+			ZWLR_LAYER_SURFACE_V1_ERROR_INVALID_SURFACE_STATE,
+			"wrong configure serial: %" PRIu32, serial);
+		*/
+		return;
+	}
+
+	configure->configured = true;
+
+	// Then remove old configures from the list
+	wl_list_for_each_safe(configure, tmp_configure, &surface->configures, link) {
+		if (configure->serial == serial) {
+			break;
+		}
+		wlr_surface_invalidation_v1_configure_destroy(configure);
+	}
+}
+
+static void destroy_resource(struct wl_client *client,
+		struct wl_resource *resource) {
+	wl_resource_destroy(resource);
+}
+
+static const struct wp_surface_invalidation_v1_interface surface_inval_impl = {
+	.destroy = destroy_resource,
+	.ack = surface_inval_handle_ack,
+};
+
+static void surface_addon_handle_destroy(struct wlr_addon *addon) {
+	struct wlr_surface_invalidation_v1 *surface = wl_container_of(addon, surface, addon);
+	wl_resource_destroy(surface->resource);
+}
+
+static const struct wlr_addon_interface surface_addon_impl = {
+	.name = "surface_invalidation_v1",
+	.destroy = surface_addon_handle_destroy,
+};
+
+static const struct wp_surface_invalidation_manager_v1_interface manager_impl;
+
+static void manager_handle_get_surface_invalidation(struct wl_client *client,
+		struct wl_resource *resource, uint32_t id, struct wl_resource *surface_resource) {
+	struct wlr_surface *wlr_surface = wlr_surface_from_resource(surface_resource);
+
+	struct wlr_surface_invalidation_v1 *surface = calloc(1, sizeof(*surface));
+	if (!surface) {
+		wl_client_post_no_memory(client);
+		return;
+	}
+
+	surface->resource = wl_resource_create(client,
+		&wp_surface_invalidation_v1_interface, wl_resource_get_version(resource), id); // inherit the bound manager version
+	if (!surface->resource) {
+		wl_client_post_no_memory(client);
+		free(surface);
+		return;
+	}
+
+	wl_list_init(&surface->configures);
+	wlr_addon_init(&surface->addon, &wlr_surface->addons, NULL, &surface_addon_impl);
+
+	
wl_resource_set_implementation(surface->resource, + &surface_inval_impl, surface, surface_handle_resource_destroy); +} + +static const struct wp_surface_invalidation_manager_v1_interface manager_impl = { + .destroy = destroy_resource, + .get_surface_invalidation = manager_handle_get_surface_invalidation, +}; + +static void manager_bind(struct wl_client *client, void *data, + uint32_t version, uint32_t id) { + struct wlr_surface_invalidation_manager_v1 *manager = data; + struct wl_resource *resource = wl_resource_create(client, + &wp_surface_invalidation_manager_v1_interface, version, id); + if (!resource) { + wl_client_post_no_memory(client); + return; + } + wl_resource_set_implementation(resource, &manager_impl, manager, NULL); +} + +static void handle_display_destroy(struct wl_listener *listener, void *data) { + struct wlr_surface_invalidation_manager_v1 *manager = + wl_container_of(listener, manager, display_destroy); + wl_signal_emit_mutable(&manager->events.destroy, NULL); + wl_global_destroy(manager->global); + free(manager); +} + +struct wlr_surface_invalidation_manager_v1 *wlr_surface_invalidation_manager_v1_create( + struct wl_display *display, uint32_t version) { + assert(version <= SURFACE_INVALIDATION_MANAGER_VERSION); + + struct wlr_surface_invalidation_manager_v1 *manager = calloc(1, sizeof(*manager)); + if (!manager) { + return NULL; + } + + manager->global = wl_global_create(display, &wp_surface_invalidation_manager_v1_interface, + version, manager, manager_bind); + if (!manager->global) { + free(manager); + return NULL; + } + + manager->display_destroy.notify = handle_display_destroy; + wl_display_add_destroy_listener(display, &manager->display_destroy); + + wl_signal_init(&manager->events.destroy); + + return manager; +} + +void wlr_surface_invalidation_manager_v1_send_surface_invalidation( + struct wlr_surface *wlr_surface) { + struct wlr_addon *addon = wlr_addon_find( + &wlr_surface->addons, NULL, &surface_addon_impl); + if (!addon) { + return; 
+ } + + struct wlr_surface_invalidation_v1 *surface = + wl_container_of(addon, surface, addon); + struct wl_display *display = + wl_client_get_display(wl_resource_get_client(surface->resource)); + + struct wlr_surface_invalidation_v1_configure *configure = calloc(1, sizeof(*configure)); + if (!configure) { + wl_client_post_no_memory(wl_resource_get_client(surface->resource)); + return; + } + + configure->serial = wl_display_next_serial(display); + wl_list_insert(&surface->configures, &configure->link); + wp_surface_invalidation_v1_send_invalidated(surface->resource, configure->serial); +} From dbf10ebdf1e785ea63c9b1c346fccb3d55ab97ac Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sat, 24 Feb 2024 08:35:56 -0500 Subject: [PATCH 28/29] wlr_raster: Introduce invalidated signal If this raster becomes invalid meaning that the buffer neither has a buffer or textures. The invalidated signal will be called to signify to its owner that it should be recreated. --- include/wlr/types/wlr_raster.h | 2 ++ types/wlr_raster.c | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/include/wlr/types/wlr_raster.h b/include/wlr/types/wlr_raster.h index 2857dbe3f..c37d49c0b 100644 --- a/include/wlr/types/wlr_raster.h +++ b/include/wlr/types/wlr_raster.h @@ -23,6 +23,7 @@ struct wlr_allocator; struct wlr_raster_source { struct wlr_texture *texture; struct wlr_allocator *allocator; // may be NULL + struct wlr_raster *raster; struct wl_list link; struct wl_listener renderer_destroy; @@ -43,6 +44,7 @@ struct wlr_raster { struct { struct wl_signal destroy; + struct wl_signal invalidated; } events; // private state diff --git a/types/wlr_raster.c b/types/wlr_raster.c index 86303415a..1cf540976 100644 --- a/types/wlr_raster.c +++ b/types/wlr_raster.c @@ -23,6 +23,10 @@ static void raster_handle_buffer_release(struct wl_listener *listener, void *dat raster->buffer = NULL; wl_list_remove(&raster->buffer_release.link); wl_list_init(&raster->buffer_release.link); + + if 
(wl_list_empty(&raster->sources)) { + wl_signal_emit_mutable(&raster->events.invalidated, NULL); + } } struct wlr_raster *wlr_raster_create(struct wlr_buffer *buffer, @@ -34,6 +38,7 @@ struct wlr_raster *wlr_raster_create(struct wlr_buffer *buffer, wl_list_init(&raster->sources); wl_signal_init(&raster->events.destroy); + wl_signal_init(&raster->events.invalidated); assert(buffer); raster->opaque = buffer_is_opaque(buffer); @@ -58,6 +63,11 @@ static void raster_source_destroy(struct wlr_raster_source *source) { wl_list_remove(&source->link); wl_list_remove(&source->renderer_destroy.link); wl_list_remove(&source->allocator_destroy.link); + + if (!source->raster->buffer && wl_list_empty(&source->raster->sources)) { + wl_signal_emit_mutable(&source->raster->events.invalidated, NULL); + } + free(source); } @@ -68,6 +78,9 @@ static void raster_consider_destroy(struct wlr_raster *raster) { wl_signal_emit_mutable(&raster->events.destroy, NULL); + // we don't want to call invalidation signals as we're destroying the raster + wl_signal_init(&raster->events.invalidated); + struct wlr_raster_source *source, *source_tmp; wl_list_for_each_safe(source, source_tmp, &raster->sources, link) { wlr_texture_destroy(source->texture); @@ -150,6 +163,7 @@ static void raster_attach_with_allocator(struct wlr_raster *raster, wl_list_insert(&raster->sources, &source->link); source->texture = texture; source->allocator = allocator; + source->raster = raster; } static struct wlr_texture *wlr_raster_get_texture(struct wlr_raster *raster, From 9b2b9d13678ef6d5d47456a6fb11a4eba13a03bd Mon Sep 17 00:00:00 2001 From: Alexander Orzechowski Date: Sat, 24 Feb 2024 08:22:50 -0500 Subject: [PATCH 29/29] wlr_raster_from_surface: Implement surface invalidation --- types/wlr_raster.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/types/wlr_raster.c b/types/wlr_raster.c index 1cf540976..bbd495466 100644 --- a/types/wlr_raster.c +++ b/types/wlr_raster.c @@ -10,6 +10,7 @@ #include 
#include #include +#include #include #include #include @@ -507,6 +508,7 @@ struct surface_raster { struct wlr_addon addon; struct wl_listener buffer_prerelease; + struct wl_listener raster_invalidated; bool locking_buffer; }; @@ -525,6 +527,7 @@ static void surface_raster_destroy(struct surface_raster *surface_raster) { surface_raster_drop_raster(surface_raster); wl_list_remove(&surface_raster->buffer_prerelease.link); + wl_list_remove(&surface_raster->raster_invalidated.link); wlr_addon_finish(&surface_raster->addon); free(surface_raster); } @@ -555,6 +558,14 @@ static void surface_raster_handle_buffer_prerelease(struct wl_listener *listener wl_list_init(&surface_raster->buffer_prerelease.link); } +static void surface_raster_handle_raster_invalidated(struct wl_listener *listener, void *data) { + struct surface_raster *surface_raster = + wl_container_of(listener, surface_raster, raster_invalidated); + + wlr_surface_invalidation_manager_v1_send_surface_invalidation( + surface_raster->surface); +} + const struct wlr_addon_interface surface_raster_addon_impl = { .name = "wlr_raster_surface", .destroy = surface_raster_handle_addon_destroy, @@ -636,6 +647,8 @@ struct wlr_raster *wlr_raster_from_surface(struct wlr_surface *surface) { surface_raster->buffer_prerelease.notify = surface_raster_handle_buffer_prerelease; wl_list_init(&surface_raster->buffer_prerelease.link); + surface_raster->raster_invalidated.notify = surface_raster_handle_raster_invalidated; + wl_list_init(&surface_raster->raster_invalidated.link); } if (!surface->current.buffer) { @@ -652,6 +665,9 @@ struct wlr_raster *wlr_raster_from_surface(struct wlr_surface *surface) { wl_list_remove(&surface_raster->buffer_prerelease.link); wl_list_init(&surface_raster->buffer_prerelease.link); + wl_list_remove(&surface_raster->raster_invalidated.link); + wl_list_init(&surface_raster->raster_invalidated.link); + surface_raster_drop_raster(surface_raster); return NULL; @@ -694,6 +710,9 @@ struct wlr_raster 
*wlr_raster_from_surface(struct wlr_surface *surface) { return NULL; } + wl_list_remove(&surface_raster->raster_invalidated.link); + wl_signal_add(&raster->events.invalidated, &surface_raster->raster_invalidated); + surface_raster_drop_raster(surface_raster); surface_raster->raster = wlr_raster_lock(raster);