Merge branch 'gpu-reset-recover' into 'master'

Automatic GPU reset recovery

See merge request wlroots/wlroots!3910
This commit is contained in:
Alexander Orzechowski 2024-10-09 09:31:41 +00:00
commit 863ed02d54
29 changed files with 1822 additions and 541 deletions

View file

@ -19,6 +19,7 @@ void wlr_buffer_init(struct wlr_buffer *buffer,
};
wl_signal_init(&buffer->events.destroy);
wl_signal_init(&buffer->events.release);
wl_signal_init(&buffer->events.prerelease);
wlr_addon_set_init(&buffer->addons);
}
@ -58,6 +59,10 @@ void wlr_buffer_unlock(struct wlr_buffer *buffer) {
assert(buffer->n_locks > 0);
buffer->n_locks--;
if (buffer->n_locks == 0) {
wl_signal_emit_mutable(&buffer->events.prerelease, NULL);
}
if (buffer->n_locks == 0) {
wl_signal_emit_mutable(&buffer->events.release, NULL);
}

View file

@ -60,6 +60,7 @@ wlr_files += files(
'wlr_output_layer.c',
'wlr_output_layout.c',
'wlr_output_management_v1.c',
'wlr_output_manager.c',
'wlr_output_power_management_v1.c',
'wlr_output_swapchain_manager.c',
'wlr_pointer_constraints_v1.c',
@ -68,6 +69,7 @@ wlr_files += files(
'wlr_presentation_time.c',
'wlr_primary_selection_v1.c',
'wlr_primary_selection.c',
'wlr_raster.c',
'wlr_region.c',
'wlr_relative_pointer_v1.c',
'wlr_screencopy_v1.c',
@ -77,6 +79,7 @@ wlr_files += files(
'wlr_shm.c',
'wlr_single_pixel_buffer_v1.c',
'wlr_subcompositor.c',
'wlr_surface_invalidation_v1.c',
'wlr_fractional_scale_v1.c',
'wlr_switch.c',
'wlr_tablet_pad.c',

View file

@ -6,6 +6,7 @@
#include <wlr/types/wlr_fractional_scale_v1.h>
#include <wlr/types/wlr_linux_drm_syncobj_v1.h>
#include <wlr/types/wlr_presentation_time.h>
#include <wlr/types/wlr_raster.h>
#include <wlr/util/transform.h>
#include "types/wlr_scene.h"
@ -76,28 +77,6 @@ static void scene_surface_handle_surface_destroy(
wlr_scene_node_destroy(&surface->buffer->node);
}
// This is used for wlr_scene where it unconditionally locks buffers preventing
// reuse of the existing texture for shm clients. With the usage pattern of
// wlr_scene surface handling, we can mark its locked buffer as safe
// for mutation.
static void client_buffer_mark_next_can_damage(struct wlr_client_buffer *buffer) {
buffer->n_ignore_locks++;
}
static void scene_buffer_unmark_client_buffer(struct wlr_scene_buffer *scene_buffer) {
if (!scene_buffer->buffer) {
return;
}
struct wlr_client_buffer *buffer = wlr_client_buffer_get(scene_buffer->buffer);
if (!buffer) {
return;
}
assert(buffer->n_ignore_locks > 0);
buffer->n_ignore_locks--;
}
static int min(int a, int b) {
return a < b ? a : b;
}
@ -160,29 +139,13 @@ static void surface_reconfigure(struct wlr_scene_surface *scene_surface) {
wlr_scene_buffer_set_transform(scene_buffer, state->transform);
wlr_scene_buffer_set_opacity(scene_buffer, opacity);
scene_buffer_unmark_client_buffer(scene_buffer);
if (surface->buffer) {
client_buffer_mark_next_can_damage(surface->buffer);
struct wlr_raster *raster = wlr_raster_from_surface(surface);
if (raster) {
wlr_scene_buffer_set_raster_with_damage(scene_buffer,
raster, &surface->buffer_damage);
struct wlr_linux_drm_syncobj_surface_v1_state *syncobj_surface_state =
wlr_linux_drm_syncobj_v1_get_surface_state(surface);
struct wlr_drm_syncobj_timeline *wait_timeline = NULL;
uint64_t wait_point = 0;
if (syncobj_surface_state != NULL) {
wait_timeline = syncobj_surface_state->acquire_timeline;
wait_point = syncobj_surface_state->acquire_point;
}
struct wlr_scene_buffer_set_buffer_options options = {
.damage = &surface->buffer_damage,
.wait_timeline = wait_timeline,
.wait_point = wait_point,
};
wlr_scene_buffer_set_buffer_with_options(scene_buffer,
&surface->buffer->base, &options);
if (syncobj_surface_state != NULL &&
(surface->current.committed & WLR_SURFACE_STATE_BUFFER)) {
wlr_linux_drm_syncobj_v1_state_signal_release_with_buffer(syncobj_surface_state,
@ -192,6 +155,7 @@ static void surface_reconfigure(struct wlr_scene_surface *scene_surface) {
wlr_scene_buffer_set_buffer(scene_buffer, NULL);
}
wlr_raster_unlock(raster);
pixman_region32_fini(&opaque);
}
@ -231,8 +195,6 @@ static bool scene_buffer_point_accepts_input(struct wlr_scene_buffer *scene_buff
static void surface_addon_destroy(struct wlr_addon *addon) {
struct wlr_scene_surface *surface = wl_container_of(addon, surface, addon);
scene_buffer_unmark_client_buffer(surface->buffer);
wlr_addon_finish(&surface->addon);
wl_list_remove(&surface->outputs_update.link);

View file

@ -10,6 +10,7 @@
#include <wlr/types/wlr_gamma_control_v1.h>
#include <wlr/types/wlr_linux_dmabuf_v1.h>
#include <wlr/types/wlr_presentation_time.h>
#include <wlr/types/wlr_raster.h>
#include <wlr/types/wlr_scene.h>
#include <wlr/util/log.h>
#include <wlr/util/region.h>
@ -91,8 +92,6 @@ struct highlight_region {
static void scene_buffer_set_buffer(struct wlr_scene_buffer *scene_buffer,
struct wlr_buffer *buffer);
static void scene_buffer_set_texture(struct wlr_scene_buffer *scene_buffer,
struct wlr_texture *texture);
void wlr_scene_node_destroy(struct wlr_scene_node *node) {
if (node == NULL) {
@ -123,9 +122,8 @@ void wlr_scene_node_destroy(struct wlr_scene_node *node) {
}
scene_buffer_set_buffer(scene_buffer, NULL);
scene_buffer_set_texture(scene_buffer, NULL);
wlr_raster_unlock(scene_buffer->raster);
pixman_region32_fini(&scene_buffer->opaque_region);
wlr_drm_syncobj_timeline_unref(scene_buffer->wait_timeline);
} else if (node->type == WLR_SCENE_NODE_TREE) {
struct wlr_scene_tree *scene_tree = wlr_scene_tree_from_node(node);
@ -259,7 +257,7 @@ static void scene_node_opaque_region(struct wlr_scene_node *node, int x, int y,
} else if (node->type == WLR_SCENE_NODE_BUFFER) {
struct wlr_scene_buffer *scene_buffer = wlr_scene_buffer_from_node(node);
if (!scene_buffer->buffer) {
if (!scene_buffer->raster) {
return;
}
@ -267,7 +265,7 @@ static void scene_node_opaque_region(struct wlr_scene_node *node, int x, int y,
return;
}
if (!scene_buffer->buffer_is_opaque) {
if (!scene_buffer->raster->opaque) {
pixman_region32_copy(opaque, &scene_buffer->opaque_region);
pixman_region32_intersect_rect(opaque, opaque, 0, 0, width, height);
pixman_region32_translate(opaque, x, y);
@ -753,57 +751,17 @@ static void scene_buffer_set_buffer(struct wlr_scene_buffer *scene_buffer,
if (scene_buffer->own_buffer) {
wlr_buffer_unlock(scene_buffer->buffer);
}
scene_buffer->buffer = NULL;
scene_buffer->buffer = buffer;
scene_buffer->own_buffer = false;
scene_buffer->buffer_width = scene_buffer->buffer_height = 0;
scene_buffer->buffer_is_opaque = false;
if (!buffer) {
return;
}
scene_buffer->own_buffer = true;
scene_buffer->buffer = wlr_buffer_lock(buffer);
scene_buffer->buffer_width = buffer->width;
scene_buffer->buffer_height = buffer->height;
scene_buffer->buffer_is_opaque = buffer_is_opaque(buffer);
scene_buffer->buffer_release.notify = scene_buffer_handle_buffer_release;
wl_signal_add(&buffer->events.release, &scene_buffer->buffer_release);
}
static void scene_buffer_handle_renderer_destroy(struct wl_listener *listener,
void *data) {
struct wlr_scene_buffer *scene_buffer = wl_container_of(listener, scene_buffer, renderer_destroy);
scene_buffer_set_texture(scene_buffer, NULL);
}
static void scene_buffer_set_texture(struct wlr_scene_buffer *scene_buffer,
struct wlr_texture *texture) {
wl_list_remove(&scene_buffer->renderer_destroy.link);
wlr_texture_destroy(scene_buffer->texture);
scene_buffer->texture = texture;
if (texture != NULL) {
scene_buffer->renderer_destroy.notify = scene_buffer_handle_renderer_destroy;
wl_signal_add(&texture->renderer->events.destroy, &scene_buffer->renderer_destroy);
} else {
wl_list_init(&scene_buffer->renderer_destroy.link);
}
}
static void scene_buffer_set_wait_timeline(struct wlr_scene_buffer *scene_buffer,
struct wlr_drm_syncobj_timeline *timeline, uint64_t point) {
wlr_drm_syncobj_timeline_unref(scene_buffer->wait_timeline);
if (timeline != NULL) {
scene_buffer->wait_timeline = wlr_drm_syncobj_timeline_ref(timeline);
scene_buffer->wait_point = point;
} else {
scene_buffer->wait_timeline = NULL;
scene_buffer->wait_point = 0;
}
}
struct wlr_scene_buffer *wlr_scene_buffer_create(struct wlr_scene_tree *parent,
struct wlr_buffer *buffer) {
struct wlr_scene_buffer *scene_buffer = calloc(1, sizeof(*scene_buffer));
@ -822,8 +780,14 @@ struct wlr_scene_buffer *wlr_scene_buffer_create(struct wlr_scene_tree *parent,
wl_list_init(&scene_buffer->buffer_release.link);
wl_list_init(&scene_buffer->renderer_destroy.link);
scene_buffer->opacity = 1;
scene_buffer_set_buffer(scene_buffer, buffer);
if (buffer) {
scene_buffer->raster = wlr_raster_create(buffer, NULL);
wlr_buffer_lock(buffer);
scene_buffer->own_buffer = true;
}
scene_node_update(&scene_buffer->node, NULL);
return scene_buffer;
@ -836,32 +800,47 @@ void wlr_scene_buffer_set_buffer_with_options(struct wlr_scene_buffer *scene_buf
options = &default_options;
}
// specifying a region for a NULL buffer doesn't make sense. We need to know
// about the buffer to scale the buffer local coordinates down to scene
struct wlr_raster *raster = NULL;
if (buffer) {
raster = wlr_raster_create(buffer, &(struct wlr_raster_create_options) {
.wait_timeline = options->wait_timeline,
.wait_point = options->wait_point,
});
}
wlr_scene_buffer_set_raster_with_damage(scene_buffer, raster, options->damage);
if (raster) {
wlr_buffer_lock(buffer);
scene_buffer->own_buffer = true;
}
wlr_raster_unlock(raster);
}
void wlr_scene_buffer_set_raster_with_damage(struct wlr_scene_buffer *scene_buffer,
struct wlr_raster *raster, const pixman_region32_t *damage) {
// specifying a region for a NULL raster doesn't make sense. We need to know
// about the raster to scale the raster local coordinates down to scene
// coordinates.
assert(buffer || !options->damage);
assert(raster || !damage);
bool mapped = buffer != NULL;
bool prev_mapped = scene_buffer->buffer != NULL || scene_buffer->texture != NULL;
if (!mapped && !prev_mapped) {
// unmapping already unmapped buffer - noop
if (raster == scene_buffer->raster) {
return;
}
// if this node used to not be mapped or its previous displayed
// buffer region will be different from what the new buffer would
// produce we need to update the node.
bool update = mapped != prev_mapped;
if (buffer != NULL && scene_buffer->dst_width == 0 && scene_buffer->dst_height == 0) {
update = update || scene_buffer->buffer_width != buffer->width ||
scene_buffer->buffer_height != buffer->height;
bool update = !raster != !scene_buffer->raster;
if (raster != NULL && scene_buffer->dst_width == 0 && scene_buffer->dst_height == 0) {
update = update || scene_buffer->raster->width != raster->width ||
scene_buffer->raster->height != raster->height;
}
scene_buffer_set_buffer(scene_buffer, buffer);
scene_buffer_set_texture(scene_buffer, NULL);
scene_buffer_set_wait_timeline(scene_buffer,
options->wait_timeline, options->wait_point);
wlr_raster_unlock(scene_buffer->raster);
scene_buffer_set_buffer(scene_buffer, raster ? raster->buffer : NULL);
scene_buffer->raster = raster ? wlr_raster_lock(raster) : NULL;
if (update) {
scene_node_update(&scene_buffer->node, NULL);
@ -876,8 +855,7 @@ void wlr_scene_buffer_set_buffer_with_options(struct wlr_scene_buffer *scene_buf
}
pixman_region32_t fallback_damage;
pixman_region32_init_rect(&fallback_damage, 0, 0, buffer->width, buffer->height);
const pixman_region32_t *damage = options->damage;
pixman_region32_init_rect(&fallback_damage, 0, 0, raster->width, raster->height);
if (!damage) {
damage = &fallback_damage;
}
@ -886,26 +864,26 @@ void wlr_scene_buffer_set_buffer_with_options(struct wlr_scene_buffer *scene_buf
if (wlr_fbox_empty(&box)) {
box.x = 0;
box.y = 0;
box.width = buffer->width;
box.height = buffer->height;
box.width = raster->width;
box.height = raster->height;
}
wlr_fbox_transform(&box, &box, scene_buffer->transform,
buffer->width, buffer->height);
raster->width, raster->height);
float scale_x, scale_y;
if (scene_buffer->dst_width || scene_buffer->dst_height) {
scale_x = scene_buffer->dst_width / box.width;
scale_y = scene_buffer->dst_height / box.height;
} else {
scale_x = buffer->width / box.width;
scale_y = buffer->height / box.height;
scale_x = raster->width / box.width;
scale_y = raster->height / box.height;
}
pixman_region32_t trans_damage;
pixman_region32_init(&trans_damage);
wlr_region_transform(&trans_damage, damage,
scene_buffer->transform, buffer->width, buffer->height);
scene_buffer->transform, raster->width, raster->height);
pixman_region32_intersect_rect(&trans_damage, &trans_damage,
box.x, box.y, box.width, box.height);
pixman_region32_translate(&trans_damage, -box.x, -box.y);
@ -1055,28 +1033,6 @@ void wlr_scene_buffer_set_filter_mode(struct wlr_scene_buffer *scene_buffer,
scene_node_update(&scene_buffer->node, NULL);
}
static struct wlr_texture *scene_buffer_get_texture(
struct wlr_scene_buffer *scene_buffer, struct wlr_renderer *renderer) {
if (scene_buffer->buffer == NULL || scene_buffer->texture != NULL) {
return scene_buffer->texture;
}
struct wlr_client_buffer *client_buffer =
wlr_client_buffer_get(scene_buffer->buffer);
if (client_buffer != NULL) {
return client_buffer->texture;
}
struct wlr_texture *texture =
wlr_texture_from_buffer(renderer, scene_buffer->buffer);
if (texture != NULL && scene_buffer->own_buffer) {
scene_buffer->own_buffer = false;
wlr_buffer_unlock(scene_buffer->buffer);
}
scene_buffer_set_texture(scene_buffer, texture);
return texture;
}
static void scene_node_get_size(struct wlr_scene_node *node,
int *width, int *height) {
*width = 0;
@ -1095,9 +1051,9 @@ static void scene_node_get_size(struct wlr_scene_node *node,
if (scene_buffer->dst_width > 0 && scene_buffer->dst_height > 0) {
*width = scene_buffer->dst_width;
*height = scene_buffer->dst_height;
} else {
*width = scene_buffer->buffer_width;
*height = scene_buffer->buffer_height;
} else if (scene_buffer->raster) {
*width = scene_buffer->raster->width;
*height = scene_buffer->raster->height;
wlr_output_transform_coords(scene_buffer->transform, width, height);
}
break;
@ -1380,8 +1336,12 @@ static void scene_entry_render(struct render_list_entry *entry, const struct ren
case WLR_SCENE_NODE_BUFFER:;
struct wlr_scene_buffer *scene_buffer = wlr_scene_buffer_from_node(node);
struct wlr_texture *texture = scene_buffer_get_texture(scene_buffer,
data->output->output->renderer);
struct wlr_texture *texture = NULL;
if (scene_buffer->raster) {
texture = wlr_raster_obtain_texture(scene_buffer->raster,
data->output->output->renderer);
}
if (texture == NULL) {
scene_output_damage(data->output, &render_region);
break;
@ -1402,8 +1362,8 @@ static void scene_entry_render(struct render_list_entry *entry, const struct ren
.blend_mode = !data->output->scene->calculate_visibility ||
pixman_region32_not_empty(&opaque) ?
WLR_RENDER_BLEND_MODE_PREMULTIPLIED : WLR_RENDER_BLEND_MODE_NONE,
.wait_timeline = scene_buffer->wait_timeline,
.wait_point = scene_buffer->wait_point,
.wait_timeline = scene_buffer->raster->wait_timeline,
.wait_point = scene_buffer->raster->wait_point,
});
struct wlr_scene_output_sample_event sample_event = {
@ -1702,7 +1662,7 @@ static bool scene_node_invisible(struct wlr_scene_node *node) {
} else if (node->type == WLR_SCENE_NODE_BUFFER) {
struct wlr_scene_buffer *buffer = wlr_scene_buffer_from_node(node);
return buffer->buffer == NULL && buffer->texture == NULL;
return buffer->raster == NULL;
}
return false;
@ -1825,8 +1785,8 @@ static bool scene_entry_try_direct_scanout(struct render_list_entry *entry,
return false;
}
int default_width = buffer->buffer->width;
int default_height = buffer->buffer->height;
int default_width = buffer->raster->width;
int default_height = buffer->raster->height;
wlr_output_transform_coords(buffer->transform, &default_width, &default_height);
struct wlr_fbox default_box = {
.width = default_width,
@ -1866,8 +1826,9 @@ static bool scene_entry_try_direct_scanout(struct render_list_entry *entry,
}
wlr_output_state_set_buffer(&pending, buffer->buffer);
if (buffer->wait_timeline != NULL) {
wlr_output_state_set_wait_timeline(&pending, buffer->wait_timeline, buffer->wait_point);
if (buffer->raster->wait_timeline != NULL) {
wlr_output_state_set_wait_timeline(&pending,
buffer->raster->wait_timeline, buffer->raster->wait_point);
}
if (!wlr_output_test_state(scene_output->output, &pending)) {
@ -2125,6 +2086,20 @@ bool wlr_scene_output_build_state(struct wlr_scene_output *scene_output,
return false;
}
// upload all the textures that will be used within this pass before we start
// rendering. We need to do this because some of those textures might be
// created as part of a multirender blit.
for (int i = list_len - 1; i >= 0; i--) {
struct render_list_entry *entry = &list_data[i];
if (entry->node->type != WLR_SCENE_NODE_BUFFER) {
continue;
}
struct wlr_scene_buffer *buffer = wlr_scene_buffer_from_node(entry->node);
wlr_raster_obtain_texture_with_allocator(buffer->raster,
output->renderer, output->allocator);
}
render_data.render_pass = render_pass;
pixman_region32_init(&render_data.damage);

View file

@ -405,8 +405,12 @@ static void surface_state_move(struct wlr_surface_state *state,
}
static void surface_apply_damage(struct wlr_surface *surface) {
wl_list_remove(&surface->current_buffer_release.link);
if (surface->current.buffer == NULL) {
// NULL commit
wl_list_init(&surface->current_buffer_release.link);
if (surface->buffer != NULL) {
wlr_buffer_unlock(&surface->buffer->base);
}
@ -415,13 +419,19 @@ static void surface_apply_damage(struct wlr_surface *surface) {
return;
}
// lock the buffer during the commit so that everything watching the surface
// can have a chance to take a look at the buffer.
wlr_buffer_lock(surface->current.buffer);
wl_signal_add(&surface->current.buffer->events.release,
&surface->current_buffer_release);
surface->opaque = buffer_is_opaque(surface->current.buffer);
if (surface->buffer != NULL) {
if (wlr_client_buffer_apply_damage(surface->buffer,
surface->current.buffer, &surface->buffer_damage)) {
wlr_buffer_unlock(surface->current.buffer);
surface->current.buffer = NULL;
wlr_surface_consume(surface);
return;
}
}
@ -432,6 +442,7 @@ static void surface_apply_damage(struct wlr_surface *surface) {
struct wlr_client_buffer *buffer = wlr_client_buffer_create(
surface->current.buffer, surface->compositor->renderer);
wlr_surface_consume(surface);
if (buffer == NULL) {
wlr_log(WLR_ERROR, "Failed to upload buffer");
@ -508,10 +519,26 @@ error:
wl_resource_post_no_memory(surface->resource);
}
// Reset the surface's per-commit buffer tracking after the current buffer
// has been consumed (i.e. the surface's lock was already released via
// wlr_surface_consume()). Clears the release listener, the accumulated
// buffer damage, and the consumed flag.
static void surface_clean_state(struct wlr_surface *surface) {
	assert(surface->consumed);
	// Detach the release listener and re-init the link so it can be
	// removed again safely later.
	wl_list_remove(&surface->current_buffer_release.link);
	wl_list_init(&surface->current_buffer_release.link);
	pixman_region32_clear(&surface->buffer_damage);
	// The buffer was already unlocked when the surface was consumed, so
	// only the pointer is dropped here.
	surface->current.buffer = NULL;
	surface->consumed = false;
}
static void surface_commit_state(struct wlr_surface *surface,
struct wlr_surface_state *next) {
assert(next->cached_state_locks == 0);
// if the surface was consumed that means we don't own the current buffer
// anymore.
if (surface->consumed) {
surface_clean_state(surface);
}
bool invalid_buffer = next->committed & WLR_SURFACE_STATE_BUFFER;
if (invalid_buffer && next->buffer == NULL) {
@ -562,8 +589,8 @@ static void surface_commit_state(struct wlr_surface *surface,
// Release the buffer after emitting the commit event, so that listeners can
// access it. Don't leave the buffer locked so that wl_shm buffers can be
// released immediately on commit when they are uploaded to the GPU.
surface->consumed = true;
wlr_buffer_unlock(surface->current.buffer);
surface->current.buffer = NULL;
}
static void surface_handle_commit(struct wl_client *client,
@ -718,6 +745,10 @@ static void surface_destroy_role_object(struct wlr_surface *surface);
static void surface_handle_resource_destroy(struct wl_resource *resource) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
if (surface->consumed) {
surface_clean_state(surface);
}
struct wlr_surface_output *surface_output, *surface_output_tmp;
wl_list_for_each_safe(surface_output, surface_output_tmp,
&surface->current_outputs, link) {
@ -736,6 +767,7 @@ static void surface_handle_resource_destroy(struct wl_resource *resource) {
surface_state_destroy_cached(cached, surface);
}
wl_list_remove(&surface->current_buffer_release.link);
wl_list_remove(&surface->role_resource_destroy.link);
wl_list_remove(&surface->pending_buffer_resource_destroy.link);
@ -751,6 +783,21 @@ static void surface_handle_resource_destroy(struct wl_resource *resource) {
free(surface);
}
// Listener for the current buffer's release signal: once the buffer has
// been released, clean up the surface's per-commit state.
static void surface_handle_current_buffer_release(struct wl_listener *listener,
		void *data) {
	struct wlr_surface *surface = wl_container_of(listener, surface, current_buffer_release);
	surface_clean_state(surface);
}
void wlr_surface_consume(struct wlr_surface *surface) {
if (surface->consumed || !surface->current.buffer) {
return;
}
surface->consumed = true;
wlr_buffer_unlock(surface->current.buffer);
}
static struct wlr_surface *surface_create(struct wl_client *client,
uint32_t version, uint32_t id, struct wlr_compositor *compositor) {
struct wlr_surface *surface = calloc(1, sizeof(*surface));
@ -795,6 +842,9 @@ static struct wlr_surface *surface_create(struct wl_client *client,
surface->pending_buffer_resource_destroy.notify = pending_buffer_resource_handle_destroy;
wl_list_init(&surface->pending_buffer_resource_destroy.link);
surface->current_buffer_release.notify = surface_handle_current_buffer_release;
wl_list_init(&surface->current_buffer_release.link);
return surface;
}

View file

@ -14,6 +14,7 @@
#include <wlr/types/wlr_compositor.h>
#include <wlr/types/wlr_tablet_tool.h>
#include <wlr/types/wlr_touch.h>
#include <wlr/types/wlr_raster.h>
#include <wlr/types/wlr_xcursor_manager.h>
#include <wlr/util/box.h>
#include <wlr/util/log.h>
@ -539,7 +540,13 @@ static void cursor_output_cursor_update(struct wlr_cursor_output_cursor *output_
} else if (cur->state->surface != NULL) {
struct wlr_surface *surface = cur->state->surface;
struct wlr_texture *texture = wlr_surface_get_texture(surface);
struct wlr_texture *texture = NULL;
struct wlr_raster *raster = wlr_raster_from_surface(surface);
if (raster) {
texture = wlr_raster_obtain_texture(raster, output_cursor->output_cursor->output->renderer);
}
wlr_raster_unlock(raster);
int32_t hotspot_x = cur->state->surface_hotspot.x;
int32_t hotspot_y = cur->state->surface_hotspot.y;

View file

@ -7,6 +7,7 @@
#include <wlr/backend.h>
#include <wlr/config.h>
#include <wlr/interfaces/wlr_buffer.h>
#include <wlr/render/allocator.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/types/wlr_compositor.h>
#include <wlr/types/wlr_linux_dmabuf_v1.h>
@ -90,16 +91,19 @@ struct wlr_dmabuf_v1_buffer *wlr_dmabuf_v1_buffer_try_from_buffer_resource(
static const struct wlr_buffer_impl buffer_impl;
static struct wlr_dmabuf_v1_buffer *dmabuf_v1_buffer_from_buffer(
struct wlr_dmabuf_v1_buffer *wlr_dmabuf_v1_buffer_try_from_buffer(
struct wlr_buffer *wlr_buffer) {
assert(wlr_buffer->impl == &buffer_impl);
if (wlr_buffer->impl != &buffer_impl) {
return NULL;
}
struct wlr_dmabuf_v1_buffer *buffer = wl_container_of(wlr_buffer, buffer, base);
return buffer;
}
static void buffer_destroy(struct wlr_buffer *wlr_buffer) {
struct wlr_dmabuf_v1_buffer *buffer =
dmabuf_v1_buffer_from_buffer(wlr_buffer);
wlr_dmabuf_v1_buffer_try_from_buffer(wlr_buffer);
if (buffer->resource != NULL) {
wl_resource_set_user_data(buffer->resource, NULL);
}
@ -111,7 +115,7 @@ static void buffer_destroy(struct wlr_buffer *wlr_buffer) {
static bool buffer_get_dmabuf(struct wlr_buffer *wlr_buffer,
struct wlr_dmabuf_attributes *attribs) {
struct wlr_dmabuf_v1_buffer *buffer =
dmabuf_v1_buffer_from_buffer(wlr_buffer);
wlr_dmabuf_v1_buffer_try_from_buffer(wlr_buffer);
*attribs = buffer->attributes;
return true;
}
@ -366,6 +370,7 @@ static void params_create_common(struct wl_resource *params_resource,
&wl_buffer_impl, buffer, buffer_handle_resource_destroy);
buffer->attributes = attribs;
buffer->linux_dmabuf_v1 = linux_dmabuf;
buffer->release.notify = buffer_handle_release;
wl_signal_add(&buffer->base.events.release, &buffer->release);
@ -870,6 +875,8 @@ static void linux_dmabuf_v1_destroy(struct wlr_linux_dmabuf_v1 *linux_dmabuf) {
}
wl_list_remove(&linux_dmabuf->display_destroy.link);
wl_list_remove(&linux_dmabuf->main_renderer_destroy.link);
wl_list_remove(&linux_dmabuf->main_allocator_destroy.link);
wl_global_destroy(linux_dmabuf->global);
free(linux_dmabuf);
@ -957,6 +964,8 @@ struct wlr_linux_dmabuf_v1 *wlr_linux_dmabuf_v1_create(struct wl_display *displa
linux_dmabuf->main_device_fd = -1;
wl_list_init(&linux_dmabuf->surfaces);
wl_list_init(&linux_dmabuf->main_renderer_destroy.link);
wl_list_init(&linux_dmabuf->main_allocator_destroy.link);
wl_signal_init(&linux_dmabuf->events.destroy);
linux_dmabuf->global = wl_global_create(display, &zwp_linux_dmabuf_v1_interface,
@ -1068,15 +1077,6 @@ static bool devid_from_fd(int fd, dev_t *devid) {
return true;
}
static bool is_secondary_drm_backend(struct wlr_backend *backend) {
#if WLR_HAS_DRM_BACKEND
return wlr_backend_is_drm(backend) &&
wlr_drm_backend_get_parent(backend) != NULL;
#else
return false;
#endif
}
bool wlr_linux_dmabuf_feedback_v1_init_with_options(struct wlr_linux_dmabuf_feedback_v1 *feedback,
const struct wlr_linux_dmabuf_feedback_v1_init_options *options) {
assert(options->main_renderer != NULL);
@ -1119,8 +1119,7 @@ bool wlr_linux_dmabuf_feedback_v1_init_with_options(struct wlr_linux_dmabuf_feed
wlr_log(WLR_ERROR, "Failed to intersect renderer and scanout formats");
goto error;
}
} else if (options->scanout_primary_output != NULL &&
!is_secondary_drm_backend(options->scanout_primary_output->backend)) {
} else if (options->scanout_primary_output != NULL) {
int backend_drm_fd = wlr_backend_get_drm_fd(options->scanout_primary_output->backend);
if (backend_drm_fd < 0) {
wlr_log(WLR_ERROR, "Failed to get backend DRM FD");
@ -1169,3 +1168,42 @@ error:
wlr_linux_dmabuf_feedback_v1_finish(feedback);
return false;
}
// Drop the configured main blit device: detach both destroy listeners
// (re-initializing each link so a later wl_list_remove() stays safe) and
// clear the renderer/allocator pointers.
static void linux_dmabuf_unregister_main_blit_device(struct wlr_linux_dmabuf_v1 *linux_dmabuf) {
	wl_list_remove(&linux_dmabuf->main_renderer_destroy.link);
	wl_list_init(&linux_dmabuf->main_renderer_destroy.link);

	wl_list_remove(&linux_dmabuf->main_allocator_destroy.link);
	wl_list_init(&linux_dmabuf->main_allocator_destroy.link);

	linux_dmabuf->main_renderer = NULL;
	linux_dmabuf->main_allocator = NULL;
}
// The main blit renderer is being destroyed: unregister the blit device.
static void linux_dmabuf_handle_main_renderer_destroy(struct wl_listener *listener, void *data) {
	struct wlr_linux_dmabuf_v1 *linux_dmabuf = wl_container_of(
		listener, linux_dmabuf, main_renderer_destroy);
	linux_dmabuf_unregister_main_blit_device(linux_dmabuf);
}
// The main blit allocator is being destroyed: unregister the blit device.
static void linux_dmabuf_handle_main_allocator_destroy(struct wl_listener *listener, void *data) {
	struct wlr_linux_dmabuf_v1 *linux_dmabuf = wl_container_of(
		listener, linux_dmabuf, main_allocator_destroy);
	linux_dmabuf_unregister_main_blit_device(linux_dmabuf);
}
// Configure the renderer/allocator pair used as the main blit device.
// Replaces any previously configured pair; the device is automatically
// unregistered when either the renderer or the allocator is destroyed.
void wlr_linux_dmabuf_v1_set_main_blit_device(struct wlr_linux_dmabuf_v1 *linux_dmabuf,
		struct wlr_renderer *renderer, struct wlr_allocator *allocator) {
	assert(renderer != NULL && allocator != NULL);

	// Detach any previous destroy listeners before re-registering.
	wl_list_remove(&linux_dmabuf->main_renderer_destroy.link);
	wl_list_remove(&linux_dmabuf->main_allocator_destroy.link);

	linux_dmabuf->main_renderer_destroy.notify = linux_dmabuf_handle_main_renderer_destroy;
	wl_signal_add(&renderer->events.destroy, &linux_dmabuf->main_renderer_destroy);

	linux_dmabuf->main_allocator_destroy.notify = linux_dmabuf_handle_main_allocator_destroy;
	wl_signal_add(&allocator->events.destroy, &linux_dmabuf->main_allocator_destroy);

	linux_dmabuf->main_renderer = renderer;
	linux_dmabuf->main_allocator = allocator;
}

342
types/wlr_output_manager.c Normal file
View file

@ -0,0 +1,342 @@
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <wlr/backend.h>
#include <wlr/backend/multi.h>
#include <wlr/render/allocator.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/types/wlr_drm.h>
#include <wlr/types/wlr_shm.h>
#include <wlr/types/wlr_output.h>
#include <wlr/types/wlr_output_manager.h>
#include <wlr/types/wlr_linux_dmabuf_v1.h>
#include <wlr/util/log.h>
// Tear down a per-backend rendering state: destroy its allocator and
// renderer, detach its listeners and unlink it from the manager's list.
// Does not free the struct itself (the primary backend is embedded in the
// manager; secondary backends are freed by their callers).
static void output_manager_backend_finish(
		struct wlr_output_manager_backend *backend) {
	// Destroy the allocator before the renderer it was paired with.
	wlr_allocator_destroy(backend->allocator);
	wlr_renderer_destroy(backend->renderer);
	wl_list_remove(&backend->backend_destroy.link);
	wl_list_remove(&backend->renderer_lost.link);
	wl_list_remove(&backend->link);
}
// The underlying wlr_backend is going away: finish our rendering state.
// The primary backend is embedded in the manager, so it is zeroed in
// place; secondary backends are heap-allocated and freed here.
static void output_manager_handle_backend_destroy(
		struct wl_listener *listener, void *data) {
	struct wlr_output_manager_backend *backend =
		wl_container_of(listener, backend, backend_destroy);
	output_manager_backend_finish(backend);
	if (backend == &backend->manager->primary) {
		*backend = (struct wlr_output_manager_backend){0};
	} else {
		free(backend);
	}
}
// Handler for the renderer's "lost" signal (GPU reset): create a fresh
// renderer + allocator pair for this backend, swap them in, notify
// listeners via the recovery signal, then destroy the old pair.
static void output_manager_handle_renderer_lost(
		struct wl_listener *listener, void *data) {
	struct wlr_output_manager_backend *backend =
		wl_container_of(listener, backend, renderer_lost);

	wlr_log(WLR_INFO, "Attempting renderer recovery after GPU reset!");

	struct wlr_renderer *renderer = wlr_renderer_autocreate(backend->backend);
	if (!renderer) {
		wlr_log(WLR_ERROR, "Could not create a new renderer after GPU reset");
		return;
	}

	struct wlr_allocator *allocator =
		wlr_allocator_autocreate(backend->backend, renderer);
	if (!allocator) {
		wlr_log(WLR_ERROR, "Could not create a new allocator after GPU reset");
		wlr_renderer_destroy(renderer);
		return;
	}

	wlr_log(WLR_INFO, "Created new renderer and allocator after reset. Attempting to swap...");

	struct wlr_renderer *old_renderer = backend->renderer;
	struct wlr_allocator *old_allocator = backend->allocator;
	backend->renderer = renderer;
	backend->allocator = allocator;

	// This listener is still linked into the old renderer's lost signal
	// (signal emission does not unlink listeners); remove it before
	// re-registering, otherwise wl_signal_add() corrupts the old list.
	wl_list_remove(&backend->renderer_lost.link);
	wl_signal_add(&backend->renderer->events.lost, &backend->renderer_lost);

	// Only destroy the old state once we signal a recovery to avoid the old
	// state being referenced during its destruction.
	wl_signal_emit_mutable(&backend->events.recovery, NULL);

	wlr_allocator_destroy(old_allocator);
	wlr_renderer_destroy(old_renderer);
}
static bool output_manager_backend_init(struct wlr_output_manager *manager,
struct wlr_output_manager_backend *backend, struct wlr_backend *wlr_backend) {
backend->renderer = wlr_renderer_autocreate(wlr_backend);
if (!backend->renderer) {
return false;
}
backend->allocator = wlr_allocator_autocreate(wlr_backend,
manager->primary.renderer);
if (!backend->allocator) {
wlr_renderer_destroy(manager->primary.renderer);
return false;
}
backend->manager = manager;
backend->backend = wlr_backend;
backend->locks = 1;
wl_signal_init(&backend->events.recovery);
backend->backend_destroy.notify = output_manager_handle_backend_destroy;
wl_signal_add(&wlr_backend->events.destroy, &backend->backend_destroy);
backend->renderer_lost.notify = output_manager_handle_renderer_lost;
wl_signal_add(&backend->renderer->events.lost, &backend->renderer_lost);
wl_list_insert(&manager->backends, &backend->link);
return true;
}
// State threaded through multi_backend_iterator(): the manager being
// initialized, and whether the next successfully-initialized backend
// should become the primary.
struct multi_backend_iterator_data {
	struct wlr_output_manager *manager;
	bool primary;
};
// Per-backend callback for wlr_multi_for_each_backend(): the first backend
// that initializes successfully becomes the manager's embedded primary;
// every later backend gets a heap-allocated secondary state. Failures are
// skipped silently.
static void multi_backend_iterator(struct wlr_backend *wlr_backend, void *_data) {
	struct multi_backend_iterator_data *data = _data;

	if (data->primary) {
		// Use the first device as the primary.
		if (output_manager_backend_init(data->manager,
				&data->manager->primary, wlr_backend)) {
			data->primary = false;
		}
		return;
	}

	struct wlr_output_manager_backend *backend = calloc(1, sizeof(*backend));
	if (backend == NULL) {
		return;
	}
	if (!output_manager_backend_init(data->manager, backend, wlr_backend)) {
		free(backend);
	}
}
// Set up an output manager for the given backend, creating rendering state
// for every sub-backend (or for the backend itself when it is not a multi
// backend). Returns true if at least one backend was initialized.
bool wlr_output_manager_init(struct wlr_output_manager *manager,
		struct wlr_backend *backend) {
	*manager = (struct wlr_output_manager){0};
	wl_list_init(&manager->backends);

	struct multi_backend_iterator_data iter_data = {
		.manager = manager,
		.primary = true,
	};

	if (!wlr_backend_is_multi(backend)) {
		// Single backend: run the iterator once directly.
		multi_backend_iterator(backend, &iter_data);
	} else {
		wlr_multi_for_each_backend(backend, multi_backend_iterator, &iter_data);
	}

	return !wl_list_empty(&manager->backends);
}
// Tear down all backend states tracked by the manager.
void wlr_output_manager_finish(struct wlr_output_manager *manager) {
	struct wlr_output_manager_backend *backend, *tmp;
	// output_manager_backend_finish() unlinks the entry from the list, so
	// a safe iteration is required (plain wl_list_for_each would keep
	// walking through a removed node).
	wl_list_for_each_safe(backend, tmp, &manager->backends, link) {
		output_manager_backend_finish(backend);
		// NOTE(review): heap-allocated secondary backends are not freed
		// here — presumably remaining lock holders release them via
		// wlr_output_manager_unlock_backend(); verify ownership at teardown.
	}
}
struct wlr_output_manager_backend *wlr_output_manager_lock_backend(
struct wlr_output_manager *manager, struct wlr_backend *wlr_backend) {
assert(!wlr_backend_is_multi(wlr_backend));
struct wlr_output_manager_backend *backend;
wl_list_for_each(backend, &manager->backends, link) {
if (backend->backend == wlr_backend) {
backend->locks++;
return backend;
}
}
backend = calloc(1, sizeof(*backend));
if (!backend) {
return NULL;
}
if (!output_manager_backend_init(manager, backend, wlr_backend)) {
free(backend);
return NULL;
}
return backend;
}
// Drop a reference taken with wlr_output_manager_lock_backend(); destroys the
// backend state once the last reference is gone.
void wlr_output_manager_unlock_backend(struct wlr_output_manager_backend *backend) {
	assert(backend->locks > 0);
	if (--backend->locks == 0) {
		output_manager_backend_finish(backend);
		free(backend);
	}
}
// Per-output bookkeeping attached to a wlr_output as an addon; holds a lock on
// the output's backend state and re-initializes rendering after a GPU reset.
struct output_manager_output {
	struct wlr_output_manager_backend *backend; // locked for this output's lifetime
	struct wlr_output *output;
	struct wlr_addon addon; // attached to output->addons

	// recover from GPU resets
	struct wl_listener backend_recovery;
};
// Addon destroy hook: the wlr_output is going away, release everything.
static void manager_output_handle_output_destroy(struct wlr_addon *addon) {
	struct output_manager_output *manager_output =
		wl_container_of(addon, manager_output, addon);
	wlr_addon_finish(&manager_output->addon);
	// Remove the recovery listener *before* unlocking the backend: unlocking
	// may free the backend, and this listener is linked into the backend's
	// recovery signal list, so removing it afterwards would touch freed
	// memory (the original did it in the wrong order).
	wl_list_remove(&manager_output->backend_recovery.link);
	wlr_output_manager_unlock_backend(manager_output->backend);
	free(manager_output);
}
// Addon vtable tying output_manager_output cleanup to wlr_output destruction.
static const struct wlr_addon_interface output_addon_impl = {
	.name = "wlr_output_manager_output",
	.destroy = manager_output_handle_output_destroy,
};
// Invoked on the backend's recovery signal after a GPU reset: re-attach the
// output to the backend's (replacement) renderer and allocator.
static void output_handle_recovery(struct wl_listener *listener, void *data) {
	struct output_manager_output *manager = wl_container_of(listener, manager, backend_recovery);
	// we lost the context, create a new renderer and switch everything out.
	wlr_output_init_render(manager->output, manager->backend->allocator,
		manager->backend->renderer);
}
// Attach an output to the manager: lock the output's backend state, wire up
// GPU-reset recovery, and initialize rendering for the output.
bool wlr_output_manager_init_output(struct wlr_output_manager *manager,
		struct wlr_output *output) {
	struct output_manager_output *manager_output = calloc(1, sizeof(*manager_output));
	if (manager_output == NULL) {
		return false;
	}

	manager_output->backend =
		wlr_output_manager_lock_backend(manager, output->backend);
	if (manager_output->backend == NULL) {
		free(manager_output);
		return false;
	}
	manager_output->output = output;

	wlr_addon_init(&manager_output->addon, &output->addons, manager, &output_addon_impl);

	manager_output->backend_recovery.notify = output_handle_recovery;
	wl_signal_add(&manager_output->backend->events.recovery,
		&manager_output->backend_recovery);

	wlr_output_init_render(output, manager_output->backend->allocator,
		manager_output->backend->renderer);
	return true;
}
// Advertise wl_shm with the intersection of the DATA_PTR texture formats
// supported by every backend renderer.
bool wlr_output_manager_init_wl_shm(struct wlr_output_manager *manager,
		struct wl_display *wl_display) {
	size_t shm_formats_len = 0;
	uint32_t *shm_formats = NULL;
	struct wlr_output_manager_backend *backend;
	wl_list_for_each(backend, &manager->backends, link) {
		const struct wlr_drm_format_set *format_set = wlr_renderer_get_texture_formats(
			backend->renderer, WLR_BUFFER_CAP_DATA_PTR);
		if (format_set == NULL || format_set->len == 0) {
			wlr_log(WLR_ERROR, "Failed to initialize wl_shm: "
				"cannot get renderer formats");
			// Fix: the original returned NULL from a bool function and
			// leaked shm_formats if a previous iteration had allocated it.
			free(shm_formats);
			return false;
		}

		if (!shm_formats) {
			// First renderer: seed the candidate list with all of its formats.
			shm_formats = malloc(format_set->len * sizeof(uint32_t));
			if (!shm_formats) {
				wlr_log(WLR_INFO, "Cannot allocate a format set");
				return false;
			}
			for (size_t i = 0; i < format_set->len; i++) {
				shm_formats[i] = format_set->formats[i].format;
			}
			shm_formats_len = format_set->len;
			continue;
		}

		// intersect the format lists - null out any formats from the shm_formats
		// list when the current renderer doesn't have the format as well.
		for (size_t i = 0; i < shm_formats_len; i++) {
			if (shm_formats[i] == 0) {
				continue;
			}
			bool found = false;
			for (size_t j = 0; j < format_set->len; j++) {
				if (format_set->formats[j].format == shm_formats[i]) {
					found = true;
					break;
				}
			}
			if (!found) {
				shm_formats[i] = 0;
			}
		}
	}

	// clear out all null formats from the format list
	size_t j = 0;
	for (size_t i = 0; i < shm_formats_len; i++) {
		if (shm_formats[i] != 0) {
			shm_formats[j++] = shm_formats[i];
		}
	}
	shm_formats_len = j;

	bool ok = wlr_shm_create(wl_display, 1, shm_formats, shm_formats_len);
	free(shm_formats);
	return ok;
}
// Create the wl_shm, wl_drm and linux-dmabuf globals appropriate for the
// manager's primary renderer.
bool wlr_output_manager_init_wl_display(struct wlr_output_manager *manager,
		struct wl_display *wl_display) {
	if (!wlr_output_manager_init_wl_shm(manager, wl_display)) {
		return false;
	}

	struct wlr_renderer *renderer = manager->primary.renderer;
	if (wlr_renderer_get_texture_formats(renderer, WLR_BUFFER_CAP_DMABUF) == NULL) {
		// No dmabuf support: nothing more to advertise.
		return true;
	}

	if (wlr_renderer_get_drm_fd(renderer) < 0) {
		wlr_log(WLR_INFO, "Cannot get renderer DRM FD, disabling wl_drm");
	} else if (wlr_drm_create(wl_display, renderer) == NULL) {
		return false;
	}

	return wlr_linux_dmabuf_v1_create_with_renderer(wl_display, 4, renderer) != NULL;
}

724
types/wlr_raster.c Normal file
View file

@ -0,0 +1,724 @@
#include <assert.h>
#include <drm_fourcc.h>
#include <pixman.h>
#include <wlr/render/allocator.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/render/wlr_texture.h>
#include <wlr/types/wlr_buffer.h>
#include <wlr/types/wlr_compositor.h>
#include <wlr/types/wlr_linux_dmabuf_v1.h>
#include <wlr/types/wlr_linux_drm_syncobj_v1.h>
#include <wlr/types/wlr_raster.h>
#include <wlr/render/drm_syncobj.h>
#include <wlr/types/wlr_surface_invalidation_v1.h>
#include <wlr/render/wlr_texture.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/util/addon.h>
#include <wlr/util/log.h>
#include "render/drm_format_set.h"
#include "render/wlr_renderer.h"
#include "types/wlr_buffer.h"
// The underlying wlr_buffer has been released: drop our pointer to it and, if
// no renderer-side texture copies exist either, announce that the raster's
// content is gone.
static void raster_handle_buffer_release(struct wl_listener *listener, void *data) {
	struct wlr_raster *raster = wl_container_of(listener, raster, buffer_release);
	raster->buffer = NULL;
	// Re-init the link so later wl_list_remove() calls stay safe.
	wl_list_remove(&raster->buffer_release.link);
	wl_list_init(&raster->buffer_release.link);

	if (wl_list_empty(&raster->sources)) {
		wl_signal_emit_mutable(&raster->events.invalidated, NULL);
	}
}
// Create a raster wrapping the given buffer (required). The raster does not
// lock the buffer; it only observes its release. The returned raster carries
// one lock owned by the caller.
struct wlr_raster *wlr_raster_create(struct wlr_buffer *buffer,
		const struct wlr_raster_create_options *options) {
	assert(buffer);

	struct wlr_raster *raster = calloc(1, sizeof(*raster));
	if (raster == NULL) {
		return NULL;
	}

	wl_list_init(&raster->sources);
	wl_signal_init(&raster->events.destroy);
	wl_signal_init(&raster->events.invalidated);

	raster->opaque = buffer_is_opaque(buffer);
	raster->width = buffer->width;
	raster->height = buffer->height;
	raster->buffer = buffer;
	raster->n_locks = 1;

	raster->buffer_release.notify = raster_handle_buffer_release;
	wl_signal_add(&buffer->events.release, &raster->buffer_release);

	if (options != NULL && options->wait_timeline != NULL) {
		raster->wait_timeline = wlr_drm_syncobj_timeline_ref(options->wait_timeline);
		raster->wait_point = options->wait_point;
	}
	return raster;
}
// Unlink and free a source entry. If this was the raster's last source and the
// buffer is already gone, notify listeners that the content is unavailable.
// Note: this does not destroy source->texture; callers that own the texture
// must destroy it themselves.
static void raster_source_destroy(struct wlr_raster_source *source) {
	wl_list_remove(&source->link);
	wl_list_remove(&source->renderer_destroy.link);
	wl_list_remove(&source->allocator_destroy.link);

	// The entry was unlinked above, so this checks for *remaining* sources.
	if (!source->raster->buffer && wl_list_empty(&source->raster->sources)) {
		wl_signal_emit_mutable(&source->raster->events.invalidated, NULL);
	}
	free(source);
}
// Destroy the raster once every lock has been dropped; no-op otherwise.
static void raster_consider_destroy(struct wlr_raster *raster) {
	if (raster->n_locks > 0) {
		return;
	}
	wl_signal_emit_mutable(&raster->events.destroy, NULL);

	// we don't want to call invalidation signals as we're destroying the raster
	wl_signal_init(&raster->events.invalidated);

	struct wlr_raster_source *source, *source_tmp;
	wl_list_for_each_safe(source, source_tmp, &raster->sources, link) {
		// The raster owns these textures at this point; free them together
		// with their source entries.
		wlr_texture_destroy(source->texture);
		raster_source_destroy(source);
	}

	wl_list_remove(&raster->buffer_release.link);
	wlr_drm_syncobj_timeline_unref(raster->wait_timeline);
	free(raster);
}
// Take an additional reference on the raster; returns it for convenience.
struct wlr_raster *wlr_raster_lock(struct wlr_raster *raster) {
	raster->n_locks += 1;
	return raster;
}
// Release a reference; NULL is accepted. Destroys the raster on last unlock.
void wlr_raster_unlock(struct wlr_raster *raster) {
	if (raster == NULL) {
		return;
	}
	assert(raster->n_locks > 0);
	raster->n_locks -= 1;
	raster_consider_destroy(raster);
}
// Remove the source entry that wraps the given texture. The texture must be
// attached to this raster (asserted); passing NULL is a no-op.
static void raster_detach(struct wlr_raster *raster, struct wlr_texture *texture) {
	if (texture == NULL) {
		return;
	}
	struct wlr_raster_source *entry;
	wl_list_for_each(entry, &raster->sources, link) {
		if (entry->texture != texture) {
			continue;
		}
		raster_source_destroy(entry);
		return;
	}
	assert(false);
}
// The renderer backing a source texture is going away; drop that source.
static void handle_renderer_destroy(struct wl_listener *listener, void *data) {
	struct wlr_raster_source *entry = wl_container_of(listener, entry, renderer_destroy);
	raster_source_destroy(entry);
}
// The allocator tied to a source is going away; forget it but keep the texture.
static void handle_allocator_destroy(struct wl_listener *listener, void *data) {
	struct wlr_raster_source *entry = wl_container_of(listener, entry, allocator_destroy);
	entry->allocator = NULL;
	// Re-init so raster_source_destroy() can remove the link unconditionally.
	wl_list_remove(&entry->allocator_destroy.link);
	wl_list_init(&entry->allocator_destroy.link);
}
// Record a texture (plus the allocator it may blit through, may be NULL) as a
// source of the raster's content. The texture must match the raster's size
// and must not already be attached. Allocation failure is silently ignored:
// the texture is simply not tracked.
static void raster_attach_with_allocator(struct wlr_raster *raster,
		struct wlr_texture *texture, struct wlr_allocator *allocator) {
	assert(texture->width == raster->width && texture->height == raster->height);

	struct wlr_raster_source *entry;
	wl_list_for_each(entry, &raster->sources, link) {
		assert(entry->texture != texture);
	}

	entry = calloc(1, sizeof(*entry));
	if (entry == NULL) {
		return;
	}
	entry->raster = raster;
	entry->texture = texture;
	entry->allocator = allocator;

	entry->renderer_destroy.notify = handle_renderer_destroy;
	wl_signal_add(&texture->renderer->events.destroy, &entry->renderer_destroy);

	if (allocator != NULL) {
		entry->allocator_destroy.notify = handle_allocator_destroy;
		wl_signal_add(&allocator->events.destroy, &entry->allocator_destroy);
	} else {
		wl_list_init(&entry->allocator_destroy.link);
	}

	wl_list_insert(&raster->sources, &entry->link);
}
// Return the texture already uploaded for the given renderer, or NULL.
static struct wlr_texture *wlr_raster_get_texture(struct wlr_raster *raster,
		struct wlr_renderer *renderer) {
	struct wlr_raster_source *entry;
	wl_list_for_each(entry, &raster->sources, link) {
		if (entry->texture->renderer == renderer) {
			return entry->texture;
		}
	}
	return NULL;
}
// Compute the format (and acceptable modifiers) for a buffer that will be
// imported as a texture by the destination renderer. On success, *drm_fmt is
// initialized and must be finished by the caller.
static bool compute_import_buffer_format(struct wlr_raster *raster, struct wlr_drm_format *drm_fmt,
		struct wlr_renderer *dst) {
	const struct wlr_drm_format_set *texture_formats =
		wlr_renderer_get_texture_formats(dst, WLR_BUFFER_CAP_DMABUF);
	if (!texture_formats) {
		wlr_log(WLR_ERROR, "Failed to get texture_formats");
		// Fix: this function returns bool, not a pointer.
		return false;
	}

	// For now, let's only use XRGB
	uint32_t fmt = raster->opaque ? DRM_FORMAT_XRGB8888 : DRM_FORMAT_ARGB8888;
	const struct wlr_drm_format *dst_fmt =
		wlr_drm_format_set_get(texture_formats, fmt);
	if (!wlr_drm_format_copy(drm_fmt, dst_fmt)) {
		return false;
	}

	// Strip DRM_FORMAT_MOD_INVALID (implicit modifier) from the list.
	for (size_t i = 0; i < drm_fmt->len; i++) {
		if (drm_fmt->modifiers[i] != DRM_FORMAT_MOD_INVALID) {
			continue;
		}
		// Fix: shift the tail left starting at i. The original started at
		// i + 1 (leaving the INVALID entry in place) and read one element
		// past the end of the modifiers array.
		for (size_t j = i; j + 1 < drm_fmt->len; j++) {
			drm_fmt->modifiers[j] = drm_fmt->modifiers[j + 1];
		}
		drm_fmt->len--;
		break;
	}
	return true;
}
// Copy the raster's content into a freshly allocated buffer by rendering the
// source texture on its own GPU, so that the destination renderer can then
// import the result. Returns a buffer owned by the caller, or NULL.
static struct wlr_buffer *raster_try_blit(struct wlr_raster *raster,
		struct wlr_raster_source *source, struct wlr_renderer *dst) {
	if (!source->allocator) {
		return NULL;
	}

	wlr_log(WLR_DEBUG, "Attempting a multigpu blit through a GPU");
	struct wlr_renderer *src = source->texture->renderer;

	// The src needs to be able to render into this format
	const struct wlr_drm_format_set *render_formats =
		wlr_renderer_get_render_formats(src);
	if (!render_formats) {
		wlr_log(WLR_ERROR, "Failed to get render_formats");
		return NULL;
	}

	struct wlr_drm_format fmt = {0};
	if (!compute_import_buffer_format(raster, &fmt, dst)) {
		wlr_log(WLR_ERROR, "Could not find a common format modifiers for all GPUs");
		return NULL;
	}
	// Fix: the original check was inverted — intersection *failure* (no
	// modifiers in common between src render formats and dst texture
	// formats) is the error case.
	if (!wlr_drm_format_intersect(&fmt, &fmt,
			wlr_drm_format_set_get(render_formats, fmt.format))) {
		wlr_drm_format_finish(&fmt);
		return NULL;
	}

	struct wlr_buffer *buffer = wlr_allocator_create_buffer(
		source->allocator, raster->width, raster->height, &fmt);
	wlr_drm_format_finish(&fmt);
	if (!buffer) {
		wlr_log(WLR_ERROR, "Failed to allocate multirenderer blit buffer");
		return NULL;
	}

	// Fix: timeline was left uninitialized when the renderer lacks timeline
	// support or has no DRM FD, and was then read below (undefined behavior).
	struct wlr_drm_syncobj_timeline *timeline = NULL;
	int drm_fd = wlr_renderer_get_drm_fd(src);
	if (src->features.timeline && drm_fd >= 0) {
		timeline = wlr_drm_syncobj_timeline_create(drm_fd);
	}

	const struct wlr_buffer_pass_options pass_options = {
		.signal_timeline = timeline,
		.signal_point = 1,
	};
	struct wlr_render_pass *pass = wlr_renderer_begin_buffer_pass(src, buffer, &pass_options);
	if (!pass) {
		wlr_log(WLR_ERROR, "Failed to create a render pass");
		// Fix: don't leak the timeline on this error path.
		wlr_drm_syncobj_timeline_unref(timeline);
		wlr_buffer_drop(buffer);
		return NULL;
	}

	// NOTE(review): waiting on the same timeline/point this pass signals
	// looks suspicious — should this be raster->wait_timeline/wait_point?
	// Confirm against the syncobj design.
	wlr_render_pass_add_texture(pass, &(struct wlr_render_texture_options) {
		.texture = source->texture,
		.blend_mode = WLR_RENDER_BLEND_MODE_NONE,
		.wait_timeline = timeline,
		.wait_point = 1,
	});
	wlr_drm_syncobj_timeline_unref(timeline);

	if (!wlr_render_pass_submit(pass)) {
		wlr_log(WLR_ERROR, "Failed to render to a multigpu blit buffer");
		wlr_buffer_drop(buffer);
		return NULL;
	}
	return buffer;
}
static struct wlr_texture *raster_try_texture_from_blit(struct wlr_raster *raster,
struct wlr_renderer *renderer) {
struct wlr_buffer *imported = NULL;
struct wlr_raster_source *source;
wl_list_for_each(source, &raster->sources, link) {
imported = raster_try_blit(raster, source, renderer);
if (imported) {
break;
}
}
if (!imported) {
return NULL;
}
wlr_buffer_drop(imported);
return wlr_texture_from_buffer(renderer, imported);
}
// Last-resort multi-GPU path: read a source texture back into CPU memory and
// re-upload the pixels to the destination renderer. Returns NULL if no source
// could be read back and re-uploaded.
static struct wlr_texture *raster_try_cpu_copy(struct wlr_raster *raster,
		struct wlr_renderer *dst) {
	if (wl_list_empty(&raster->sources)) {
		return NULL;
	}
	wlr_log(WLR_DEBUG, "Performing multigpu blit through the CPU");

	struct wlr_texture *texture = NULL;
	uint32_t format = DRM_FORMAT_ARGB8888;
	uint32_t stride = raster->width * 4; // 4 bytes per ARGB8888 pixel
	void *data = malloc(stride * raster->height);
	if (!data) {
		return NULL;
	}

	// Try each source in turn until one can be both read back and uploaded.
	struct wlr_raster_source *source;
	wl_list_for_each(source, &raster->sources, link) {
		if (!wlr_texture_read_pixels(source->texture, &(struct wlr_texture_read_pixels_options){
				.format = format,
				.stride = stride,
				.data = data,
			})) {
			wlr_log(WLR_ERROR, "Failed to read pixels");
			continue;
		}
		texture = wlr_texture_from_pixels(dst, format,
			stride, raster->width, raster->height, data);
		if (!texture) {
			wlr_log(WLR_ERROR, "Failed to upload texture from cpu data");
			continue;
		}
		break;
	}
	free(data);
	return texture;
}
// Obtain (creating if necessary) a texture holding the raster's content that
// is usable with `renderer`. `allocator` (optional) is remembered alongside a
// newly created texture so later cross-GPU blits can allocate through it.
// Fallback order: cached texture → client buffer texture / direct import →
// GPU blit from an existing source → import via the linux-dmabuf main device
// then blit → CPU copy. Returns NULL if every strategy fails.
struct wlr_texture *wlr_raster_obtain_texture_with_allocator(struct wlr_raster *raster,
		struct wlr_renderer *renderer, struct wlr_allocator *allocator) {
	// Fast path: a texture for this renderer already exists.
	struct wlr_texture *texture = wlr_raster_get_texture(raster, renderer);
	if (texture) {
		return texture;
	}

	if (raster->buffer) {
		// Legacy path: a wlr_client_buffer already owns a texture.
		struct wlr_client_buffer *client_buffer =
			wlr_client_buffer_get(raster->buffer);
		if (client_buffer != NULL) {
			return client_buffer->texture;
		}

		// if we have a buffer, try and import that
		texture = wlr_texture_from_buffer(renderer, raster->buffer);
		if (texture) {
			raster_attach_with_allocator(raster, texture, allocator);
			return texture;
		}
	}

	// try to blit using the textures already available to us
	texture = raster_try_texture_from_blit(raster, renderer);
	if (texture) {
		raster_attach_with_allocator(raster, texture, allocator);
		return texture;
	}

	// if this is a linux_dmabuf_v1 buffer, then we can try to use the
	// main device for blitting which should support all the modifiers we
	// advertise.
	if (raster->buffer) {
		struct wlr_dmabuf_v1_buffer *dmabuf_buffer =
			wlr_dmabuf_v1_buffer_try_from_buffer(raster->buffer);
		if (dmabuf_buffer && dmabuf_buffer->linux_dmabuf_v1->main_renderer) {
			struct wlr_linux_dmabuf_v1 *linux_dmabuf = dmabuf_buffer->linux_dmabuf_v1;
			// Note: this inner `texture` intentionally shadows the outer one.
			struct wlr_texture *texture = wlr_texture_from_buffer(
				linux_dmabuf->main_renderer, raster->buffer);
			if (texture) {
				raster_attach_with_allocator(raster, texture,
					linux_dmabuf->main_allocator);
				// try to create a blit but this time through the primary device
				texture = raster_try_texture_from_blit(raster, renderer);
				if (texture) {
					raster_attach_with_allocator(raster, texture, allocator);
					return texture;
				}
			}
		}
	}

	// as a last resort we need to do a copy through the CPU
	texture = raster_try_cpu_copy(raster, renderer);
	if (texture) {
		raster_attach_with_allocator(raster, texture, allocator);
		return texture;
	}
	return NULL;
}
// Convenience wrapper: obtain a texture without a preferred allocator.
struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster *raster,
		struct wlr_renderer *renderer) {
	struct wlr_allocator *no_allocator = NULL;
	return wlr_raster_obtain_texture_with_allocator(raster, renderer, no_allocator);
}
// Tracks a pending texture hand-off from an old raster to the raster created
// for a newly committed buffer (see raster_update()).
struct raster_update_state {
	struct wlr_buffer *buffer; // the new buffer textures will be updated from
	pixman_region32_t damage; // region of `buffer` that differs from the old content
	struct wlr_raster *new_raster;
	struct wlr_raster *old_raster;

	struct wl_listener old_raster_destroy;
	struct wl_listener new_raster_destroy;
	struct wl_listener buffer_release;
};
// Tear down the pending-update tracking state and all of its listeners.
static void destroy_raster_update_state(struct raster_update_state *state) {
	wl_list_remove(&state->buffer_release.link);
	wl_list_remove(&state->new_raster_destroy.link);
	wl_list_remove(&state->old_raster_destroy.link);
	pixman_region32_fini(&state->damage);
	free(state);
}
// The new raster died before the old one: the pending texture hand-off can
// never happen, so drop the tracking state.
static void raster_update_handle_new_raster_destroy(struct wl_listener *listener, void *data) {
	struct raster_update_state *update = wl_container_of(listener, update, new_raster_destroy);
	destroy_raster_update_state(update);
}
// The old raster is being destroyed: try to migrate its textures to the new
// raster by applying the damaged region of the new buffer in-place.
static void raster_update_handle_old_raster_destroy(struct wl_listener *listener, void *data) {
	struct raster_update_state *state = wl_container_of(listener, state, old_raster_destroy);

	// if the new raster already has a texture, there's nothing we can do to help.
	if (!wl_list_empty(&state->new_raster->sources)) {
		destroy_raster_update_state(state);
		return;
	}

	struct wlr_raster_source *source, *tmp_source;
	wl_list_for_each_safe(source, tmp_source, &state->old_raster->sources, link) {
		struct wlr_texture *texture = source->texture;
		struct wlr_allocator *allocator = source->allocator;
		// On a successful in-place update the texture now holds the new
		// buffer's content, so move it over to the new raster.
		if (wlr_texture_update_from_buffer(texture, state->buffer, &state->damage)) {
			raster_detach(state->old_raster, texture);
			raster_attach_with_allocator(state->new_raster, texture, allocator);
		}
	}
	destroy_raster_update_state(state);
}
// The new buffer was released before the hand-off could happen; updating
// textures from it is no longer possible, so cancel the pending update.
static void raster_update_handle_buffer_release(struct wl_listener *listener, void *data) {
	struct raster_update_state *update = wl_container_of(listener, update, buffer_release);
	destroy_raster_update_state(update);
}
// Create a new raster for `buffer`, and arrange for the old raster's textures
// to be recycled into it (via wlr_texture_update_from_buffer with the given
// damage) once the old raster is destroyed. The hand-off is cancelled if the
// new raster dies or the buffer is released first.
static struct wlr_raster *raster_update(struct wlr_raster *raster,
		struct wlr_buffer *buffer, const pixman_region32_t *damage,
		const struct wlr_raster_create_options *options) {
	struct raster_update_state *state = calloc(1, sizeof(*state));
	if (!state) {
		return NULL;
	}
	struct wlr_raster *new_raster = wlr_raster_create(buffer, options);
	if (!new_raster) {
		free(state);
		return NULL;
	}

	state->old_raster_destroy.notify = raster_update_handle_old_raster_destroy;
	wl_signal_add(&raster->events.destroy, &state->old_raster_destroy);
	state->new_raster_destroy.notify = raster_update_handle_new_raster_destroy;
	wl_signal_add(&new_raster->events.destroy, &state->new_raster_destroy);
	state->buffer_release.notify = raster_update_handle_buffer_release;
	wl_signal_add(&buffer->events.release, &state->buffer_release);

	state->new_raster = new_raster;
	state->old_raster = raster;
	state->buffer = buffer;
	pixman_region32_init(&state->damage);
	pixman_region32_copy(&state->damage, damage);
	return new_raster;
}
// Per-wlr_surface addon that caches the raster representing the surface's
// latest content across commits.
struct surface_raster {
	struct wlr_raster *raster; // currently cached raster, may be NULL
	struct wlr_surface *surface;
	struct wlr_addon addon; // attached to surface->addons

	struct wl_listener buffer_prerelease;
	struct wl_listener raster_invalidated;
	bool locking_buffer; // true while we hold an extra lock on raster->buffer
};
// Release the surface's reference on its raster (and the extra buffer lock,
// if one is held because texture upload failed at prerelease time).
static void surface_raster_drop_raster(struct surface_raster *surface_raster) {
	if (surface_raster->locking_buffer) {
		wlr_buffer_unlock(surface_raster->raster->buffer);
		surface_raster->locking_buffer = false;
	}
	// Unlock before clearing: wlr_raster_unlock() accepts NULL, so a second
	// call on an already-dropped raster is harmless.
	wlr_raster_unlock(surface_raster->raster);
	surface_raster->raster = NULL;
}
// Free the per-surface raster tracking state and detach all listeners.
static void surface_raster_destroy(struct surface_raster *surface_raster) {
	surface_raster_drop_raster(surface_raster);
	wl_list_remove(&surface_raster->raster_invalidated.link);
	wl_list_remove(&surface_raster->buffer_prerelease.link);
	wlr_addon_finish(&surface_raster->addon);
	free(surface_raster);
}
// The surface is going away; destroy the tracking state with it.
static void surface_raster_handle_addon_destroy(struct wlr_addon *addon) {
	struct surface_raster *sr = wl_container_of(addon, sr, addon);
	surface_raster_destroy(sr);
}
// The surface's current buffer is about to be released: make sure every
// output the surface is shown on has a texture copy first, so the content
// survives the buffer going away.
static void surface_raster_handle_buffer_prerelease(struct wl_listener *listener, void *data) {
	struct surface_raster *surface_raster =
		wl_container_of(listener, surface_raster, buffer_prerelease);
	struct wlr_raster *raster = surface_raster->raster;

	struct wlr_surface_output *output;
	wl_list_for_each(output, &surface_raster->surface->current_outputs, link) {
		wlr_raster_obtain_texture_with_allocator(raster,
			output->output->renderer, output->output->allocator);
	}

	// if there was a failed texture upload, keep on locking the buffer
	if (wl_list_empty(&raster->sources)) {
		wlr_buffer_lock(raster->buffer);
		surface_raster->locking_buffer = true;
	}

	// One-shot listener: detach but keep the link valid for later removal.
	wl_list_remove(&surface_raster->buffer_prerelease.link);
	wl_list_init(&surface_raster->buffer_prerelease.link);
}
// The raster lost all of its content (buffer released, no textures left):
// ask the client to resubmit via the surface-invalidation protocol.
static void surface_raster_handle_raster_invalidated(struct wl_listener *listener, void *data) {
	struct surface_raster *sr =
		wl_container_of(listener, sr, raster_invalidated);
	wlr_surface_invalidation_manager_v1_send_surface_invalidation(sr->surface);
}
// Addon vtable used to find/destroy the per-surface raster state.
// NOTE(review): unlike output_addon_impl this is not `static` — confirm
// whether external linkage is intended or an oversight.
const struct wlr_addon_interface surface_raster_addon_impl = {
	.name = "wlr_raster_surface",
	.destroy = surface_raster_handle_addon_destroy,
};
// Fetch the surface's raster tracking addon, or NULL if none was created yet.
static struct surface_raster *get_surface_raster(struct wlr_surface *surface) {
	struct wlr_addon *addon = wlr_addon_find(&surface->addons, NULL,
		&surface_raster_addon_impl);
	if (addon == NULL) {
		return NULL;
	}
	struct surface_raster *sr = wl_container_of(addon, sr, addon);
	return sr;
}
// Because wlr_raster doesn't lock the buffer itself, we need something extra
// to keep client buffer locked when operating in legacy mode.
struct client_buffer_compat {
	struct wlr_client_buffer *buffer; // kept locked while the raster lives
	struct wl_listener destroy; // raster destroy listener
};
// The raster is gone; drop the lock that kept the client buffer alive.
static void client_buffer_compat_raster_destroy(struct wl_listener *listener, void *data) {
	struct client_buffer_compat *compat = wl_container_of(listener, compat, destroy);
	wl_list_remove(&compat->destroy.link);
	wlr_buffer_unlock(&compat->buffer->base);
	free(compat);
}
// Produce a wlr_raster representing the surface's current content.
// - Legacy path (compositor has a fixed renderer): wrap the wlr_client_buffer
//   and keep it locked for the raster's lifetime via client_buffer_compat.
// - Renderer-agnostic path: maintain a per-surface raster (surface_raster
//   addon), recycling textures across commits via raster_update() and
//   uploading textures before buffer release via the prerelease listener.
// Returns a locked raster (caller must wlr_raster_unlock()), or NULL.
struct wlr_raster *wlr_raster_from_surface(struct wlr_surface *surface) {
	struct wlr_linux_drm_syncobj_surface_v1_state *syncobj_surface_state =
		wlr_linux_drm_syncobj_v1_get_surface_state(surface);
	struct wlr_raster_create_options options = {0};
	if (syncobj_surface_state) {
		options.wait_timeline = syncobj_surface_state->acquire_timeline;
		options.wait_point = syncobj_surface_state->acquire_point;
	}

	if (surface->compositor->renderer) {
		// use legacy wlr_client_buffer
		if (!surface->buffer) {
			return NULL;
		}

		struct client_buffer_compat *compat = calloc(1, sizeof(*compat));
		if (!compat) {
			return NULL;
		}

		struct wlr_raster *raster = wlr_raster_create(&surface->buffer->base, &options);
		if (!raster) {
			free(compat);
			return NULL;
		}

		// Keep the client buffer alive until the raster goes away.
		compat->destroy.notify = client_buffer_compat_raster_destroy;
		wl_signal_add(&raster->events.destroy, &compat->destroy);
		compat->buffer = surface->buffer;
		wlr_buffer_lock(&surface->buffer->base);
		return raster;
	}

	// Lazily create the per-surface tracking state.
	struct surface_raster *surface_raster = get_surface_raster(surface);
	if (!surface_raster) {
		surface_raster = calloc(1, sizeof(*surface_raster));
		if (!surface_raster) {
			return NULL;
		}
		surface_raster->surface = surface;
		wlr_addon_init(&surface_raster->addon, &surface->addons, NULL,
			&surface_raster_addon_impl);
		surface_raster->buffer_prerelease.notify = surface_raster_handle_buffer_prerelease;
		wl_list_init(&surface_raster->buffer_prerelease.link);
		surface_raster->raster_invalidated.notify = surface_raster_handle_raster_invalidated;
		wl_list_init(&surface_raster->raster_invalidated.link);
	}

	if (!surface->current.buffer) {
		// surface is mapped but it hasn't committed a new buffer. We need to keep
		// using the old one
		if (wlr_surface_has_buffer(surface)) {
			if (surface_raster->raster) {
				return wlr_raster_lock(surface_raster->raster);
			} else {
				return NULL;
			}
		}

		// No content at all: disarm the listeners and drop the raster.
		wl_list_remove(&surface_raster->buffer_prerelease.link);
		wl_list_init(&surface_raster->buffer_prerelease.link);
		wl_list_remove(&surface_raster->raster_invalidated.link);
		wl_list_init(&surface_raster->raster_invalidated.link);
		surface_raster_drop_raster(surface_raster);
		return NULL;
	}

	struct wlr_raster *raster;
	if (surface_raster->raster) {
		// make sure we haven't already seen this buffer
		if (surface_raster->raster->buffer == surface->current.buffer) {
			return wlr_raster_lock(surface_raster->raster);
		}

		// before we try to update the old raster, remove obsolete textures
		struct wlr_raster_source *source, *tmp_source;
		wl_list_for_each_safe(source, tmp_source, &surface_raster->raster->sources, link) {
			struct wlr_texture *texture = source->texture;

			// A texture is obsolete when no output showing this surface uses
			// its renderer anymore.
			bool found = false;
			struct wlr_surface_output *output;
			wl_list_for_each(output, &surface->current_outputs, link) {
				if (output->output->renderer == texture->renderer) {
					found = true;
					break;
				}
			}
			if (!found) {
				raster_detach(surface_raster->raster, texture);
				wlr_texture_destroy(texture);
			}
		}

		raster = raster_update(surface_raster->raster,
			surface->current.buffer, &surface->buffer_damage, &options);
	} else {
		raster = wlr_raster_create(surface->current.buffer, &options);
	}
	if (!raster) {
		return NULL;
	}

	// Re-arm the invalidation and prerelease listeners for the new raster and
	// buffer, then take over the reference.
	wl_list_remove(&surface_raster->raster_invalidated.link);
	wl_signal_add(&raster->events.invalidated, &surface_raster->raster_invalidated);
	surface_raster_drop_raster(surface_raster);
	surface_raster->raster = wlr_raster_lock(raster);
	wl_list_remove(&surface_raster->buffer_prerelease.link);
	wl_signal_add(&surface->current.buffer->events.prerelease, &surface_raster->buffer_prerelease);
	wlr_surface_consume(surface);
	return raster;
}

View file

@ -0,0 +1,204 @@
#include <assert.h>
#include <stdlib.h>
#include <wlr/types/wlr_surface_invalidation_v1.h>
#include <wlr/types/wlr_compositor.h>
#include <wlr/util/addon.h>
#include "surface-invalidation-v1-protocol.h"
// Highest protocol version implemented by this file.
#define SURFACE_INVALIDATION_MANAGER_VERSION 1

// A configure (invalidation) event sent to the client, awaiting an ack.
struct wlr_surface_invalidation_v1_configure {
	struct wl_list link; // struct wlr_surface_invalidation_v1.configures
	uint32_t serial;
	bool configured; // set once the client has ack'ed this serial
};

// Per-surface protocol object state, attached to the wlr_surface as an addon.
struct wlr_surface_invalidation_v1 {
	struct wl_resource *resource;
	struct wl_list configures; // struct wlr_surface_invalidation_v1_configure.link
	struct wlr_addon addon;
};
// Unlink and free a pending configure entry.
static void wlr_surface_invalidation_v1_configure_destroy(
		struct wlr_surface_invalidation_v1_configure *configure) {
	wl_list_remove(&configure->link);
	free(configure);
}
static const struct wp_surface_invalidation_v1_interface surface_inval_impl;

// Checked downcast from a wl_resource to our per-surface state.
static struct wlr_surface_invalidation_v1 *surface_invalidation_v1_from_resource(
		struct wl_resource *resource) {
	assert(wl_resource_instance_of(resource, &wp_surface_invalidation_v1_interface,
		&surface_inval_impl));
	return wl_resource_get_user_data(resource);
}
// wl_resource destructor: tears down the per-surface state and any configures
// still pending.
static void surface_handle_resource_destroy(struct wl_resource *resource) {
	struct wlr_surface_invalidation_v1 *surface =
		surface_invalidation_v1_from_resource(resource);
	surface->resource = NULL;
	// Detach from the wlr_surface so it no longer references this
	// (about-to-be-freed) state.
	wlr_addon_finish(&surface->addon);
	struct wlr_surface_invalidation_v1_configure *configure, *tmp_configure;
	wl_list_for_each_safe(configure, tmp_configure, &surface->configures, link) {
		wlr_surface_invalidation_v1_configure_destroy(configure);
	}
	free(surface);
}
// Handler for wp_surface_invalidation_v1.ack: marks the matching configure as
// acknowledged and prunes the entries that preceded it in the list.
static void surface_inval_handle_ack(struct wl_client *client,
		struct wl_resource *resource, uint32_t serial) {
	struct wlr_surface_invalidation_v1 *surface =
		surface_invalidation_v1_from_resource(resource);

	// First find the ack'ed configure
	bool found = false;
	struct wlr_surface_invalidation_v1_configure *configure, *tmp_configure;
	wl_list_for_each(configure, &surface->configures, link) {
		if (configure->serial == serial) {
			found = true;
			break;
		}
	}
	if (!found) {
		// Unknown serials are silently ignored for now.
		/*
		TODO: What do we do here?
		wl_resource_post_error(resource,
			ZWLR_LAYER_SURFACE_V1_ERROR_INVALID_SURFACE_STATE,
			"wrong configure serial: %" PRIu32, serial);
		*/
		return;
	}
	configure->configured = true;

	// Then remove old configures from the list
	// NOTE(review): this assumes the list is ordered oldest-first, but the
	// send path inserts at the head (newest-first), which would make these
	// the *newest* entries — confirm the intended insertion order.
	wl_list_for_each_safe(configure, tmp_configure, &surface->configures, link) {
		if (configure->serial == serial) {
			break;
		}
		wlr_surface_invalidation_v1_configure_destroy(configure);
	}
}
// Shared handler for the protocol's `destroy` requests.
static void destroy_resource(struct wl_client *client,
		struct wl_resource *resource) {
	wl_resource_destroy(resource);
}
// Request dispatch table for wp_surface_invalidation_v1.
static const struct wp_surface_invalidation_v1_interface surface_inval_impl = {
	.destroy = destroy_resource,
	.ack = surface_inval_handle_ack,
};
// The wlr_surface is being destroyed: destroy the protocol resource too; its
// destructor (surface_handle_resource_destroy) frees our state.
static void surface_addon_handle_destroy(struct wlr_addon *addon) {
	struct wlr_surface_invalidation_v1 *surface = wl_container_of(addon, surface, addon);
	wl_resource_destroy(surface->resource);
}

// Addon vtable tying per-surface state to the wlr_surface lifetime.
static const struct wlr_addon_interface surface_addon_impl = {
	.name = "surface_invalidation_v1",
	.destroy = surface_addon_handle_destroy,
};
static const struct wp_surface_invalidation_manager_v1_interface manager_impl;

// Handler for wp_surface_invalidation_manager_v1.get_surface_invalidation:
// creates the per-surface invalidation object.
static void manager_handle_get_surface_invalidation(struct wl_client *client,
		struct wl_resource *resource, uint32_t id, struct wl_resource *surface_resource) {
	struct wlr_surface *wlr_surface = wlr_surface_from_resource(surface_resource);

	struct wlr_surface_invalidation_v1 *surface = calloc(1, sizeof(*surface));
	if (!surface) {
		wl_client_post_no_memory(client);
		return;
	}
	// Fix: inherit the version from the manager resource instead of
	// hard-coding 1, so the object tracks the bound protocol version.
	surface->resource = wl_resource_create(client,
		&wp_surface_invalidation_v1_interface,
		wl_resource_get_version(resource), id);
	if (!surface->resource) {
		wl_client_post_no_memory(client);
		free(surface);
		return;
	}
	wl_list_init(&surface->configures);
	wlr_addon_init(&surface->addon, &wlr_surface->addons, NULL, &surface_addon_impl);
	wl_resource_set_implementation(surface->resource,
		&surface_inval_impl, surface, surface_handle_resource_destroy);
}
// Request dispatch table for wp_surface_invalidation_manager_v1.
static const struct wp_surface_invalidation_manager_v1_interface manager_impl = {
	.destroy = destroy_resource,
	.get_surface_invalidation = manager_handle_get_surface_invalidation,
};
// Bind handler for the wp_surface_invalidation_manager_v1 global.
static void manager_bind(struct wl_client *client, void *data,
		uint32_t version, uint32_t id) {
	struct wlr_surface_invalidation_manager_v1 *manager = data;

	struct wl_resource *resource = wl_resource_create(client,
		&wp_surface_invalidation_manager_v1_interface, version, id);
	if (resource == NULL) {
		wl_client_post_no_memory(client);
		return;
	}
	wl_resource_set_implementation(resource, &manager_impl, manager, NULL);
}
// The wl_display is being destroyed: emit our destroy event and free the
// manager.
static void handle_display_destroy(struct wl_listener *listener, void *data) {
	struct wlr_surface_invalidation_manager_v1 *manager =
		wl_container_of(listener, manager, display_destroy);
	wl_signal_emit_mutable(&manager->events.destroy, NULL);
	// Fix: unlink the destroy listener before freeing the manager, matching
	// the usual wlroots pattern and avoiding a dangling link in the display's
	// listener list.
	wl_list_remove(&manager->display_destroy.link);
	wl_global_destroy(manager->global);
	free(manager);
}
// Create the wp_surface_invalidation_manager_v1 global. `version` must not
// exceed the implemented protocol version.
struct wlr_surface_invalidation_manager_v1 *wlr_surface_invalidation_manager_v1_create(
		struct wl_display *display, uint32_t version) {
	assert(version <= SURFACE_INVALIDATION_MANAGER_VERSION);

	struct wlr_surface_invalidation_manager_v1 *manager = calloc(1, sizeof(*manager));
	if (manager == NULL) {
		return NULL;
	}
	wl_signal_init(&manager->events.destroy);

	manager->global = wl_global_create(display, &wp_surface_invalidation_manager_v1_interface,
		version, manager, manager_bind);
	if (manager->global == NULL) {
		free(manager);
		return NULL;
	}

	manager->display_destroy.notify = handle_display_destroy;
	wl_display_add_destroy_listener(display, &manager->display_destroy);
	return manager;
}
// Notify the client that the surface's content was lost, handing it a serial
// to ack once it has resubmitted. No-op for surfaces without an invalidation
// object.
void wlr_surface_invalidation_manager_v1_send_surface_invalidation(
		struct wlr_surface *wlr_surface) {
	struct wlr_addon *addon = wlr_addon_find(
		&wlr_surface->addons, NULL, &surface_addon_impl);
	if (!addon) {
		return;
	}
	struct wlr_surface_invalidation_v1 *surface =
		wl_container_of(addon, surface, addon);
	struct wl_display *display =
		wl_client_get_display(wl_resource_get_client(surface->resource));

	struct wlr_surface_invalidation_v1_configure *configure = calloc(1, sizeof(*configure));
	if (!configure) {
		wl_client_post_no_memory(wl_resource_get_client(surface->resource));
		return;
	}
	configure->serial = wl_display_next_serial(display);
	// Fix: append to the tail so the configure list stays oldest-first. The
	// ack handler discards the entries *preceding* the ack'ed serial; with
	// the original head-insertion it would have discarded newer configures
	// instead of older ones.
	wl_list_insert(surface->configures.prev, &configure->link);
	wp_surface_invalidation_v1_send_invalidated(surface->resource, configure->serial);
}