Merge branch 'gpu-reset-recover' into 'master'

Automatic GPU reset recovery

See merge request wlroots/wlroots!3910
This commit is contained in:
Alexander Orzechowski 2024-10-09 09:31:41 +00:00
commit 863ed02d54
29 changed files with 1822 additions and 541 deletions

View file

@ -240,46 +240,44 @@ static struct wlr_backend *attempt_headless_backend(struct wl_event_loop *loop)
return backend;
}
static struct wlr_backend *attempt_drm_backend(struct wlr_backend *backend, struct wlr_session *session) {
static bool attempt_drm_backend(struct wlr_backend *backend, struct wlr_session *session) {
#if WLR_HAS_DRM_BACKEND
struct wlr_device *gpus[8];
ssize_t num_gpus = wlr_session_find_gpus(session, 8, gpus);
if (num_gpus < 0) {
wlr_log(WLR_ERROR, "Failed to find GPUs");
return NULL;
return false;
}
if (num_gpus == 0) {
wlr_log(WLR_ERROR, "Found 0 GPUs, cannot create backend");
return NULL;
return false;
} else {
wlr_log(WLR_INFO, "Found %zu GPUs", num_gpus);
}
struct wlr_backend *primary_drm = NULL;
bool ok = false;
for (size_t i = 0; i < (size_t)num_gpus; ++i) {
struct wlr_backend *drm = wlr_drm_backend_create(session, gpus[i], primary_drm);
struct wlr_backend *drm = wlr_drm_backend_create(session, gpus[i]);
if (!drm) {
wlr_log(WLR_ERROR, "Failed to create DRM backend");
continue;
}
if (!primary_drm) {
primary_drm = drm;
}
wlr_multi_backend_add(backend, drm);
ok = true;
}
if (!primary_drm) {
if (!ok) {
wlr_log(WLR_ERROR, "Could not successfully create backend on any GPU");
return NULL;
return false;
}
if (getenv("WLR_DRM_DEVICES") == NULL) {
drm_backend_monitor_create(backend, primary_drm, session);
drm_backend_monitor_create(backend, session);
}
return primary_drm;
return true;
#else
wlr_log(WLR_ERROR, "Cannot create DRM backend: disabled at compile-time");
return NULL;
@ -319,7 +317,7 @@ static bool attempt_backend_by_name(struct wl_event_loop *loop,
backend = attempt_libinput_backend(*session_ptr);
} else {
// attempt_drm_backend() adds the multi drm backends itself
return attempt_drm_backend(multi, *session_ptr) != NULL;
return attempt_drm_backend(multi, *session_ptr);
}
} else {
wlr_log(WLR_ERROR, "unrecognized backend '%s'", name);
@ -423,16 +421,11 @@ struct wlr_backend *wlr_backend_autocreate(struct wl_event_loop *loop,
goto error;
}
struct wlr_backend *primary_drm = attempt_drm_backend(multi, session);
if (primary_drm == NULL) {
if (!attempt_drm_backend(multi, session)) {
wlr_log(WLR_ERROR, "Failed to open any DRM device");
goto error;
}
if (!auto_backend_monitor_create(multi, primary_drm)) {
goto error;
}
success:
if (session_ptr != NULL) {
*session_ptr = session;

View file

@ -275,9 +275,9 @@ bool drm_atomic_connector_prepare(struct wlr_drm_connector_state *state, bool mo
}
int in_fence_fd = -1;
if (state->wait_timeline != NULL) {
in_fence_fd = wlr_drm_syncobj_timeline_export_sync_file(state->wait_timeline,
state->wait_point);
if (state->base->committed & WLR_OUTPUT_STATE_WAIT_TIMELINE) {
in_fence_fd = wlr_drm_syncobj_timeline_export_sync_file(state->base->wait_timeline,
state->base->wait_point);
if (in_fence_fd < 0) {
return false;
}

View file

@ -49,14 +49,9 @@ static void backend_destroy(struct wlr_backend *backend) {
wl_list_remove(&drm->session_destroy.link);
wl_list_remove(&drm->session_active.link);
wl_list_remove(&drm->parent_destroy.link);
wl_list_remove(&drm->dev_change.link);
wl_list_remove(&drm->dev_remove.link);
if (drm->parent) {
finish_drm_renderer(&drm->mgpu_renderer);
}
finish_drm_resources(drm);
struct wlr_drm_fb *fb, *fb_tmp;
@ -104,11 +99,6 @@ bool wlr_backend_is_drm(struct wlr_backend *b) {
return b->impl == &backend_impl;
}
struct wlr_backend *wlr_drm_backend_get_parent(struct wlr_backend *backend) {
struct wlr_drm_backend *drm = get_drm_backend_from_backend(backend);
return drm->parent ? &drm->parent->backend : NULL;
}
static void handle_session_active(struct wl_listener *listener, void *data) {
struct wlr_drm_backend *drm =
wl_container_of(listener, drm, session_active);
@ -159,16 +149,8 @@ static void handle_session_destroy(struct wl_listener *listener, void *data) {
backend_destroy(&drm->backend);
}
static void handle_parent_destroy(struct wl_listener *listener, void *data) {
struct wlr_drm_backend *drm =
wl_container_of(listener, drm, parent_destroy);
backend_destroy(&drm->backend);
}
struct wlr_backend *wlr_drm_backend_create(struct wlr_session *session,
struct wlr_device *dev, struct wlr_backend *parent) {
struct wlr_backend *wlr_drm_backend_create(struct wlr_session *session, struct wlr_device *dev) {
assert(session && dev);
assert(!parent || wlr_backend_is_drm(parent));
char *name = drmGetDeviceNameFromFd2(dev->fd);
if (name == NULL) {
@ -201,15 +183,6 @@ struct wlr_backend *wlr_drm_backend_create(struct wlr_session *session,
drm->fd = dev->fd;
drm->name = name;
if (parent != NULL) {
drm->parent = get_drm_backend_from_backend(parent);
drm->parent_destroy.notify = handle_parent_destroy;
wl_signal_add(&parent->events.destroy, &drm->parent_destroy);
} else {
wl_list_init(&drm->parent_destroy.link);
}
drm->dev_change.notify = handle_dev_change;
wl_signal_add(&dev->events.change, &drm->dev_change);
@ -234,52 +207,17 @@ struct wlr_backend *wlr_drm_backend_create(struct wlr_session *session,
goto error_event;
}
if (drm->parent) {
if (!init_drm_renderer(drm, &drm->mgpu_renderer)) {
wlr_log(WLR_ERROR, "Failed to initialize renderer");
goto error_resources;
}
// We'll perform a multi-GPU copy for all submitted buffers, we need
// to be able to texture from them
struct wlr_renderer *renderer = drm->mgpu_renderer.wlr_rend;
const struct wlr_drm_format_set *texture_formats =
wlr_renderer_get_texture_formats(renderer, WLR_BUFFER_CAP_DMABUF);
if (texture_formats == NULL) {
wlr_log(WLR_ERROR, "Failed to query renderer texture formats");
goto error_mgpu_renderer;
}
// Forbid implicit modifiers, because their meaning changes from one
// GPU to another.
for (size_t i = 0; i < texture_formats->len; i++) {
const struct wlr_drm_format *fmt = &texture_formats->formats[i];
for (size_t j = 0; j < fmt->len; j++) {
uint64_t mod = fmt->modifiers[j];
if (mod == DRM_FORMAT_MOD_INVALID) {
continue;
}
wlr_drm_format_set_add(&drm->mgpu_formats, fmt->format, mod);
}
}
}
drm->session_destroy.notify = handle_session_destroy;
wl_signal_add(&session->events.destroy, &drm->session_destroy);
return &drm->backend;
error_mgpu_renderer:
finish_drm_renderer(&drm->mgpu_renderer);
error_resources:
finish_drm_resources(drm);
error_event:
wl_list_remove(&drm->session_active.link);
wl_event_source_remove(drm->drm_event);
error_fd:
wl_list_remove(&drm->dev_remove.link);
wl_list_remove(&drm->dev_change.link);
wl_list_remove(&drm->parent_destroy.link);
wlr_session_close_file(drm->session, dev);
free(drm->name);
free(drm);

View file

@ -65,15 +65,6 @@ bool check_drm_features(struct wlr_drm_backend *drm) {
return false;
}
if (drm->parent) {
if (drmGetCap(drm->parent->fd, DRM_CAP_PRIME, &cap) ||
!(cap & DRM_PRIME_CAP_EXPORT)) {
wlr_log(WLR_ERROR,
"PRIME export not supported on primary GPU");
return false;
}
}
if (drmSetClientCap(drm->fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1)) {
wlr_log(WLR_ERROR, "DRM universal planes unsupported");
return false;
@ -369,8 +360,6 @@ static void drm_plane_finish_surface(struct wlr_drm_plane *plane) {
drm_fb_clear(&plane->queued_fb);
drm_fb_clear(&plane->current_fb);
finish_drm_surface(&plane->mgpu_surf);
}
void finish_drm_resources(struct wlr_drm_backend *drm) {
@ -709,52 +698,8 @@ static bool drm_connector_state_update_primary_fb(struct wlr_drm_connector *conn
struct wlr_drm_plane *plane = crtc->primary;
struct wlr_buffer *source_buf = state->base->buffer;
struct wlr_drm_syncobj_timeline *wait_timeline = NULL;
uint64_t wait_point = 0;
if (state->base->committed & WLR_OUTPUT_STATE_WAIT_TIMELINE) {
wait_timeline = state->base->wait_timeline;
wait_point = state->base->wait_point;
}
assert(state->wait_timeline == NULL);
struct wlr_buffer *local_buf;
if (drm->parent) {
struct wlr_drm_format format = {0};
if (!drm_plane_pick_render_format(plane, &format, &drm->mgpu_renderer)) {
wlr_log(WLR_ERROR, "Failed to pick primary plane format");
return false;
}
// TODO: fallback to modifier-less buffer allocation
bool ok = init_drm_surface(&plane->mgpu_surf, &drm->mgpu_renderer,
source_buf->width, source_buf->height, &format);
wlr_drm_format_finish(&format);
if (!ok) {
return false;
}
local_buf = drm_surface_blit(&plane->mgpu_surf, source_buf,
wait_timeline, wait_point);
if (local_buf == NULL) {
return false;
}
if (plane->mgpu_surf.timeline != NULL) {
state->wait_timeline = wlr_drm_syncobj_timeline_ref(plane->mgpu_surf.timeline);
state->wait_point = plane->mgpu_surf.point;
}
} else {
local_buf = wlr_buffer_lock(source_buf);
if (wait_timeline != NULL) {
state->wait_timeline = wlr_drm_syncobj_timeline_ref(wait_timeline);
state->wait_point = wait_point;
}
}
bool ok = drm_fb_import(&state->primary_fb, drm, local_buf,
bool ok = drm_fb_import(&state->primary_fb, drm, source_buf,
&plane->formats);
wlr_buffer_unlock(local_buf);
if (!ok) {
wlr_drm_conn_log(conn, WLR_DEBUG,
"Failed to import buffer for scan-out");
@ -769,7 +714,7 @@ static bool drm_connector_set_pending_layer_fbs(struct wlr_drm_connector *conn,
struct wlr_drm_backend *drm = conn->backend;
struct wlr_drm_crtc *crtc = conn->crtc;
if (!crtc || drm->parent) {
if (!crtc) {
return false;
}
@ -828,12 +773,6 @@ static bool drm_connector_prepare(struct wlr_drm_connector_state *conn_state, bo
return false;
}
if (test_only && conn->backend->parent) {
// If we're running as a secondary GPU, we can't perform an atomic
// commit without blitting a buffer.
return true;
}
if (state->committed & WLR_OUTPUT_STATE_BUFFER) {
if (!drm_connector_state_update_primary_fb(conn, conn_state)) {
return false;
@ -898,13 +837,6 @@ static bool drm_connector_commit_state(struct wlr_drm_connector *conn,
goto out;
}
if (test_only && conn->backend->parent) {
// If we're running as a secondary GPU, we can't perform an atomic
// commit without blitting a buffer.
ok = true;
goto out;
}
if (!pending.active && conn->crtc == NULL) {
// Disabling an already-disabled connector
ok = true;
@ -1094,28 +1026,7 @@ static bool drm_connector_set_cursor(struct wlr_output *output,
return false;
}
struct wlr_buffer *local_buf;
if (drm->parent) {
struct wlr_drm_format format = {0};
if (!drm_plane_pick_render_format(plane, &format, &drm->mgpu_renderer)) {
wlr_log(WLR_ERROR, "Failed to pick cursor plane format");
return false;
}
bool ok = init_drm_surface(&plane->mgpu_surf, &drm->mgpu_renderer,
buffer->width, buffer->height, &format);
wlr_drm_format_finish(&format);
if (!ok) {
return false;
}
local_buf = drm_surface_blit(&plane->mgpu_surf, buffer, NULL, 0);
if (local_buf == NULL) {
return false;
}
} else {
local_buf = wlr_buffer_lock(buffer);
}
struct wlr_buffer *local_buf = wlr_buffer_lock(buffer);
bool ok = drm_fb_import(&conn->cursor_pending_fb, drm, local_buf,
&plane->formats);
@ -1209,9 +1120,6 @@ static const struct wlr_drm_format_set *drm_connector_get_cursor_formats(
if (!plane) {
return NULL;
}
if (conn->backend->parent) {
return &conn->backend->mgpu_formats;
}
return &plane->formats;
}
@ -1238,9 +1146,6 @@ static const struct wlr_drm_format_set *drm_connector_get_primary_formats(
if (!drm_connector_alloc_crtc(conn)) {
return NULL;
}
if (conn->backend->parent) {
return &conn->backend->mgpu_formats;
}
return &conn->crtc->primary->formats;
}
@ -1647,9 +1552,6 @@ static bool connect_drm_connector(struct wlr_drm_connector *wlr_conn,
}
output->timeline = drm->iface != &legacy_iface;
if (drm->parent) {
output->timeline = output->timeline && drm->mgpu_renderer.wlr_rend->features.timeline;
}
memset(wlr_conn->max_bpc_bounds, 0, sizeof(wlr_conn->max_bpc_bounds));
if (wlr_conn->props.max_bpc != 0) {
@ -2000,13 +1902,6 @@ bool commit_drm_device(struct wlr_drm_backend *drm,
modeset |= output_state->base.allow_reconfiguration;
}
if (test_only && drm->parent) {
// If we're running as a secondary GPU, we can't perform an atomic
// commit without blitting a buffer.
ok = true;
goto out;
}
uint32_t flags = 0;
if (!test_only) {
flags |= DRM_MODE_PAGE_FLIP_EVENT;
@ -2039,7 +1934,8 @@ static void handle_page_flip(int fd, unsigned seq,
conn->pending_page_flip = NULL;
}
uint32_t present_flags = WLR_OUTPUT_PRESENT_HW_CLOCK | WLR_OUTPUT_PRESENT_HW_COMPLETION;
uint32_t present_flags = WLR_OUTPUT_PRESENT_HW_CLOCK | WLR_OUTPUT_PRESENT_HW_COMPLETION |
WLR_OUTPUT_PRESENT_ZERO_COPY;
if (!page_flip->async) {
present_flags |= WLR_OUTPUT_PRESENT_VSYNC;
}
@ -2074,14 +1970,6 @@ static void handle_page_flip(int fd, unsigned seq,
drm_fb_move(&layer->current_fb, &layer->queued_fb);
}
/* Don't report ZERO_COPY in multi-gpu situations, because we had to copy
* data between the GPUs, even if we were using the direct scanout
* interface.
*/
if (!drm->parent) {
present_flags |= WLR_OUTPUT_PRESENT_ZERO_COPY;
}
struct wlr_output_event_present present_event = {
/* The DRM backend guarantees that the presentation event will be for
* the last submitted frame. */

View file

@ -7,7 +7,6 @@
/* Tear down a DRM backend monitor: detach every listener it registered
 * (session hotplug, session destroy, primary-DRM destroy, multi-backend
 * destroy) and free the monitor itself. */
static void drm_backend_monitor_destroy(struct wlr_drm_backend_monitor* monitor) {
wl_list_remove(&monitor->session_add_drm_card.link);
wl_list_remove(&monitor->session_destroy.link);
wl_list_remove(&monitor->primary_drm_destroy.link);
wl_list_remove(&monitor->multi_destroy.link);
free(monitor);
}
@ -25,8 +24,7 @@ static void handle_add_drm_card(struct wl_listener *listener, void *data) {
}
wlr_log(WLR_DEBUG, "Creating DRM backend for %s after hotplug", event->path);
struct wlr_backend *child_drm = wlr_drm_backend_create(backend_monitor->session,
dev, backend_monitor->primary_drm);
struct wlr_backend *child_drm = wlr_drm_backend_create(backend_monitor->session, dev);
if (!child_drm) {
wlr_log(WLR_ERROR, "Failed to create DRM backend after hotplug");
return;
@ -50,12 +48,6 @@ static void handle_session_destroy(struct wl_listener *listener, void *data) {
drm_backend_monitor_destroy(backend_monitor);
}
/* Listener for the primary DRM backend's destroy signal: once the primary
 * backend is gone the monitor has nothing left to track, so destroy it. */
static void handle_primary_drm_destroy(struct wl_listener *listener, void *data) {
struct wlr_drm_backend_monitor *backend_monitor =
wl_container_of(listener, backend_monitor, primary_drm_destroy);
drm_backend_monitor_destroy(backend_monitor);
}
static void handle_multi_destroy(struct wl_listener *listener, void *data) {
struct wlr_drm_backend_monitor *backend_monitor =
wl_container_of(listener, backend_monitor, multi_destroy);
@ -63,8 +55,7 @@ static void handle_multi_destroy(struct wl_listener *listener, void *data) {
}
struct wlr_drm_backend_monitor *drm_backend_monitor_create(
struct wlr_backend *multi, struct wlr_backend *primary_drm,
struct wlr_session *session) {
struct wlr_backend *multi, struct wlr_session *session) {
struct wlr_drm_backend_monitor *monitor = calloc(1, sizeof(*monitor));
if (!monitor) {
wlr_log_errno(WLR_ERROR, "Allocation failed");
@ -72,7 +63,6 @@ struct wlr_drm_backend_monitor *drm_backend_monitor_create(
}
monitor->multi = multi;
monitor->primary_drm = primary_drm;
monitor->session = session;
monitor->session_add_drm_card.notify = handle_add_drm_card;
@ -81,9 +71,6 @@ struct wlr_drm_backend_monitor *drm_backend_monitor_create(
monitor->session_destroy.notify = handle_session_destroy;
wl_signal_add(&session->events.destroy, &monitor->session_destroy);
monitor->primary_drm_destroy.notify = handle_primary_drm_destroy;
wl_signal_add(&primary_drm->events.destroy, &monitor->primary_drm_destroy);
monitor->multi_destroy.notify = handle_multi_destroy;
wl_signal_add(&multi->events.destroy, &monitor->multi_destroy);

View file

@ -13,35 +13,6 @@
#include "render/pixel_format.h"
#include "render/wlr_renderer.h"
/* Initialize a renderer + allocator pair bound to the DRM backend's device FD.
 * On failure nothing is left allocated; returns false and logs the cause.
 * On success the caller owns the pair and must release it with
 * finish_drm_renderer(). */
bool init_drm_renderer(struct wlr_drm_backend *drm,
struct wlr_drm_renderer *renderer) {
renderer->wlr_rend = renderer_autocreate_with_drm_fd(drm->fd);
if (!renderer->wlr_rend) {
wlr_log(WLR_ERROR, "Failed to create renderer");
return false;
}
// The allocator must produce buffers the backend can consume, so pass the
// backend's buffer capabilities along with the same DRM FD.
uint32_t backend_caps = backend_get_buffer_caps(&drm->backend);
renderer->allocator = allocator_autocreate_with_drm_fd(backend_caps,
renderer->wlr_rend, drm->fd);
if (renderer->allocator == NULL) {
wlr_log(WLR_ERROR, "Failed to create allocator");
// Undo the renderer creation so the caller sees an all-or-nothing result.
wlr_renderer_destroy(renderer->wlr_rend);
return false;
}
return true;
}
/* Release the allocator and renderer created by init_drm_renderer().
 * Safe to call with NULL (no-op), mirroring free() semantics. */
void finish_drm_renderer(struct wlr_drm_renderer *renderer) {
if (!renderer) {
return;
}
wlr_allocator_destroy(renderer->allocator);
wlr_renderer_destroy(renderer->wlr_rend);
}
void finish_drm_surface(struct wlr_drm_surface *surf) {
if (!surf || !surf->renderer) {
return;
@ -85,62 +56,6 @@ bool init_drm_surface(struct wlr_drm_surface *surf,
return true;
}
/* Copy (blit) `buffer` into a buffer acquired from the surface's swapchain,
 * used for multi-GPU setups where the source buffer cannot be scanned out
 * directly. Optionally waits on (wait_timeline, wait_point) before sampling
 * the source, and signals surf->timeline at the incremented surf->point when
 * the copy completes. Returns the locked destination buffer (caller must
 * unlock it) or NULL on failure. */
struct wlr_buffer *drm_surface_blit(struct wlr_drm_surface *surf,
struct wlr_buffer *buffer,
struct wlr_drm_syncobj_timeline *wait_timeline, uint64_t wait_point) {
struct wlr_renderer *renderer = surf->renderer->wlr_rend;
// The swapchain was sized by init_drm_surface(); a mismatch means the
// caller forgot to re-init after a resize.
if (surf->swapchain->width != buffer->width ||
surf->swapchain->height != buffer->height) {
wlr_log(WLR_ERROR, "Surface size doesn't match buffer size");
return NULL;
}
struct wlr_texture *tex = wlr_texture_from_buffer(renderer, buffer);
if (tex == NULL) {
wlr_log(WLR_ERROR, "Failed to import source buffer into multi-GPU renderer");
return NULL;
}
struct wlr_buffer *dst = wlr_swapchain_acquire(surf->swapchain);
if (!dst) {
wlr_log(WLR_ERROR, "Failed to acquire multi-GPU swapchain buffer");
goto error_tex;
}
// Advance the timeline point so waiters can synchronize on this blit.
surf->point++;
const struct wlr_buffer_pass_options pass_options = {
.signal_timeline = surf->timeline,
.signal_point = surf->point,
};
struct wlr_render_pass *pass = wlr_renderer_begin_buffer_pass(renderer, dst, &pass_options);
if (pass == NULL) {
wlr_log(WLR_ERROR, "Failed to begin render pass with multi-GPU destination buffer");
goto error_dst;
}
// Straight copy: disable blending so the destination is fully overwritten.
wlr_render_pass_add_texture(pass, &(struct wlr_render_texture_options){
.texture = tex,
.blend_mode = WLR_RENDER_BLEND_MODE_NONE,
.wait_timeline = wait_timeline,
.wait_point = wait_point,
});
if (!wlr_render_pass_submit(pass)) {
wlr_log(WLR_ERROR, "Failed to submit multi-GPU render pass");
goto error_dst;
}
wlr_texture_destroy(tex);
return dst;
// goto-based cleanup: release resources in reverse acquisition order.
error_dst:
wlr_buffer_unlock(dst);
error_tex:
wlr_texture_destroy(tex);
return NULL;
}
bool drm_plane_pick_render_format(struct wlr_drm_plane *plane,
struct wlr_drm_format *fmt, struct wlr_drm_renderer *renderer) {
const struct wlr_drm_format_set *render_formats =

View file

@ -19,9 +19,6 @@ struct wlr_drm_plane {
uint32_t type;
uint32_t id;
/* Only initialized on multi-GPU setups */
struct wlr_drm_surface mgpu_surf;
/* Buffer submitted to the kernel, will be presented on next vblank */
struct wlr_drm_fb *queued_fb;
/* Buffer currently displayed on screen */
@ -80,7 +77,6 @@ struct wlr_drm_crtc {
struct wlr_drm_backend {
struct wlr_backend backend;
struct wlr_drm_backend *parent;
const struct wlr_drm_interface *iface;
bool addfb2_modifiers;
@ -99,7 +95,6 @@ struct wlr_drm_backend {
struct wl_listener session_destroy;
struct wl_listener session_active;
struct wl_listener parent_destroy;
struct wl_listener dev_change;
struct wl_listener dev_remove;
@ -108,15 +103,10 @@ struct wlr_drm_backend {
struct wl_list page_flips; // wlr_drm_page_flip.link
/* Only initialized on multi-GPU setups */
struct wlr_drm_renderer mgpu_renderer;
struct wlr_session *session;
uint64_t cursor_width, cursor_height;
struct wlr_drm_format_set mgpu_formats;
bool supports_tearing_page_flips;
};

View file

@ -8,17 +8,14 @@
*/
struct wlr_drm_backend_monitor {
struct wlr_backend *multi;
struct wlr_backend *primary_drm;
struct wlr_session *session;
struct wl_listener multi_destroy;
struct wl_listener primary_drm_destroy;
struct wl_listener session_destroy;
struct wl_listener session_add_drm_card;
};
struct wlr_drm_backend_monitor *drm_backend_monitor_create(
struct wlr_backend *multi, struct wlr_backend *primary_drm,
struct wlr_session *session);
struct wlr_backend *multi, struct wlr_session *session);
#endif

View file

@ -25,19 +25,11 @@ struct wlr_drm_surface {
uint64_t point;
};
bool init_drm_renderer(struct wlr_drm_backend *drm,
struct wlr_drm_renderer *renderer);
void finish_drm_renderer(struct wlr_drm_renderer *renderer);
bool init_drm_surface(struct wlr_drm_surface *surf,
struct wlr_drm_renderer *renderer, int width, int height,
const struct wlr_drm_format *drm_format);
void finish_drm_surface(struct wlr_drm_surface *surf);
struct wlr_buffer *drm_surface_blit(struct wlr_drm_surface *surf,
struct wlr_buffer *buffer,
struct wlr_drm_syncobj_timeline *wait_timeline, uint64_t wait_point);
bool drm_plane_pick_render_format(struct wlr_drm_plane *plane,
struct wlr_drm_format *fmt, struct wlr_drm_renderer *renderer);

View file

@ -32,21 +32,12 @@ struct wlr_drm_lease {
/**
* Creates a DRM backend using the specified GPU file descriptor (typically from
* a device node in /dev/dri).
*
* To slave this to another DRM backend, pass it as the parent (which _must_ be
* a DRM backend, other kinds of backends raise SIGABRT).
*/
struct wlr_backend *wlr_drm_backend_create(struct wlr_session *session,
struct wlr_device *dev, struct wlr_backend *parent);
struct wlr_backend *wlr_drm_backend_create(struct wlr_session *session, struct wlr_device *dev);
bool wlr_backend_is_drm(struct wlr_backend *backend);
bool wlr_output_is_drm(struct wlr_output *output);
/**
* Get the parent DRM backend, if any.
*/
struct wlr_backend *wlr_drm_backend_get_parent(struct wlr_backend *backend);
/**
* Get the KMS connector object ID.
*/

View file

@ -56,6 +56,7 @@ struct wlr_buffer {
struct {
struct wl_signal destroy;
struct wl_signal release;
struct wl_signal prerelease;
} events;
struct wlr_addon_set addons;

View file

@ -239,6 +239,7 @@ struct wlr_surface {
// private state
struct wl_listener role_resource_destroy;
struct wl_listener current_buffer_release;
struct {
int32_t scale;
@ -250,6 +251,7 @@ struct wlr_surface {
bool unmap_commit;
bool opaque;
bool consumed;
bool handling_commit;
bool pending_rejected;
@ -536,6 +538,12 @@ void wlr_surface_synced_finish(struct wlr_surface_synced *synced);
void *wlr_surface_synced_get_state(struct wlr_surface_synced *synced,
const struct wlr_surface_state *state);
/*
* Consumes buffer and damage state of the buffer so that the compositor may
* drop references to any of these resources.
*/
void wlr_surface_consume(struct wlr_surface *surface);
/**
* Get a Pixman region from a wl_region resource.
*/

View file

@ -17,6 +17,8 @@
#include <wlr/render/drm_format_set.h>
struct wlr_surface;
struct wlr_renderer;
struct wlr_allocator;
struct wlr_dmabuf_v1_buffer {
struct wlr_buffer base;
@ -27,6 +29,8 @@ struct wlr_dmabuf_v1_buffer {
// private state
struct wl_listener release;
struct wlr_linux_dmabuf_v1 *linux_dmabuf_v1;
};
/**
@ -63,7 +67,13 @@ struct wlr_linux_dmabuf_v1 {
int main_device_fd; // to sanity check FDs sent by clients, -1 if unavailable
// used for multigpu
struct wlr_renderer *main_renderer;
struct wlr_allocator *main_allocator;
struct wl_listener display_destroy;
struct wl_listener main_renderer_destroy;
struct wl_listener main_allocator_destroy;
bool (*check_dmabuf_callback)(struct wlr_dmabuf_attributes *attribs, void *data);
void *check_dmabuf_callback_data;
@ -78,6 +88,23 @@ struct wlr_linux_dmabuf_v1 {
struct wlr_linux_dmabuf_v1 *wlr_linux_dmabuf_v1_create(struct wl_display *display,
uint32_t version, const struct wlr_linux_dmabuf_feedback_v1 *default_feedback);
/**
* Returns the associated dmabuf object from a generic buffer. Returns
* NULL if the generic buffer is not a dmabuf.
*/
struct wlr_dmabuf_v1_buffer *wlr_dmabuf_v1_buffer_try_from_buffer(
struct wlr_buffer *buffer);
/**
* Sets the main blit device used for multigpu. With multigpu, dmabufs with
* implicit modifiers or just modifiers that aren't supported by other GPUs
* might need to be blitted into a staging buffer with correct modifiers. This
* will be done with this allocator (to allocate the staging buffer) and renderer
* to render into the staging buffer.
*/
void wlr_linux_dmabuf_v1_set_main_blit_device(struct wlr_linux_dmabuf_v1 *linux_dmabuf,
struct wlr_renderer *renderer, struct wlr_allocator *allocator);
/**
* Create the linux-dmabuf-v1 global.
*
@ -120,6 +147,9 @@ void wlr_linux_dmabuf_feedback_v1_finish(struct wlr_linux_dmabuf_feedback_v1 *fe
struct wlr_linux_dmabuf_feedback_v1_init_options {
// Main renderer used by the compositor
struct wlr_renderer *main_renderer;
// Optional allocator created for the primary GPU used by the default feedback.
// This is used for multi gpu for allocating staging buffers.
struct wlr_allocator *main_allocator;
// Output on which direct scan-out is possible on the primary plane, or NULL
struct wlr_output *scanout_primary_output;
// Output layer feedback event, or NULL

View file

@ -0,0 +1,97 @@
/*
* This is an unstable interface of wlroots. No guarantees are made regarding the
* future consistency of this API.
*/
#ifndef WLR_USE_UNSTABLE
#error "Add -DWLR_USE_UNSTABLE to enable unstable wlroots features"
#endif
#ifndef WLR_TYPES_OUTPUT_MANAGER_H
#define WLR_TYPES_OUTPUT_MANAGER_H
#include <stdbool.h>
#include <wayland-server-core.h>
#include <wlr/render/drm_format_set.h>
struct wlr_renderer;
struct wlr_allocator;
struct wlr_backend;
struct wlr_output;
/* Per-backend rendering state tracked by a wlr_output_manager: the renderer
 * and allocator created for one backend, reference-counted via
 * wlr_output_manager_lock_backend()/wlr_output_manager_unlock_backend(). */
struct wlr_output_manager_backend {
struct wlr_output_manager *manager;
struct wlr_renderer *renderer;
struct wlr_allocator *allocator;
struct wlr_backend *backend;
struct wl_list link; // wlr_output_manager.backends
struct {
struct wl_signal recovery; // emitted on GPU reset recovery — TODO confirm payload
} events;
// private state
uint32_t locks; // outstanding references from wlr_output_manager_lock_backend()
struct wl_listener backend_destroy;
struct wl_listener renderer_lost;
};
struct wlr_output_manager {
struct wl_list backends; // wlr_output_manager_backend.link
struct wlr_output_manager_backend primary;
};
/**
* Initializes the given output manager. wlr_output_manager_finish
* must be called to clean up this manager.
*/
bool wlr_output_manager_init(struct wlr_output_manager *manager,
struct wlr_backend *backend);
/**
* Finishes this output_manager and cleans up all its resources including any
* output manager backends.
*/
void wlr_output_manager_finish(struct wlr_output_manager *manager);
/**
* This will return a output_manager backend that will be reference counted.
* wlr_output_manager_unlock_backend is required to be called after the usage
* of this is finished.
*/
struct wlr_output_manager_backend *wlr_output_manager_lock_backend(
struct wlr_output_manager *manager, struct wlr_backend *wlr_backend);
/**
* wlr_output_manager_unlock_backend will unlock any backend returned by
* wlr_output_manager_lock_backend. The allocator and backend allocated
* may be destroyed when the reference count reaches 0.
*/
void wlr_output_manager_unlock_backend(struct wlr_output_manager_backend *backend);
/**
* wlr_output_manager_init_output will automatically initialize the given output.
* This is a helper function that will handle unlocking backends automatically
* upon output destroy.
*/
bool wlr_output_manager_init_output(struct wlr_output_manager *manager,
struct wlr_output *output);
/**
* Initializes shm for the given wl_display given the constraints all devices
* on the manager have
*/
bool wlr_output_manager_init_wl_shm(struct wlr_output_manager *manager,
struct wl_display *wl_display);
/**
* Initializes the given wl_display given the constraints all devices
* on the manager have
*/
bool wlr_output_manager_init_wl_display(struct wlr_output_manager *manager,
struct wl_display *wl_display);
#endif

View file

@ -0,0 +1,120 @@
/*
* This is an unstable interface of wlroots. No guarantees are made regarding the
* future consistency of this API.
*/
#ifndef WLR_USE_UNSTABLE
#error "Add -DWLR_USE_UNSTABLE to enable unstable wlroots features"
#endif
#ifndef WLR_TYPES_WLR_RASTER_H
#define WLR_TYPES_WLR_RASTER_H
#include <stdbool.h>
#include <stdlib.h>
#include <wayland-server-core.h>
struct wlr_buffer;
struct wlr_texture;
struct wlr_renderer;
struct wlr_drm_syncobj_timeline;
struct wlr_surface;
struct wlr_allocator;
struct wlr_raster_source {
struct wlr_texture *texture;
struct wlr_allocator *allocator; // may be NULL
struct wlr_raster *raster;
struct wl_list link;
struct wl_listener renderer_destroy;
struct wl_listener allocator_destroy;
};
/* Reference-counted pixel content that can be realized as textures on one or
 * more renderers (see wlr_raster_obtain_texture()). The backing buffer is not
 * locked by the raster and may go away (see wlr_raster_create()). */
struct wlr_raster {
// May be NULL
struct wlr_buffer *buffer;
struct wl_list sources; // wlr_raster_source.link — per-renderer textures
uint32_t width, height;
bool opaque;
// Optional synchronization point the content becomes valid at
struct wlr_drm_syncobj_timeline *wait_timeline;
uint64_t wait_point;
struct {
struct wl_signal destroy;
struct wl_signal invalidated;
} events;
// private state
size_t n_locks; // reference count managed by wlr_raster_lock()/unlock()
struct wl_listener buffer_release;
};
struct wlr_raster_create_options {
struct wlr_drm_syncobj_timeline *wait_timeline;
uint64_t wait_point;
};
/**
* Creates a new wlr_raster being backed by the given buffer. The raster will
* not lock the given buffer meaning that once it's released, the raster will
* NULL its buffer reference and potentially become invalid.
* The creation function is referenced: once the creator is done with the raster,
* wlr_raster_unlock must be called as the reference count will start at 1
* from creation.
*
* Options can be NULL.
*/
struct wlr_raster *wlr_raster_create(struct wlr_buffer *buffer,
const struct wlr_raster_create_options *options);
/**
* Lock the raster for use. As long as the raster has at least one lock, it
* will not be destroyed.
*/
struct wlr_raster *wlr_raster_lock(struct wlr_raster *raster);
/**
* Unlock the raster. This must be called after wlr_raster_lock once the raster
* has been finished being used or after creation from wlr_raster_create.
*/
void wlr_raster_unlock(struct wlr_raster *raster);
/**
* Returns the texture allocated for this renderer. If there is none,
* a new texture will be created and attached to this wlr_raster. Users do not
* own the texture returned by this function; it may only be used for read-only
* purposes.
*
* Will return NULL if the creation was unsuccessful.
*/
struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster *raster,
struct wlr_renderer *renderer);
/**
* Returns the texture allocated for this renderer. If there is none,
* a new texture will be created and attached to this wlr_raster. Users do not
* own the texture returned by this function; it may only be used for read-only
* purposes.
*
* An optional allocator can be provided which will be used to allocate staging
* buffers to blit between graphics devices if needed.
*
* Will return NULL if the creation was unsuccessful.
*/
struct wlr_texture *wlr_raster_obtain_texture_with_allocator(struct wlr_raster *raster,
struct wlr_renderer *renderer, struct wlr_allocator *allocator);
/**
* Creates a wlr_raster from a surface. This will automatically deduplicate
* rasters if multiple are consumed from the same surface so that redundant
* uploads are not performed. The raster returned will automatically be locked.
* Users are required to call wlr_raster_unlock() after invoking this function.
*/
struct wlr_raster *wlr_raster_from_surface(struct wlr_surface *surface);
#endif

View file

@ -35,6 +35,7 @@ struct wlr_xdg_surface;
struct wlr_layer_surface_v1;
struct wlr_drag_icon;
struct wlr_surface;
struct wlr_raster;
struct wlr_scene_node;
struct wlr_scene_buffer;
@ -158,6 +159,7 @@ struct wlr_scene_buffer {
// May be NULL
struct wlr_buffer *buffer;
struct wlr_raster *raster;
struct {
struct wl_signal outputs_update; // struct wlr_scene_outputs_update_event
@ -188,15 +190,9 @@ struct wlr_scene_buffer {
// private state
uint64_t active_outputs;
struct wlr_texture *texture;
struct wlr_linux_dmabuf_feedback_v1_init_options prev_feedback_options;
bool own_buffer;
int buffer_width, buffer_height;
bool buffer_is_opaque;
struct wlr_drm_syncobj_timeline *wait_timeline;
uint64_t wait_point;
struct wl_listener buffer_release;
struct wl_listener renderer_destroy;
@ -427,7 +423,7 @@ struct wlr_scene_buffer *wlr_scene_buffer_create(struct wlr_scene_tree *parent,
struct wlr_buffer *buffer);
/**
* Sets the buffer's backing buffer.
* Sets the buffer's backing raster.
*
* If the buffer is NULL, the buffer node will not be displayed.
*/
@ -435,7 +431,7 @@ void wlr_scene_buffer_set_buffer(struct wlr_scene_buffer *scene_buffer,
struct wlr_buffer *buffer);
/**
* Sets the buffer's backing buffer with a custom damage region.
* Sets the buffer's backing raster with a custom damage region.
*
* The damage region is in buffer-local coordinates. If the region is NULL,
* the whole buffer node will be damaged.
@ -465,6 +461,15 @@ struct wlr_scene_buffer_set_buffer_options {
void wlr_scene_buffer_set_buffer_with_options(struct wlr_scene_buffer *scene_buffer,
struct wlr_buffer *buffer, const struct wlr_scene_buffer_set_buffer_options *options);
/*
* Sets the buffer's backing raster with a custom damage region.
*
* The damage region is in buffer-local coordinates. If the region is NULL,
* the whole buffer node will be damaged.
*/
void wlr_scene_buffer_set_raster_with_damage(struct wlr_scene_buffer *scene_buffer,
struct wlr_raster *raster, const pixman_region32_t *damage);
/**
* Sets the buffer's opaque region. This is an optimization hint used to
* determine if buffers which reside under this one need to be rendered or not.

View file

@ -0,0 +1,32 @@
/*
 * This is an unstable interface of wlroots. No guarantees are made regarding the
 * future consistency of this API.
 */
#ifndef WLR_USE_UNSTABLE
#error "Add -DWLR_USE_UNSTABLE to enable unstable wlroots features"
#endif
#ifndef WLR_TYPES_WLR_SURFACE_INVALIDATION_V1_H
#define WLR_TYPES_WLR_SURFACE_INVALIDATION_V1_H
#include <wayland-server-core.h>
struct wlr_surface;
/**
 * Manager implementing the surface-invalidation-v1 protocol.
 *
 * Owns the wl_global advertised to clients and is torn down automatically
 * when the wl_display is destroyed (via display_destroy).
 */
struct wlr_surface_invalidation_manager_v1 {
	struct wl_global *global;
	struct {
		struct wl_signal destroy; // emitted when the manager is destroyed
	} events;
	// private state
	struct wl_listener display_destroy;
};
/**
 * Create a surface-invalidation-v1 manager global on the given display,
 * advertising at most the given protocol version.
 */
struct wlr_surface_invalidation_manager_v1 *wlr_surface_invalidation_manager_v1_create(
	struct wl_display *display, uint32_t version);
/**
 * Send a surface-invalidation event for the given surface.
 *
 * NOTE(review): presumably this asks the client to resubmit the surface
 * contents (e.g. after a GPU reset) — confirm against the
 * surface-invalidation-v1 protocol spec.
 */
void wlr_surface_invalidation_manager_v1_send_surface_invalidation(
	struct wlr_surface *surface);
#endif

View file

@ -31,6 +31,7 @@ protocols = {
'linux-drm-syncobj-v1': wl_protocol_dir / 'staging/linux-drm-syncobj/linux-drm-syncobj-v1.xml',
'security-context-v1': wl_protocol_dir / 'staging/security-context/security-context-v1.xml',
'single-pixel-buffer-v1': wl_protocol_dir / 'staging/single-pixel-buffer/single-pixel-buffer-v1.xml',
'surface-invalidation-v1': wl_protocol_dir / 'staging/surface-invalidation/surface-invalidation-v1.xml',
'xdg-activation-v1': wl_protocol_dir / 'staging/xdg-activation/xdg-activation-v1.xml',
'xwayland-shell-v1': wl_protocol_dir / 'staging/xwayland-shell/xwayland-shell-v1.xml',
'tearing-control-v1': wl_protocol_dir / 'staging/tearing-control/tearing-control-v1.xml',

View file

@ -8,13 +8,13 @@
#include <wayland-server-core.h>
#include <wlr/backend.h>
#include <wlr/render/allocator.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/types/wlr_cursor.h>
#include <wlr/types/wlr_compositor.h>
#include <wlr/types/wlr_data_device.h>
#include <wlr/types/wlr_input_device.h>
#include <wlr/types/wlr_keyboard.h>
#include <wlr/types/wlr_output.h>
#include <wlr/types/wlr_output_manager.h>
#include <wlr/types/wlr_output_layout.h>
#include <wlr/types/wlr_pointer.h>
#include <wlr/types/wlr_scene.h>
@ -35,8 +35,7 @@ enum tinywl_cursor_mode {
struct tinywl_server {
struct wl_display *wl_display;
struct wlr_backend *backend;
struct wlr_renderer *renderer;
struct wlr_allocator *allocator;
struct wlr_output_manager output_manager;
struct wlr_scene *scene;
struct wlr_scene_output_layout *scene_layout;
@ -599,9 +598,10 @@ static void server_new_output(struct wl_listener *listener, void *data) {
wl_container_of(listener, server, new_output);
struct wlr_output *wlr_output = data;
/* Configures the output created by the backend to use our allocator
* and our renderer. Must be done once, before commiting the output */
wlr_output_init_render(wlr_output, server->allocator, server->renderer);
/* Configures the output created by the backend using the output manager
 * to allocate a renderer and an allocator for us. Must be done once,
 * before committing the output */
wlr_output_manager_init_output(&server->output_manager, wlr_output);
/* The output may be disabled, switch it on. */
struct wlr_output_state state;
@ -904,28 +904,16 @@ int main(int argc, char *argv[]) {
return 1;
}
/* Autocreates a renderer, either Pixman, GLES2 or Vulkan for us. The user
* can also specify a renderer using the WLR_RENDERER env var.
* The renderer is responsible for defining the various pixel formats it
* supports for shared memory, this configures that for clients. */
server.renderer = wlr_renderer_autocreate(server.backend);
if (server.renderer == NULL) {
wlr_log(WLR_ERROR, "failed to create wlr_renderer");
/* This is a helper that will automatically create renderers and allocators
* for each output. This serves as the bridge between the output and the
* backend for rendering.
*/
if (!wlr_output_manager_init(&server.output_manager, server.backend)) {
wlr_log(WLR_ERROR, "failed to create wlr_output_manager");
return 1;
}
wlr_renderer_init_wl_display(server.renderer, server.wl_display);
/* Autocreates an allocator for us.
* The allocator is the bridge between the renderer and the backend. It
* handles the buffer creation, allowing wlroots to render onto the
* screen */
server.allocator = wlr_allocator_autocreate(server.backend,
server.renderer);
if (server.allocator == NULL) {
wlr_log(WLR_ERROR, "failed to create wlr_allocator");
return 1;
}
wlr_output_manager_init_wl_display(&server.output_manager, server.wl_display);
/* This creates some hands-off wlroots interfaces. The compositor is
* necessary for clients to allocate surfaces, the subcompositor allows to
@ -934,7 +922,7 @@ int main(int argc, char *argv[]) {
* to dig your fingers in and play with their behavior if you want. Note that
* the clients cannot set the selection directly without compositor approval,
* see the handling of the request_set_selection event below.*/
wlr_compositor_create(server.wl_display, 5, server.renderer);
wlr_compositor_create(server.wl_display, 5, NULL);
wlr_subcompositor_create(server.wl_display);
wlr_data_device_manager_create(server.wl_display);
@ -1057,10 +1045,8 @@ int main(int argc, char *argv[]) {
wl_display_destroy_clients(server.wl_display);
wlr_scene_node_destroy(&server.scene->tree.node);
wlr_xcursor_manager_destroy(server.cursor_mgr);
wlr_cursor_destroy(server.cursor);
wlr_allocator_destroy(server.allocator);
wlr_renderer_destroy(server.renderer);
wlr_backend_destroy(server.backend);
wlr_output_manager_finish(&server.output_manager);
wl_display_destroy(server.wl_display);
return 0;
}

View file

@ -19,6 +19,7 @@ void wlr_buffer_init(struct wlr_buffer *buffer,
};
wl_signal_init(&buffer->events.destroy);
wl_signal_init(&buffer->events.release);
wl_signal_init(&buffer->events.prerelease);
wlr_addon_set_init(&buffer->addons);
}
@ -58,6 +59,10 @@ void wlr_buffer_unlock(struct wlr_buffer *buffer) {
assert(buffer->n_locks > 0);
buffer->n_locks--;
if (buffer->n_locks == 0) {
wl_signal_emit_mutable(&buffer->events.prerelease, NULL);
}
if (buffer->n_locks == 0) {
wl_signal_emit_mutable(&buffer->events.release, NULL);
}

View file

@ -60,6 +60,7 @@ wlr_files += files(
'wlr_output_layer.c',
'wlr_output_layout.c',
'wlr_output_management_v1.c',
'wlr_output_manager.c',
'wlr_output_power_management_v1.c',
'wlr_output_swapchain_manager.c',
'wlr_pointer_constraints_v1.c',
@ -68,6 +69,7 @@ wlr_files += files(
'wlr_presentation_time.c',
'wlr_primary_selection_v1.c',
'wlr_primary_selection.c',
'wlr_raster.c',
'wlr_region.c',
'wlr_relative_pointer_v1.c',
'wlr_screencopy_v1.c',
@ -77,6 +79,7 @@ wlr_files += files(
'wlr_shm.c',
'wlr_single_pixel_buffer_v1.c',
'wlr_subcompositor.c',
'wlr_surface_invalidation_v1.c',
'wlr_fractional_scale_v1.c',
'wlr_switch.c',
'wlr_tablet_pad.c',

View file

@ -6,6 +6,7 @@
#include <wlr/types/wlr_fractional_scale_v1.h>
#include <wlr/types/wlr_linux_drm_syncobj_v1.h>
#include <wlr/types/wlr_presentation_time.h>
#include <wlr/types/wlr_raster.h>
#include <wlr/util/transform.h>
#include "types/wlr_scene.h"
@ -76,28 +77,6 @@ static void scene_surface_handle_surface_destroy(
wlr_scene_node_destroy(&surface->buffer->node);
}
// This is used for wlr_scene where it unconditionally locks buffers preventing
// reuse of the existing texture for shm clients. With the usage pattern of
// wlr_scene surface handling, we can mark its locked buffer as safe
// for mutation.
static void client_buffer_mark_next_can_damage(struct wlr_client_buffer *buffer) {
buffer->n_ignore_locks++;
}
static void scene_buffer_unmark_client_buffer(struct wlr_scene_buffer *scene_buffer) {
if (!scene_buffer->buffer) {
return;
}
struct wlr_client_buffer *buffer = wlr_client_buffer_get(scene_buffer->buffer);
if (!buffer) {
return;
}
assert(buffer->n_ignore_locks > 0);
buffer->n_ignore_locks--;
}
static int min(int a, int b) {
return a < b ? a : b;
}
@ -160,29 +139,13 @@ static void surface_reconfigure(struct wlr_scene_surface *scene_surface) {
wlr_scene_buffer_set_transform(scene_buffer, state->transform);
wlr_scene_buffer_set_opacity(scene_buffer, opacity);
scene_buffer_unmark_client_buffer(scene_buffer);
if (surface->buffer) {
client_buffer_mark_next_can_damage(surface->buffer);
struct wlr_raster *raster = wlr_raster_from_surface(surface);
if (raster) {
wlr_scene_buffer_set_raster_with_damage(scene_buffer,
raster, &surface->buffer_damage);
struct wlr_linux_drm_syncobj_surface_v1_state *syncobj_surface_state =
wlr_linux_drm_syncobj_v1_get_surface_state(surface);
struct wlr_drm_syncobj_timeline *wait_timeline = NULL;
uint64_t wait_point = 0;
if (syncobj_surface_state != NULL) {
wait_timeline = syncobj_surface_state->acquire_timeline;
wait_point = syncobj_surface_state->acquire_point;
}
struct wlr_scene_buffer_set_buffer_options options = {
.damage = &surface->buffer_damage,
.wait_timeline = wait_timeline,
.wait_point = wait_point,
};
wlr_scene_buffer_set_buffer_with_options(scene_buffer,
&surface->buffer->base, &options);
if (syncobj_surface_state != NULL &&
(surface->current.committed & WLR_SURFACE_STATE_BUFFER)) {
wlr_linux_drm_syncobj_v1_state_signal_release_with_buffer(syncobj_surface_state,
@ -192,6 +155,7 @@ static void surface_reconfigure(struct wlr_scene_surface *scene_surface) {
wlr_scene_buffer_set_buffer(scene_buffer, NULL);
}
wlr_raster_unlock(raster);
pixman_region32_fini(&opaque);
}
@ -231,8 +195,6 @@ static bool scene_buffer_point_accepts_input(struct wlr_scene_buffer *scene_buff
static void surface_addon_destroy(struct wlr_addon *addon) {
struct wlr_scene_surface *surface = wl_container_of(addon, surface, addon);
scene_buffer_unmark_client_buffer(surface->buffer);
wlr_addon_finish(&surface->addon);
wl_list_remove(&surface->outputs_update.link);

View file

@ -10,6 +10,7 @@
#include <wlr/types/wlr_gamma_control_v1.h>
#include <wlr/types/wlr_linux_dmabuf_v1.h>
#include <wlr/types/wlr_presentation_time.h>
#include <wlr/types/wlr_raster.h>
#include <wlr/types/wlr_scene.h>
#include <wlr/util/log.h>
#include <wlr/util/region.h>
@ -91,8 +92,6 @@ struct highlight_region {
static void scene_buffer_set_buffer(struct wlr_scene_buffer *scene_buffer,
struct wlr_buffer *buffer);
static void scene_buffer_set_texture(struct wlr_scene_buffer *scene_buffer,
struct wlr_texture *texture);
void wlr_scene_node_destroy(struct wlr_scene_node *node) {
if (node == NULL) {
@ -123,9 +122,8 @@ void wlr_scene_node_destroy(struct wlr_scene_node *node) {
}
scene_buffer_set_buffer(scene_buffer, NULL);
scene_buffer_set_texture(scene_buffer, NULL);
wlr_raster_unlock(scene_buffer->raster);
pixman_region32_fini(&scene_buffer->opaque_region);
wlr_drm_syncobj_timeline_unref(scene_buffer->wait_timeline);
} else if (node->type == WLR_SCENE_NODE_TREE) {
struct wlr_scene_tree *scene_tree = wlr_scene_tree_from_node(node);
@ -259,7 +257,7 @@ static void scene_node_opaque_region(struct wlr_scene_node *node, int x, int y,
} else if (node->type == WLR_SCENE_NODE_BUFFER) {
struct wlr_scene_buffer *scene_buffer = wlr_scene_buffer_from_node(node);
if (!scene_buffer->buffer) {
if (!scene_buffer->raster) {
return;
}
@ -267,7 +265,7 @@ static void scene_node_opaque_region(struct wlr_scene_node *node, int x, int y,
return;
}
if (!scene_buffer->buffer_is_opaque) {
if (!scene_buffer->raster->opaque) {
pixman_region32_copy(opaque, &scene_buffer->opaque_region);
pixman_region32_intersect_rect(opaque, opaque, 0, 0, width, height);
pixman_region32_translate(opaque, x, y);
@ -753,57 +751,17 @@ static void scene_buffer_set_buffer(struct wlr_scene_buffer *scene_buffer,
if (scene_buffer->own_buffer) {
wlr_buffer_unlock(scene_buffer->buffer);
}
scene_buffer->buffer = NULL;
scene_buffer->buffer = buffer;
scene_buffer->own_buffer = false;
scene_buffer->buffer_width = scene_buffer->buffer_height = 0;
scene_buffer->buffer_is_opaque = false;
if (!buffer) {
return;
}
scene_buffer->own_buffer = true;
scene_buffer->buffer = wlr_buffer_lock(buffer);
scene_buffer->buffer_width = buffer->width;
scene_buffer->buffer_height = buffer->height;
scene_buffer->buffer_is_opaque = buffer_is_opaque(buffer);
scene_buffer->buffer_release.notify = scene_buffer_handle_buffer_release;
wl_signal_add(&buffer->events.release, &scene_buffer->buffer_release);
}
static void scene_buffer_handle_renderer_destroy(struct wl_listener *listener,
void *data) {
struct wlr_scene_buffer *scene_buffer = wl_container_of(listener, scene_buffer, renderer_destroy);
scene_buffer_set_texture(scene_buffer, NULL);
}
static void scene_buffer_set_texture(struct wlr_scene_buffer *scene_buffer,
struct wlr_texture *texture) {
wl_list_remove(&scene_buffer->renderer_destroy.link);
wlr_texture_destroy(scene_buffer->texture);
scene_buffer->texture = texture;
if (texture != NULL) {
scene_buffer->renderer_destroy.notify = scene_buffer_handle_renderer_destroy;
wl_signal_add(&texture->renderer->events.destroy, &scene_buffer->renderer_destroy);
} else {
wl_list_init(&scene_buffer->renderer_destroy.link);
}
}
static void scene_buffer_set_wait_timeline(struct wlr_scene_buffer *scene_buffer,
struct wlr_drm_syncobj_timeline *timeline, uint64_t point) {
wlr_drm_syncobj_timeline_unref(scene_buffer->wait_timeline);
if (timeline != NULL) {
scene_buffer->wait_timeline = wlr_drm_syncobj_timeline_ref(timeline);
scene_buffer->wait_point = point;
} else {
scene_buffer->wait_timeline = NULL;
scene_buffer->wait_point = 0;
}
}
struct wlr_scene_buffer *wlr_scene_buffer_create(struct wlr_scene_tree *parent,
struct wlr_buffer *buffer) {
struct wlr_scene_buffer *scene_buffer = calloc(1, sizeof(*scene_buffer));
@ -822,8 +780,14 @@ struct wlr_scene_buffer *wlr_scene_buffer_create(struct wlr_scene_tree *parent,
wl_list_init(&scene_buffer->buffer_release.link);
wl_list_init(&scene_buffer->renderer_destroy.link);
scene_buffer->opacity = 1;
scene_buffer_set_buffer(scene_buffer, buffer);
if (buffer) {
scene_buffer->raster = wlr_raster_create(buffer, NULL);
wlr_buffer_lock(buffer);
scene_buffer->own_buffer = true;
}
scene_node_update(&scene_buffer->node, NULL);
return scene_buffer;
@ -836,32 +800,47 @@ void wlr_scene_buffer_set_buffer_with_options(struct wlr_scene_buffer *scene_buf
options = &default_options;
}
// specifying a region for a NULL buffer doesn't make sense. We need to know
// about the buffer to scale the buffer local coordinates down to scene
struct wlr_raster *raster = NULL;
if (buffer) {
raster = wlr_raster_create(buffer, &(struct wlr_raster_create_options) {
.wait_timeline = options->wait_timeline,
.wait_point = options->wait_point,
});
}
wlr_scene_buffer_set_raster_with_damage(scene_buffer, raster, options->damage);
if (raster) {
wlr_buffer_lock(buffer);
scene_buffer->own_buffer = true;
}
wlr_raster_unlock(raster);
}
void wlr_scene_buffer_set_raster_with_damage(struct wlr_scene_buffer *scene_buffer,
struct wlr_raster *raster, const pixman_region32_t *damage) {
// specifying a region for a NULL raster doesn't make sense. We need to know
// about the raster to scale the raster local coordinates down to scene
// coordinates.
assert(buffer || !options->damage);
assert(raster || !damage);
bool mapped = buffer != NULL;
bool prev_mapped = scene_buffer->buffer != NULL || scene_buffer->texture != NULL;
if (!mapped && !prev_mapped) {
// unmapping already unmapped buffer - noop
if (raster == scene_buffer->raster) {
return;
}
// if this node used to not be mapped or its previous displayed
// buffer region will be different from what the new buffer would
// produce we need to update the node.
bool update = mapped != prev_mapped;
if (buffer != NULL && scene_buffer->dst_width == 0 && scene_buffer->dst_height == 0) {
update = update || scene_buffer->buffer_width != buffer->width ||
scene_buffer->buffer_height != buffer->height;
bool update = !raster != !scene_buffer->raster;
if (raster != NULL && scene_buffer->dst_width == 0 && scene_buffer->dst_height == 0) {
update = update || scene_buffer->raster->width != raster->width ||
scene_buffer->raster->height != raster->height;
}
scene_buffer_set_buffer(scene_buffer, buffer);
scene_buffer_set_texture(scene_buffer, NULL);
scene_buffer_set_wait_timeline(scene_buffer,
options->wait_timeline, options->wait_point);
wlr_raster_unlock(scene_buffer->raster);
scene_buffer_set_buffer(scene_buffer, raster ? raster->buffer : NULL);
scene_buffer->raster = raster ? wlr_raster_lock(raster) : NULL;
if (update) {
scene_node_update(&scene_buffer->node, NULL);
@ -876,8 +855,7 @@ void wlr_scene_buffer_set_buffer_with_options(struct wlr_scene_buffer *scene_buf
}
pixman_region32_t fallback_damage;
pixman_region32_init_rect(&fallback_damage, 0, 0, buffer->width, buffer->height);
const pixman_region32_t *damage = options->damage;
pixman_region32_init_rect(&fallback_damage, 0, 0, raster->width, raster->height);
if (!damage) {
damage = &fallback_damage;
}
@ -886,26 +864,26 @@ void wlr_scene_buffer_set_buffer_with_options(struct wlr_scene_buffer *scene_buf
if (wlr_fbox_empty(&box)) {
box.x = 0;
box.y = 0;
box.width = buffer->width;
box.height = buffer->height;
box.width = raster->width;
box.height = raster->height;
}
wlr_fbox_transform(&box, &box, scene_buffer->transform,
buffer->width, buffer->height);
raster->width, raster->height);
float scale_x, scale_y;
if (scene_buffer->dst_width || scene_buffer->dst_height) {
scale_x = scene_buffer->dst_width / box.width;
scale_y = scene_buffer->dst_height / box.height;
} else {
scale_x = buffer->width / box.width;
scale_y = buffer->height / box.height;
scale_x = raster->width / box.width;
scale_y = raster->height / box.height;
}
pixman_region32_t trans_damage;
pixman_region32_init(&trans_damage);
wlr_region_transform(&trans_damage, damage,
scene_buffer->transform, buffer->width, buffer->height);
scene_buffer->transform, raster->width, raster->height);
pixman_region32_intersect_rect(&trans_damage, &trans_damage,
box.x, box.y, box.width, box.height);
pixman_region32_translate(&trans_damage, -box.x, -box.y);
@ -1055,28 +1033,6 @@ void wlr_scene_buffer_set_filter_mode(struct wlr_scene_buffer *scene_buffer,
scene_node_update(&scene_buffer->node, NULL);
}
static struct wlr_texture *scene_buffer_get_texture(
struct wlr_scene_buffer *scene_buffer, struct wlr_renderer *renderer) {
if (scene_buffer->buffer == NULL || scene_buffer->texture != NULL) {
return scene_buffer->texture;
}
struct wlr_client_buffer *client_buffer =
wlr_client_buffer_get(scene_buffer->buffer);
if (client_buffer != NULL) {
return client_buffer->texture;
}
struct wlr_texture *texture =
wlr_texture_from_buffer(renderer, scene_buffer->buffer);
if (texture != NULL && scene_buffer->own_buffer) {
scene_buffer->own_buffer = false;
wlr_buffer_unlock(scene_buffer->buffer);
}
scene_buffer_set_texture(scene_buffer, texture);
return texture;
}
static void scene_node_get_size(struct wlr_scene_node *node,
int *width, int *height) {
*width = 0;
@ -1095,9 +1051,9 @@ static void scene_node_get_size(struct wlr_scene_node *node,
if (scene_buffer->dst_width > 0 && scene_buffer->dst_height > 0) {
*width = scene_buffer->dst_width;
*height = scene_buffer->dst_height;
} else {
*width = scene_buffer->buffer_width;
*height = scene_buffer->buffer_height;
} else if (scene_buffer->raster) {
*width = scene_buffer->raster->width;
*height = scene_buffer->raster->height;
wlr_output_transform_coords(scene_buffer->transform, width, height);
}
break;
@ -1380,8 +1336,12 @@ static void scene_entry_render(struct render_list_entry *entry, const struct ren
case WLR_SCENE_NODE_BUFFER:;
struct wlr_scene_buffer *scene_buffer = wlr_scene_buffer_from_node(node);
struct wlr_texture *texture = scene_buffer_get_texture(scene_buffer,
data->output->output->renderer);
struct wlr_texture *texture = NULL;
if (scene_buffer->raster) {
texture = wlr_raster_obtain_texture(scene_buffer->raster,
data->output->output->renderer);
}
if (texture == NULL) {
scene_output_damage(data->output, &render_region);
break;
@ -1402,8 +1362,8 @@ static void scene_entry_render(struct render_list_entry *entry, const struct ren
.blend_mode = !data->output->scene->calculate_visibility ||
pixman_region32_not_empty(&opaque) ?
WLR_RENDER_BLEND_MODE_PREMULTIPLIED : WLR_RENDER_BLEND_MODE_NONE,
.wait_timeline = scene_buffer->wait_timeline,
.wait_point = scene_buffer->wait_point,
.wait_timeline = scene_buffer->raster->wait_timeline,
.wait_point = scene_buffer->raster->wait_point,
});
struct wlr_scene_output_sample_event sample_event = {
@ -1702,7 +1662,7 @@ static bool scene_node_invisible(struct wlr_scene_node *node) {
} else if (node->type == WLR_SCENE_NODE_BUFFER) {
struct wlr_scene_buffer *buffer = wlr_scene_buffer_from_node(node);
return buffer->buffer == NULL && buffer->texture == NULL;
return buffer->raster == NULL;
}
return false;
@ -1825,8 +1785,8 @@ static bool scene_entry_try_direct_scanout(struct render_list_entry *entry,
return false;
}
int default_width = buffer->buffer->width;
int default_height = buffer->buffer->height;
int default_width = buffer->raster->width;
int default_height = buffer->raster->height;
wlr_output_transform_coords(buffer->transform, &default_width, &default_height);
struct wlr_fbox default_box = {
.width = default_width,
@ -1866,8 +1826,9 @@ static bool scene_entry_try_direct_scanout(struct render_list_entry *entry,
}
wlr_output_state_set_buffer(&pending, buffer->buffer);
if (buffer->wait_timeline != NULL) {
wlr_output_state_set_wait_timeline(&pending, buffer->wait_timeline, buffer->wait_point);
if (buffer->raster->wait_timeline != NULL) {
wlr_output_state_set_wait_timeline(&pending,
buffer->raster->wait_timeline, buffer->raster->wait_point);
}
if (!wlr_output_test_state(scene_output->output, &pending)) {
@ -2125,6 +2086,20 @@ bool wlr_scene_output_build_state(struct wlr_scene_output *scene_output,
return false;
}
// upload all the textures that will be used within this pass before we start
// rendering. We need to do this because some of those textures might be
// created as part of a multirender blit.
for (int i = list_len - 1; i >= 0; i--) {
struct render_list_entry *entry = &list_data[i];
if (entry->node->type != WLR_SCENE_NODE_BUFFER) {
continue;
}
struct wlr_scene_buffer *buffer = wlr_scene_buffer_from_node(entry->node);
wlr_raster_obtain_texture_with_allocator(buffer->raster,
output->renderer, output->allocator);
}
render_data.render_pass = render_pass;
pixman_region32_init(&render_data.damage);

View file

@ -405,8 +405,12 @@ static void surface_state_move(struct wlr_surface_state *state,
}
static void surface_apply_damage(struct wlr_surface *surface) {
wl_list_remove(&surface->current_buffer_release.link);
if (surface->current.buffer == NULL) {
// NULL commit
wl_list_init(&surface->current_buffer_release.link);
if (surface->buffer != NULL) {
wlr_buffer_unlock(&surface->buffer->base);
}
@ -415,13 +419,19 @@ static void surface_apply_damage(struct wlr_surface *surface) {
return;
}
// lock the buffer during the commit so that everything watching the surface
// can have a chance to take a look at the buffer.
wlr_buffer_lock(surface->current.buffer);
wl_signal_add(&surface->current.buffer->events.release,
&surface->current_buffer_release);
surface->opaque = buffer_is_opaque(surface->current.buffer);
if (surface->buffer != NULL) {
if (wlr_client_buffer_apply_damage(surface->buffer,
surface->current.buffer, &surface->buffer_damage)) {
wlr_buffer_unlock(surface->current.buffer);
surface->current.buffer = NULL;
wlr_surface_consume(surface);
return;
}
}
@ -432,6 +442,7 @@ static void surface_apply_damage(struct wlr_surface *surface) {
struct wlr_client_buffer *buffer = wlr_client_buffer_create(
surface->current.buffer, surface->compositor->renderer);
wlr_surface_consume(surface);
if (buffer == NULL) {
wlr_log(WLR_ERROR, "Failed to upload buffer");
@ -508,10 +519,26 @@ error:
wl_resource_post_no_memory(surface->resource);
}
// Reset the surface's per-commit buffer state once the consumed buffer is
// no longer needed. Only valid while the surface is marked consumed.
static void surface_clean_state(struct wlr_surface *surface) {
	assert(surface->consumed);
	// Detach the release listener and re-initialize the link so that a
	// later wl_list_remove() on it stays valid.
	wl_list_remove(&surface->current_buffer_release.link);
	wl_list_init(&surface->current_buffer_release.link);
	// Accumulated damage referred to the buffer we no longer track.
	pixman_region32_clear(&surface->buffer_damage);
	// The consumer owns the buffer reference now; forget it here.
	surface->current.buffer = NULL;
	surface->consumed = false;
}
static void surface_commit_state(struct wlr_surface *surface,
struct wlr_surface_state *next) {
assert(next->cached_state_locks == 0);
// if the surface was consumed that means we don't own the current buffer
// anymore.
if (surface->consumed) {
surface_clean_state(surface);
}
bool invalid_buffer = next->committed & WLR_SURFACE_STATE_BUFFER;
if (invalid_buffer && next->buffer == NULL) {
@ -562,8 +589,8 @@ static void surface_commit_state(struct wlr_surface *surface,
// Release the buffer after emitting the commit event, so that listeners can
// access it. Don't leave the buffer locked so that wl_shm buffers can be
// released immediately on commit when they are uploaded to the GPU.
surface->consumed = true;
wlr_buffer_unlock(surface->current.buffer);
surface->current.buffer = NULL;
}
static void surface_handle_commit(struct wl_client *client,
@ -718,6 +745,10 @@ static void surface_destroy_role_object(struct wlr_surface *surface);
static void surface_handle_resource_destroy(struct wl_resource *resource) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
if (surface->consumed) {
surface_clean_state(surface);
}
struct wlr_surface_output *surface_output, *surface_output_tmp;
wl_list_for_each_safe(surface_output, surface_output_tmp,
&surface->current_outputs, link) {
@ -736,6 +767,7 @@ static void surface_handle_resource_destroy(struct wl_resource *resource) {
surface_state_destroy_cached(cached, surface);
}
wl_list_remove(&surface->current_buffer_release.link);
wl_list_remove(&surface->role_resource_destroy.link);
wl_list_remove(&surface->pending_buffer_resource_destroy.link);
@ -751,6 +783,21 @@ static void surface_handle_resource_destroy(struct wl_resource *resource) {
free(surface);
}
// Listener fired when the current (consumed) buffer is released: at that
// point nothing else looks at the buffer, so the per-commit state tied to
// it can be cleaned up.
static void surface_handle_current_buffer_release(struct wl_listener *listener,
		void *data) {
	struct wlr_surface *surface = wl_container_of(listener, surface, current_buffer_release);
	surface_clean_state(surface);
}
void wlr_surface_consume(struct wlr_surface *surface) {
if (surface->consumed || !surface->current.buffer) {
return;
}
surface->consumed = true;
wlr_buffer_unlock(surface->current.buffer);
}
static struct wlr_surface *surface_create(struct wl_client *client,
uint32_t version, uint32_t id, struct wlr_compositor *compositor) {
struct wlr_surface *surface = calloc(1, sizeof(*surface));
@ -795,6 +842,9 @@ static struct wlr_surface *surface_create(struct wl_client *client,
surface->pending_buffer_resource_destroy.notify = pending_buffer_resource_handle_destroy;
wl_list_init(&surface->pending_buffer_resource_destroy.link);
surface->current_buffer_release.notify = surface_handle_current_buffer_release;
wl_list_init(&surface->current_buffer_release.link);
return surface;
}

View file

@ -14,6 +14,7 @@
#include <wlr/types/wlr_compositor.h>
#include <wlr/types/wlr_tablet_tool.h>
#include <wlr/types/wlr_touch.h>
#include <wlr/types/wlr_raster.h>
#include <wlr/types/wlr_xcursor_manager.h>
#include <wlr/util/box.h>
#include <wlr/util/log.h>
@ -539,7 +540,13 @@ static void cursor_output_cursor_update(struct wlr_cursor_output_cursor *output_
} else if (cur->state->surface != NULL) {
struct wlr_surface *surface = cur->state->surface;
struct wlr_texture *texture = wlr_surface_get_texture(surface);
struct wlr_texture *texture = NULL;
struct wlr_raster *raster = wlr_raster_from_surface(surface);
if (raster) {
texture = wlr_raster_obtain_texture(raster, output_cursor->output_cursor->output->renderer);
}
wlr_raster_unlock(raster);
int32_t hotspot_x = cur->state->surface_hotspot.x;
int32_t hotspot_y = cur->state->surface_hotspot.y;

View file

@ -7,6 +7,7 @@
#include <wlr/backend.h>
#include <wlr/config.h>
#include <wlr/interfaces/wlr_buffer.h>
#include <wlr/render/allocator.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/types/wlr_compositor.h>
#include <wlr/types/wlr_linux_dmabuf_v1.h>
@ -90,16 +91,19 @@ struct wlr_dmabuf_v1_buffer *wlr_dmabuf_v1_buffer_try_from_buffer_resource(
static const struct wlr_buffer_impl buffer_impl;
static struct wlr_dmabuf_v1_buffer *dmabuf_v1_buffer_from_buffer(
struct wlr_dmabuf_v1_buffer *wlr_dmabuf_v1_buffer_try_from_buffer(
struct wlr_buffer *wlr_buffer) {
assert(wlr_buffer->impl == &buffer_impl);
if (wlr_buffer->impl != &buffer_impl) {
return NULL;
}
struct wlr_dmabuf_v1_buffer *buffer = wl_container_of(wlr_buffer, buffer, base);
return buffer;
}
static void buffer_destroy(struct wlr_buffer *wlr_buffer) {
struct wlr_dmabuf_v1_buffer *buffer =
dmabuf_v1_buffer_from_buffer(wlr_buffer);
wlr_dmabuf_v1_buffer_try_from_buffer(wlr_buffer);
if (buffer->resource != NULL) {
wl_resource_set_user_data(buffer->resource, NULL);
}
@ -111,7 +115,7 @@ static void buffer_destroy(struct wlr_buffer *wlr_buffer) {
static bool buffer_get_dmabuf(struct wlr_buffer *wlr_buffer,
struct wlr_dmabuf_attributes *attribs) {
struct wlr_dmabuf_v1_buffer *buffer =
dmabuf_v1_buffer_from_buffer(wlr_buffer);
wlr_dmabuf_v1_buffer_try_from_buffer(wlr_buffer);
*attribs = buffer->attributes;
return true;
}
@ -366,6 +370,7 @@ static void params_create_common(struct wl_resource *params_resource,
&wl_buffer_impl, buffer, buffer_handle_resource_destroy);
buffer->attributes = attribs;
buffer->linux_dmabuf_v1 = linux_dmabuf;
buffer->release.notify = buffer_handle_release;
wl_signal_add(&buffer->base.events.release, &buffer->release);
@ -870,6 +875,8 @@ static void linux_dmabuf_v1_destroy(struct wlr_linux_dmabuf_v1 *linux_dmabuf) {
}
wl_list_remove(&linux_dmabuf->display_destroy.link);
wl_list_remove(&linux_dmabuf->main_renderer_destroy.link);
wl_list_remove(&linux_dmabuf->main_allocator_destroy.link);
wl_global_destroy(linux_dmabuf->global);
free(linux_dmabuf);
@ -957,6 +964,8 @@ struct wlr_linux_dmabuf_v1 *wlr_linux_dmabuf_v1_create(struct wl_display *displa
linux_dmabuf->main_device_fd = -1;
wl_list_init(&linux_dmabuf->surfaces);
wl_list_init(&linux_dmabuf->main_renderer_destroy.link);
wl_list_init(&linux_dmabuf->main_allocator_destroy.link);
wl_signal_init(&linux_dmabuf->events.destroy);
linux_dmabuf->global = wl_global_create(display, &zwp_linux_dmabuf_v1_interface,
@ -1068,15 +1077,6 @@ static bool devid_from_fd(int fd, dev_t *devid) {
return true;
}
static bool is_secondary_drm_backend(struct wlr_backend *backend) {
#if WLR_HAS_DRM_BACKEND
return wlr_backend_is_drm(backend) &&
wlr_drm_backend_get_parent(backend) != NULL;
#else
return false;
#endif
}
bool wlr_linux_dmabuf_feedback_v1_init_with_options(struct wlr_linux_dmabuf_feedback_v1 *feedback,
const struct wlr_linux_dmabuf_feedback_v1_init_options *options) {
assert(options->main_renderer != NULL);
@ -1119,8 +1119,7 @@ bool wlr_linux_dmabuf_feedback_v1_init_with_options(struct wlr_linux_dmabuf_feed
wlr_log(WLR_ERROR, "Failed to intersect renderer and scanout formats");
goto error;
}
} else if (options->scanout_primary_output != NULL &&
!is_secondary_drm_backend(options->scanout_primary_output->backend)) {
} else if (options->scanout_primary_output != NULL) {
int backend_drm_fd = wlr_backend_get_drm_fd(options->scanout_primary_output->backend);
if (backend_drm_fd < 0) {
wlr_log(WLR_ERROR, "Failed to get backend DRM FD");
@ -1169,3 +1168,42 @@ error:
wlr_linux_dmabuf_feedback_v1_finish(feedback);
return false;
}
static void linux_dmabuf_unregister_main_blit_device(struct wlr_linux_dmabuf_v1 *linux_dmabuf) {
wl_list_remove(&linux_dmabuf->main_renderer_destroy.link);
wl_list_remove(&linux_dmabuf->main_allocator_destroy.link);
wl_list_init(&linux_dmabuf->main_renderer_destroy.link);
wl_list_init(&linux_dmabuf->main_allocator_destroy.link);
linux_dmabuf->main_renderer = NULL;
linux_dmabuf->main_allocator = NULL;
}
// The main blit renderer is being destroyed: both halves of the pair are
// required for main-device blits, so unregister the whole pair.
static void linux_dmabuf_handle_main_renderer_destroy(struct wl_listener *listener, void *data) {
	struct wlr_linux_dmabuf_v1 *linux_dmabuf = wl_container_of(
		listener, linux_dmabuf, main_renderer_destroy);
	linux_dmabuf_unregister_main_blit_device(linux_dmabuf);
}

// Same as above, but triggered by the allocator's destruction.
static void linux_dmabuf_handle_main_allocator_destroy(struct wl_listener *listener, void *data) {
	struct wlr_linux_dmabuf_v1 *linux_dmabuf = wl_container_of(
		listener, linux_dmabuf, main_allocator_destroy);
	linux_dmabuf_unregister_main_blit_device(linux_dmabuf);
}
// Register the renderer/allocator pair used for main-device blits, replacing
// any previously registered pair. The registration is dropped automatically
// when either object is destroyed.
void wlr_linux_dmabuf_v1_set_main_blit_device(struct wlr_linux_dmabuf_v1 *linux_dmabuf,
		struct wlr_renderer *renderer, struct wlr_allocator *allocator) {
	assert(renderer != NULL);
	assert(allocator != NULL);

	// Detach from the previous pair, if any.
	wl_list_remove(&linux_dmabuf->main_renderer_destroy.link);
	wl_list_remove(&linux_dmabuf->main_allocator_destroy.link);

	linux_dmabuf->main_renderer = renderer;
	linux_dmabuf->main_renderer_destroy.notify = linux_dmabuf_handle_main_renderer_destroy;
	wl_signal_add(&renderer->events.destroy, &linux_dmabuf->main_renderer_destroy);

	linux_dmabuf->main_allocator = allocator;
	linux_dmabuf->main_allocator_destroy.notify = linux_dmabuf_handle_main_allocator_destroy;
	wl_signal_add(&allocator->events.destroy, &linux_dmabuf->main_allocator_destroy);
}

342
types/wlr_output_manager.c Normal file
View file

@ -0,0 +1,342 @@
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <wlr/backend.h>
#include <wlr/backend/multi.h>
#include <wlr/render/allocator.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/types/wlr_drm.h>
#include <wlr/types/wlr_shm.h>
#include <wlr/types/wlr_output.h>
#include <wlr/types/wlr_output_manager.h>
#include <wlr/types/wlr_linux_dmabuf_v1.h>
#include <wlr/util/log.h>
// Tear down one per-backend render state: unhook its listeners, unlink it
// from the manager's list, and destroy the allocator/renderer it owns.
// Does not free the struct itself (the primary is embedded in the manager).
static void output_manager_backend_finish(
		struct wlr_output_manager_backend *backend) {
	wl_list_remove(&backend->backend_destroy.link);
	wl_list_remove(&backend->renderer_lost.link);
	wl_list_remove(&backend->link);
	wlr_allocator_destroy(backend->allocator);
	wlr_renderer_destroy(backend->renderer);
}
// The wlr_backend went away: release its render state. The primary entry is
// embedded in the manager and only gets zeroed; secondary entries are heap
// allocated and must be freed.
static void output_manager_handle_backend_destroy(
		struct wl_listener *listener, void *data) {
	struct wlr_output_manager_backend *backend =
		wl_container_of(listener, backend, backend_destroy);
	struct wlr_output_manager_backend *primary = &backend->manager->primary;

	output_manager_backend_finish(backend);
	if (backend != primary) {
		free(backend);
	} else {
		*backend = (struct wlr_output_manager_backend){0};
	}
}
// Handle a GPU reset (renderer "lost" event): create a fresh renderer and
// allocator for the backend, swap them in, notify listeners through the
// recovery signal, and only then destroy the old state.
static void output_manager_handle_renderer_lost(
		struct wl_listener *listener, void *data) {
	struct wlr_output_manager_backend *backend =
		wl_container_of(listener, backend, renderer_lost);

	wlr_log(WLR_INFO, "Attempting renderer recovery after GPU reset!");

	struct wlr_renderer *renderer = wlr_renderer_autocreate(backend->backend);
	if (!renderer) {
		wlr_log(WLR_ERROR, "Could not create a new renderer after GPU reset");
		return;
	}

	struct wlr_allocator *allocator =
		wlr_allocator_autocreate(backend->backend, renderer);
	if (!allocator) {
		wlr_log(WLR_ERROR, "Could not create a new allocator after GPU reset");
		wlr_renderer_destroy(renderer);
		return;
	}

	wlr_log(WLR_INFO, "Created new renderer and allocator after reset. Attempting to swap...");

	struct wlr_renderer *old_renderer = backend->renderer;
	struct wlr_allocator *old_allocator = backend->allocator;
	backend->renderer = renderer;
	backend->allocator = allocator;

	// This listener is still linked into the old renderer's "lost" signal
	// list: unlink it before re-arming it on the new renderer. The previous
	// code called wl_signal_add() without removing the link first, which
	// corrupts both lists.
	wl_list_remove(&backend->renderer_lost.link);
	wl_signal_add(&backend->renderer->events.lost, &backend->renderer_lost);

	wl_signal_emit_mutable(&backend->events.recovery, NULL);

	// Only destroy the old state once we signal a recovery to avoid the old
	// state being referenced during its destruction. (The old code destroyed
	// it *before* emitting, contradicting this comment.)
	wlr_allocator_destroy(old_allocator);
	wlr_renderer_destroy(old_renderer);
}
static bool output_manager_backend_init(struct wlr_output_manager *manager,
struct wlr_output_manager_backend *backend, struct wlr_backend *wlr_backend) {
backend->renderer = wlr_renderer_autocreate(wlr_backend);
if (!backend->renderer) {
return false;
}
backend->allocator = wlr_allocator_autocreate(wlr_backend,
manager->primary.renderer);
if (!backend->allocator) {
wlr_renderer_destroy(manager->primary.renderer);
return false;
}
backend->manager = manager;
backend->backend = wlr_backend;
backend->locks = 1;
wl_signal_init(&backend->events.recovery);
backend->backend_destroy.notify = output_manager_handle_backend_destroy;
wl_signal_add(&wlr_backend->events.destroy, &backend->backend_destroy);
backend->renderer_lost.notify = output_manager_handle_renderer_lost;
wl_signal_add(&backend->renderer->events.lost, &backend->renderer_lost);
wl_list_insert(&manager->backends, &backend->link);
return true;
}
// State threaded through wlr_multi_for_each_backend() while creating
// per-backend render state.
struct multi_backend_iterator_data {
	struct wlr_output_manager *manager;
	bool primary; // true until a backend has been initialized as the primary
};
// Initialize render state for one sub-backend. The first backend that
// initializes successfully becomes the primary (embedded in the manager);
// later ones get heap-allocated entries. Failures are skipped silently.
static void multi_backend_iterator(struct wlr_backend *wlr_backend, void *_data) {
	struct multi_backend_iterator_data *data = _data;

	if (data->primary) {
		// Use the first device that works as the primary.
		bool ok = output_manager_backend_init(data->manager,
			&data->manager->primary, wlr_backend);
		if (ok) {
			data->primary = false;
		}
		return;
	}

	struct wlr_output_manager_backend *backend = calloc(1, sizeof(*backend));
	if (backend == NULL) {
		return;
	}
	if (!output_manager_backend_init(data->manager, backend, wlr_backend)) {
		free(backend);
	}
}
// Set up per-backend render state for every sub-backend of a multi-backend
// (or for the single backend given). Returns false when no backend could be
// initialized at all.
bool wlr_output_manager_init(struct wlr_output_manager *manager,
		struct wlr_backend *backend) {
	*manager = (struct wlr_output_manager){0};
	wl_list_init(&manager->backends);

	struct multi_backend_iterator_data iter_data = {
		.manager = manager,
		.primary = true,
	};
	if (!wlr_backend_is_multi(backend)) {
		multi_backend_iterator(backend, &iter_data);
	} else {
		wlr_multi_for_each_backend(backend, multi_backend_iterator, &iter_data);
	}

	return !wl_list_empty(&manager->backends);
}
// Release every per-backend render state owned by the manager.
void wlr_output_manager_finish(struct wlr_output_manager *manager) {
	// output_manager_backend_finish() unlinks the entry from this very list,
	// so the _safe iterator is required — the plain wl_list_for_each used
	// before would dereference the removed node's poisoned next pointer.
	// Secondary entries are heap-allocated and must also be freed; the
	// primary is embedded in the manager itself.
	struct wlr_output_manager_backend *backend, *tmp;
	wl_list_for_each_safe(backend, tmp, &manager->backends, link) {
		bool is_primary = backend == &manager->primary;
		output_manager_backend_finish(backend);
		if (!is_primary) {
			free(backend);
		}
	}
}
struct wlr_output_manager_backend *wlr_output_manager_lock_backend(
struct wlr_output_manager *manager, struct wlr_backend *wlr_backend) {
assert(!wlr_backend_is_multi(wlr_backend));
struct wlr_output_manager_backend *backend;
wl_list_for_each(backend, &manager->backends, link) {
if (backend->backend == wlr_backend) {
backend->locks++;
return backend;
}
}
backend = calloc(1, sizeof(*backend));
if (!backend) {
return NULL;
}
if (!output_manager_backend_init(manager, backend, wlr_backend)) {
free(backend);
return NULL;
}
return backend;
}
// Drop one reference on a per-backend render state; the state is destroyed
// when the last reference is released.
void wlr_output_manager_unlock_backend(struct wlr_output_manager_backend *backend) {
	assert(backend->locks > 0);
	backend->locks--;
	if (backend->locks != 0) {
		return;
	}

	// Read the primary pointer before finishing, then mirror
	// output_manager_handle_backend_destroy(): the primary entry is embedded
	// in the manager and must not be passed to free() (the old code freed
	// unconditionally).
	struct wlr_output_manager_backend *primary = &backend->manager->primary;
	output_manager_backend_finish(backend);
	if (backend == primary) {
		*backend = (struct wlr_output_manager_backend){0};
	} else {
		free(backend);
	}
}
// Glue between one wlr_output and the per-backend render state it uses.
// Attached to the output as an addon and destroyed with it.
struct output_manager_output {
	struct wlr_output_manager_backend *backend; // locked reference
	struct wlr_output *output;
	struct wlr_addon addon;

	// recover from GPU resets
	struct wl_listener backend_recovery;
};
// Addon destroy hook: runs when the wlr_output goes away.
static void manager_output_handle_output_destroy(struct wlr_addon *addon) {
	struct output_manager_output *manager_output =
		wl_container_of(addon, manager_output, addon);
	wlr_addon_finish(&manager_output->addon);
	// Unlink from the backend's recovery signal *before* dropping our
	// reference: unlocking may free the backend — and with it the signal
	// list this link lives in — turning the removal into a use-after-free.
	// (The old code removed the link after the unlock.)
	wl_list_remove(&manager_output->backend_recovery.link);
	wlr_output_manager_unlock_backend(manager_output->backend);
	free(manager_output);
}

static const struct wlr_addon_interface output_addon_impl = {
	.name = "wlr_output_manager_output",
	.destroy = manager_output_handle_output_destroy,
};
static void output_handle_recovery(struct wl_listener *listener, void *data) {
struct output_manager_output *manager = wl_container_of(listener, manager, backend_recovery);
// we lost the context, create a new renderer and switch everything out.
wlr_output_init_render(manager->output, manager->backend->allocator,
manager->backend->renderer);
}
// Attach an output to the manager: lock the render state of the output's
// backend, subscribe to GPU-reset recovery, and initialize rendering.
bool wlr_output_manager_init_output(struct wlr_output_manager *manager,
		struct wlr_output *output) {
	struct output_manager_output *manager_output = calloc(1, sizeof(*manager_output));
	if (manager_output == NULL) {
		return false;
	}

	manager_output->backend = wlr_output_manager_lock_backend(manager, output->backend);
	if (manager_output->backend == NULL) {
		free(manager_output);
		return false;
	}
	manager_output->output = output;

	wlr_addon_init(&manager_output->addon, &output->addons, manager, &output_addon_impl);

	manager_output->backend_recovery.notify = output_handle_recovery;
	wl_signal_add(&manager_output->backend->events.recovery,
		&manager_output->backend_recovery);

	wlr_output_init_render(output, manager_output->backend->allocator,
		manager_output->backend->renderer);
	return true;
}
// Advertise wl_shm with the intersection of the formats every backend
// renderer can create textures from. Returns false on failure.
bool wlr_output_manager_init_wl_shm(struct wlr_output_manager *manager,
		struct wl_display *wl_display) {
	size_t shm_formats_len = 0;
	uint32_t *shm_formats = NULL;

	struct wlr_output_manager_backend *backend;
	wl_list_for_each(backend, &manager->backends, link) {
		const struct wlr_drm_format_set *format_set = wlr_renderer_get_texture_formats(
			backend->renderer, WLR_BUFFER_CAP_DATA_PTR);
		if (format_set == NULL || format_set->len == 0) {
			wlr_log(WLR_ERROR, "Failed to initialize wl_shm: "
				"cannot get renderer formats");
			// This is a bool function — the old "return NULL" also leaked
			// the format array when a later backend failed.
			free(shm_formats);
			return false;
		}

		if (shm_formats == NULL) {
			// First renderer: seed the candidate list with its formats.
			shm_formats = malloc(format_set->len * sizeof(uint32_t));
			if (shm_formats == NULL) {
				wlr_log(WLR_INFO, "Cannot allocate a format set");
				return false;
			}
			for (size_t i = 0; i < format_set->len; i++) {
				shm_formats[i] = format_set->formats[i].format;
			}
			shm_formats_len = format_set->len;
			continue;
		}

		// intersect the format lists - null out any formats from the
		// shm_formats list when the current renderer doesn't have the
		// format as well.
		for (size_t i = 0; i < shm_formats_len; i++) {
			if (shm_formats[i] == 0) {
				continue;
			}
			bool found = false;
			for (size_t j = 0; j < format_set->len; j++) {
				if (format_set->formats[j].format == shm_formats[i]) {
					found = true;
					break;
				}
			}
			if (!found) {
				shm_formats[i] = 0;
			}
		}
	}

	// clear out all null formats from the format list
	size_t j = 0;
	for (size_t i = 0; i < shm_formats_len; i++) {
		if (shm_formats[i] != 0) {
			shm_formats[j++] = shm_formats[i];
		}
	}
	shm_formats_len = j;

	bool ok = wlr_shm_create(wl_display, 1, shm_formats, shm_formats_len);
	free(shm_formats);
	return ok;
}
// Create the display-level buffer-factory globals (wl_shm, wl_drm,
// linux-dmabuf) based on the primary renderer's capabilities.
bool wlr_output_manager_init_wl_display(struct wlr_output_manager *manager,
		struct wl_display *wl_display) {
	if (!wlr_output_manager_init_wl_shm(manager, wl_display)) {
		return false;
	}

	struct wlr_renderer *renderer = manager->primary.renderer;
	if (wlr_renderer_get_texture_formats(renderer, WLR_BUFFER_CAP_DMABUF) == NULL) {
		// No dma-buf support: wl_shm alone is enough.
		return true;
	}

	if (wlr_renderer_get_drm_fd(renderer) < 0) {
		wlr_log(WLR_INFO, "Cannot get renderer DRM FD, disabling wl_drm");
	} else if (wlr_drm_create(wl_display, renderer) == NULL) {
		return false;
	}

	return wlr_linux_dmabuf_v1_create_with_renderer(wl_display, 4, renderer) != NULL;
}

724
types/wlr_raster.c Normal file
View file

@ -0,0 +1,724 @@
#include <assert.h>
#include <drm_fourcc.h>
#include <pixman.h>
#include <wlr/render/allocator.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/render/wlr_texture.h>
#include <wlr/types/wlr_buffer.h>
#include <wlr/types/wlr_compositor.h>
#include <wlr/types/wlr_linux_dmabuf_v1.h>
#include <wlr/types/wlr_linux_drm_syncobj_v1.h>
#include <wlr/types/wlr_raster.h>
#include <wlr/render/drm_syncobj.h>
#include <wlr/types/wlr_surface_invalidation_v1.h>
#include <wlr/render/wlr_texture.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/util/addon.h>
#include <wlr/util/log.h>
#include "render/drm_format_set.h"
#include "render/wlr_renderer.h"
#include "types/wlr_buffer.h"
// The wrapped wlr_buffer was released: forget it. If no uploaded texture
// source remains either, the raster's content is gone — notify listeners.
static void raster_handle_buffer_release(struct wl_listener *listener, void *data) {
	struct wlr_raster *raster = wl_container_of(listener, raster, buffer_release);

	raster->buffer = NULL;
	wl_list_remove(&raster->buffer_release.link);
	wl_list_init(&raster->buffer_release.link);

	bool has_sources = !wl_list_empty(&raster->sources);
	if (!has_sources) {
		wl_signal_emit_mutable(&raster->events.invalidated, NULL);
	}
}
// Create a raster wrapping `buffer`. The buffer is referenced but not
// locked: the raster forgets it on the buffer's release event. The returned
// raster holds one lock.
struct wlr_raster *wlr_raster_create(struct wlr_buffer *buffer,
		const struct wlr_raster_create_options *options) {
	assert(buffer);

	struct wlr_raster *raster = calloc(1, sizeof(*raster));
	if (raster == NULL) {
		return NULL;
	}

	wl_list_init(&raster->sources);
	wl_signal_init(&raster->events.destroy);
	wl_signal_init(&raster->events.invalidated);

	raster->opaque = buffer_is_opaque(buffer);
	raster->width = buffer->width;
	raster->height = buffer->height;
	raster->buffer = buffer;
	raster->n_locks = 1;

	raster->buffer_release.notify = raster_handle_buffer_release;
	wl_signal_add(&buffer->events.release, &raster->buffer_release);

	if (options != NULL && options->wait_timeline != NULL) {
		raster->wait_timeline = wlr_drm_syncobj_timeline_ref(options->wait_timeline);
		raster->wait_point = options->wait_point;
	}
	return raster;
}
static void raster_source_destroy(struct wlr_raster_source *source) {
wl_list_remove(&source->link);
wl_list_remove(&source->renderer_destroy.link);
wl_list_remove(&source->allocator_destroy.link);
if (!source->raster->buffer && wl_list_empty(&source->raster->sources)) {
wl_signal_emit_mutable(&source->raster->events.invalidated, NULL);
}
free(source);
}
// Destroy the raster once the last lock is gone: emits the destroy event,
// tears down every texture source (destroying the textures it owns), and
// frees the raster.
static void raster_consider_destroy(struct wlr_raster *raster) {
	// Still referenced somewhere — keep it alive.
	if (raster->n_locks > 0) {
		return;
	}
	wl_signal_emit_mutable(&raster->events.destroy, NULL);
	// we don't want to call invalidation signals as we're destroying the raster
	wl_signal_init(&raster->events.invalidated);
	// raster_source_destroy() unlinks entries, hence the _safe walk.
	struct wlr_raster_source *source, *source_tmp;
	wl_list_for_each_safe(source, source_tmp, &raster->sources, link) {
		wlr_texture_destroy(source->texture);
		raster_source_destroy(source);
	}
	wl_list_remove(&raster->buffer_release.link);
	// NOTE(review): wait_timeline may be NULL here — presumably the unref
	// helper tolerates NULL; confirm against its implementation.
	wlr_drm_syncobj_timeline_unref(raster->wait_timeline);
	free(raster);
}
struct wlr_raster *wlr_raster_lock(struct wlr_raster *raster) {
raster->n_locks++;
return raster;
}
void wlr_raster_unlock(struct wlr_raster *raster) {
if (!raster) {
return;
}
assert(raster->n_locks > 0);
raster->n_locks--;
raster_consider_destroy(raster);
}
static void raster_detach(struct wlr_raster *raster, struct wlr_texture *texture) {
if (!texture) {
return;
}
struct wlr_raster_source *source;
wl_list_for_each(source, &raster->sources, link) {
if (source->texture == texture) {
raster_source_destroy(source);
return;
}
}
assert(false);
}
// The renderer owning this source's texture is going away — drop the whole
// source. (The texture itself is not destroyed here; presumably it dies with
// its renderer — confirm against wlr_texture lifetime rules.)
static void handle_renderer_destroy(struct wl_listener *listener, void *data) {
	struct wlr_raster_source *source = wl_container_of(listener, source, renderer_destroy);
	raster_source_destroy(source);
}

// The allocator is going away: keep the texture but forget the allocator so
// it is no longer used for GPU blits.
static void handle_allocator_destroy(struct wl_listener *listener, void *data) {
	struct wlr_raster_source *source = wl_container_of(listener, source, allocator_destroy);
	source->allocator = NULL;
	wl_list_remove(&source->allocator_destroy.link);
	wl_list_init(&source->allocator_destroy.link);
}
static void raster_attach_with_allocator(struct wlr_raster *raster,
struct wlr_texture *texture, struct wlr_allocator *allocator) {
assert(texture->width == raster->width && texture->height == raster->height);
struct wlr_raster_source *source;
wl_list_for_each(source, &raster->sources, link) {
assert(source->texture != texture);
}
source = calloc(1, sizeof(*source));
if (!source) {
return;
}
source->renderer_destroy.notify = handle_renderer_destroy;
wl_signal_add(&texture->renderer->events.destroy, &source->renderer_destroy);
if (allocator) {
source->allocator_destroy.notify = handle_allocator_destroy;
wl_signal_add(&allocator->events.destroy, &source->allocator_destroy);
} else {
wl_list_init(&source->allocator_destroy.link);
}
wl_list_insert(&raster->sources, &source->link);
source->texture = texture;
source->allocator = allocator;
source->raster = raster;
}
// Return the already-uploaded texture for `renderer`, or NULL if no source
// exists for that renderer yet.
static struct wlr_texture *wlr_raster_get_texture(struct wlr_raster *raster,
		struct wlr_renderer *renderer) {
	struct wlr_raster_source *source;
	wl_list_for_each(source, &raster->sources, link) {
		if (source->texture->renderer != renderer) {
			continue;
		}
		return source->texture;
	}
	return NULL;
}
// Compute the format (+ modifier list) to allocate a cross-GPU import buffer
// with, for destination renderer `dst`: XRGB/ARGB8888 depending on opacity,
// with DRM_FORMAT_MOD_INVALID (implicit modifier) stripped. On success the
// caller owns `drm_fmt` and must wlr_drm_format_finish() it.
static bool compute_import_buffer_format(struct wlr_raster *raster, struct wlr_drm_format *drm_fmt,
		struct wlr_renderer *dst) {
	const struct wlr_drm_format_set *texture_formats =
		wlr_renderer_get_texture_formats(dst, WLR_BUFFER_CAP_DMABUF);
	if (!texture_formats) {
		wlr_log(WLR_ERROR, "Failed to get texture_formats");
		return false; // was "return NULL" in a bool-returning function
	}

	// For now, let's only use XRGB
	uint32_t fmt = raster->opaque ? DRM_FORMAT_XRGB8888 : DRM_FORMAT_ARGB8888;
	const struct wlr_drm_format *dst_fmt =
		wlr_drm_format_set_get(texture_formats, fmt);
	if (!wlr_drm_format_copy(drm_fmt, dst_fmt)) {
		return false;
	}

	// Strip DRM_FORMAT_MOD_INVALID from the modifier list. The previous
	// shift loop started one slot too late — it left the INVALID entry in
	// place and read modifiers[len] one element past the end of the array.
	for (size_t i = 0; i < drm_fmt->len; i++) {
		if (drm_fmt->modifiers[i] != DRM_FORMAT_MOD_INVALID) {
			continue;
		}
		for (size_t j = i; j + 1 < drm_fmt->len; j++) {
			drm_fmt->modifiers[j] = drm_fmt->modifiers[j + 1];
		}
		drm_fmt->len--;
		break;
	}
	return true;
}
// Try to blit the raster's content into a freshly allocated buffer on the
// source texture's GPU so the destination renderer `dst` can import it.
// Returns a buffer the caller must wlr_buffer_drop(), or NULL on failure.
static struct wlr_buffer *raster_try_blit(struct wlr_raster *raster,
		struct wlr_raster_source *source, struct wlr_renderer *dst) {
	if (!source->allocator) {
		return NULL;
	}

	wlr_log(WLR_DEBUG, "Attempting a multigpu blit through a GPU");

	struct wlr_renderer *src = source->texture->renderer;

	// The src needs to be able to render into this format
	const struct wlr_drm_format_set *render_formats =
		wlr_renderer_get_render_formats(src);
	if (!render_formats) {
		wlr_log(WLR_ERROR, "Failed to get render_formats");
		return NULL;
	}

	struct wlr_drm_format fmt = {0};
	if (!compute_import_buffer_format(raster, &fmt, dst)) {
		wlr_log(WLR_ERROR, "Could not find a common format modifiers for all GPUs");
		return NULL;
	}

	// Restrict to modifiers the source can also render to. The check was
	// previously inverted: a *successful* intersection bailed out.
	if (!wlr_drm_format_intersect(&fmt, &fmt,
			wlr_drm_format_set_get(render_formats, fmt.format))) {
		wlr_drm_format_finish(&fmt);
		return NULL;
	}

	struct wlr_buffer *buffer = wlr_allocator_create_buffer(
		source->allocator, raster->width, raster->height, &fmt);
	wlr_drm_format_finish(&fmt);
	if (!buffer) {
		wlr_log(WLR_ERROR, "Failed to allocate multirenderer blit buffer");
		return NULL;
	}

	// Use explicit sync when the source renderer supports timelines.
	// `timeline` must start out NULL: it was previously read uninitialized
	// (undefined behavior) whenever the feature was unavailable.
	struct wlr_drm_syncobj_timeline *timeline = NULL;
	int drm_fd = wlr_renderer_get_drm_fd(src);
	if (src->features.timeline && drm_fd >= 0) {
		timeline = wlr_drm_syncobj_timeline_create(drm_fd);
	}

	const struct wlr_buffer_pass_options pass_options = {
		.signal_timeline = timeline,
		.signal_point = 1,
	};
	struct wlr_render_pass *pass = wlr_renderer_begin_buffer_pass(src, buffer, &pass_options);
	if (!pass) {
		wlr_log(WLR_ERROR, "Failed to create a render pass");
		wlr_drm_syncobj_timeline_unref(timeline); // was leaked on this path
		wlr_buffer_drop(buffer);
		return NULL;
	}

	wlr_render_pass_add_texture(pass, &(struct wlr_render_texture_options) {
		.texture = source->texture,
		.blend_mode = WLR_RENDER_BLEND_MODE_NONE,
		.wait_timeline = timeline,
		.wait_point = 1,
	});
	wlr_drm_syncobj_timeline_unref(timeline);

	if (!wlr_render_pass_submit(pass)) {
		wlr_log(WLR_ERROR, "Failed to render to a multigpu blit buffer");
		wlr_buffer_drop(buffer);
		return NULL;
	}

	return buffer;
}
static struct wlr_texture *raster_try_texture_from_blit(struct wlr_raster *raster,
struct wlr_renderer *renderer) {
struct wlr_buffer *imported = NULL;
struct wlr_raster_source *source;
wl_list_for_each(source, &raster->sources, link) {
imported = raster_try_blit(raster, source, renderer);
if (imported) {
break;
}
}
if (!imported) {
return NULL;
}
wlr_buffer_drop(imported);
return wlr_texture_from_buffer(renderer, imported);
}
// Last-resort path: read pixels of any existing texture back through the CPU
// and upload them to `dst`. Returns NULL if there are no sources or every
// source fails.
static struct wlr_texture *raster_try_cpu_copy(struct wlr_raster *raster,
		struct wlr_renderer *dst) {
	if (wl_list_empty(&raster->sources)) {
		return NULL;
	}

	wlr_log(WLR_DEBUG, "Performing multigpu blit through the CPU");

	const uint32_t format = DRM_FORMAT_ARGB8888;
	const uint32_t stride = raster->width * 4;
	void *data = malloc(stride * raster->height);
	if (data == NULL) {
		return NULL;
	}

	struct wlr_texture *texture = NULL;
	struct wlr_raster_source *source;
	wl_list_for_each(source, &raster->sources, link) {
		struct wlr_texture_read_pixels_options opts = {
			.format = format,
			.stride = stride,
			.data = data,
		};
		if (!wlr_texture_read_pixels(source->texture, &opts)) {
			wlr_log(WLR_ERROR, "Failed to read pixels");
			continue;
		}
		texture = wlr_texture_from_pixels(dst, format,
			stride, raster->width, raster->height, data);
		if (texture != NULL) {
			break;
		}
		wlr_log(WLR_ERROR, "Failed to upload texture from cpu data");
	}

	free(data);
	return texture;
}
// Obtain a texture for `renderer` from the raster, trying progressively more
// expensive strategies: cached source → direct buffer import → GPU blit from
// an existing source → import via the linux-dmabuf main device, then blit →
// CPU copy. `allocator` (may be NULL) is recorded with any new source so it
// can serve future blits. Returns NULL if every strategy fails.
struct wlr_texture *wlr_raster_obtain_texture_with_allocator(struct wlr_raster *raster,
		struct wlr_renderer *renderer, struct wlr_allocator *allocator) {
	// Fast path: we already uploaded a texture for this renderer.
	struct wlr_texture *texture = wlr_raster_get_texture(raster, renderer);
	if (texture) {
		return texture;
	}
	if (raster->buffer) {
		// Legacy path: a wlr_client_buffer already carries a texture.
		struct wlr_client_buffer *client_buffer =
			wlr_client_buffer_get(raster->buffer);
		if (client_buffer != NULL) {
			return client_buffer->texture;
		}
		// if we have a buffer, try and import that
		texture = wlr_texture_from_buffer(renderer, raster->buffer);
		if (texture) {
			raster_attach_with_allocator(raster, texture, allocator);
			return texture;
		}
	}
	// try to blit using the textures already available to us
	texture = raster_try_texture_from_blit(raster, renderer);
	if (texture) {
		raster_attach_with_allocator(raster, texture, allocator);
		return texture;
	}
	// if this is a linux_dmabuf_v1 buffer, then we can try to use the
	// main device for blitting which should support all the modifiers we
	// advertise.
	if (raster->buffer) {
		struct wlr_dmabuf_v1_buffer *dmabuf_buffer =
			wlr_dmabuf_v1_buffer_try_from_buffer(raster->buffer);
		if (dmabuf_buffer && dmabuf_buffer->linux_dmabuf_v1->main_renderer) {
			struct wlr_linux_dmabuf_v1 *linux_dmabuf = dmabuf_buffer->linux_dmabuf_v1;
			// Import on the main device first, creating a new source...
			struct wlr_texture *texture = wlr_texture_from_buffer(
				linux_dmabuf->main_renderer, raster->buffer);
			if (texture) {
				raster_attach_with_allocator(raster, texture,
					linux_dmabuf->main_allocator);
				// try to create a blit but this time through the primary device
				texture = raster_try_texture_from_blit(raster, renderer);
				if (texture) {
					raster_attach_with_allocator(raster, texture, allocator);
					return texture;
				}
			}
		}
	}
	// as a last resort we need to do a copy through the CPU
	texture = raster_try_cpu_copy(raster, renderer);
	if (texture) {
		raster_attach_with_allocator(raster, texture, allocator);
		return texture;
	}
	return NULL;
}
// Convenience wrapper: obtain a texture without registering an allocator for
// future GPU blits.
struct wlr_texture *wlr_raster_obtain_texture(struct wlr_raster *raster,
		struct wlr_renderer *renderer) {
	struct wlr_allocator *no_allocator = NULL;
	return wlr_raster_obtain_texture_with_allocator(raster, renderer, no_allocator);
}
// Deferred damage-update record: when the old raster dies, its textures are
// migrated to the new raster after applying `damage` from `buffer`.
// Destroyed when either raster dies or the buffer is released.
struct raster_update_state {
	struct wlr_buffer *buffer;     // the new raster's buffer (not locked)
	pixman_region32_t damage;      // region to update textures with
	struct wlr_raster *new_raster;
	struct wlr_raster *old_raster;
	struct wl_listener old_raster_destroy;
	struct wl_listener new_raster_destroy;
	struct wl_listener buffer_release;
};
// Free a pending damage-update record and detach all of its listeners.
static void destroy_raster_update_state(struct raster_update_state *state) {
	struct wl_list *links[] = {
		&state->old_raster_destroy.link,
		&state->new_raster_destroy.link,
		&state->buffer_release.link,
	};
	for (size_t i = 0; i < sizeof(links) / sizeof(links[0]); i++) {
		wl_list_remove(links[i]);
	}
	pixman_region32_fini(&state->damage);
	free(state);
}

// The new raster died before the old one: nothing left to migrate into.
static void raster_update_handle_new_raster_destroy(struct wl_listener *listener, void *data) {
	struct raster_update_state *state = wl_container_of(listener, state, new_raster_destroy);
	destroy_raster_update_state(state);
}
// The old raster is being destroyed: migrate its textures to the new raster
// by applying the recorded damage from the new buffer.
static void raster_update_handle_old_raster_destroy(struct wl_listener *listener, void *data) {
	struct raster_update_state *state = wl_container_of(listener, state, old_raster_destroy);
	// if the new raster already has a texture, there's nothing we can do to help.
	if (!wl_list_empty(&state->new_raster->sources)) {
		destroy_raster_update_state(state);
		return;
	}
	// _safe walk: raster_detach() unlinks entries while we iterate.
	struct wlr_raster_source *source, *tmp_source;
	wl_list_for_each_safe(source, tmp_source, &state->old_raster->sources, link) {
		struct wlr_texture *texture = source->texture;
		struct wlr_allocator *allocator = source->allocator;
		// Only textures that accept the incremental update are migrated;
		// the rest stay with the dying raster and are destroyed with it.
		if (wlr_texture_update_from_buffer(texture, state->buffer, &state->damage)) {
			raster_detach(state->old_raster, texture);
			raster_attach_with_allocator(state->new_raster, texture, allocator);
		}
	}
	destroy_raster_update_state(state);
}

// The new buffer was released before the migration could happen: the damage
// can no longer be applied, drop the record.
static void raster_update_handle_buffer_release(struct wl_listener *listener, void *data) {
	struct raster_update_state *state = wl_container_of(listener, state, buffer_release);
	destroy_raster_update_state(state);
}
// Create a new raster for `buffer` and arm a raster_update_state so that,
// when the old raster dies, its textures can be migrated with only `damage`
// re-uploaded. Returns the new raster, or NULL on allocation failure.
static struct wlr_raster *raster_update(struct wlr_raster *raster,
		struct wlr_buffer *buffer, const pixman_region32_t *damage,
		const struct wlr_raster_create_options *options) {
	struct raster_update_state *state = calloc(1, sizeof(*state));
	if (!state) {
		return NULL;
	}
	struct wlr_raster *new_raster = wlr_raster_create(buffer, options);
	if (!new_raster) {
		free(state);
		return NULL;
	}
	state->old_raster_destroy.notify = raster_update_handle_old_raster_destroy;
	wl_signal_add(&raster->events.destroy, &state->old_raster_destroy);
	state->new_raster_destroy.notify = raster_update_handle_new_raster_destroy;
	wl_signal_add(&new_raster->events.destroy, &state->new_raster_destroy);
	state->buffer_release.notify = raster_update_handle_buffer_release;
	wl_signal_add(&buffer->events.release, &state->buffer_release);
	state->new_raster = new_raster;
	state->old_raster = raster;
	state->buffer = buffer;
	// Copy the damage: the caller's region may be mutated before migration.
	pixman_region32_init(&state->damage);
	pixman_region32_copy(&state->damage, damage);
	return new_raster;
}
// Per-surface raster tracking, attached to the wlr_surface as an addon.
struct surface_raster {
	struct wlr_raster *raster;   // currently tracked raster (locked), may be NULL
	struct wlr_surface *surface;
	struct wlr_addon addon;
	struct wl_listener buffer_prerelease;
	struct wl_listener raster_invalidated;
	// true when we hold an extra buffer lock because texture upload failed
	bool locking_buffer;
};
static void surface_raster_drop_raster(struct surface_raster *surface_raster) {
if (surface_raster->locking_buffer) {
wlr_buffer_unlock(surface_raster->raster->buffer);
surface_raster->locking_buffer = false;
}
wlr_raster_unlock(surface_raster->raster);
surface_raster->raster = NULL;
}
static void surface_raster_destroy(struct surface_raster *surface_raster) {
surface_raster_drop_raster(surface_raster);
wl_list_remove(&surface_raster->buffer_prerelease.link);
wl_list_remove(&surface_raster->raster_invalidated.link);
wlr_addon_finish(&surface_raster->addon);
free(surface_raster);
}
static void surface_raster_handle_addon_destroy(struct wlr_addon *addon) {
struct surface_raster *surface_raster = wl_container_of(addon, surface_raster, addon);
surface_raster_destroy(surface_raster);
}
// The surface's buffer is about to be released back to the client: make sure
// every output's renderer has a texture copy first, so the raster keeps its
// content after the buffer is gone.
static void surface_raster_handle_buffer_prerelease(struct wl_listener *listener, void *data) {
	struct surface_raster *surface_raster =
		wl_container_of(listener, surface_raster, buffer_prerelease);
	struct wlr_raster *raster = surface_raster->raster;
	struct wlr_surface_output *output;
	wl_list_for_each(output, &surface_raster->surface->current_outputs, link) {
		wlr_raster_obtain_texture_with_allocator(raster,
			output->output->renderer, output->output->allocator);
	}
	// if there was a failed texture upload, keep on locking the buffer
	if (wl_list_empty(&raster->sources)) {
		wlr_buffer_lock(raster->buffer);
		surface_raster->locking_buffer = true;
	}
	// One-shot: disarm the listener but keep its link removable.
	wl_list_remove(&surface_raster->buffer_prerelease.link);
	wl_list_init(&surface_raster->buffer_prerelease.link);
}
// Forward raster invalidation to the surface-invalidation-v1 protocol so the
// client knows it must redraw.
static void surface_raster_handle_raster_invalidated(struct wl_listener *listener, void *data) {
	struct surface_raster *sr =
		wl_container_of(listener, sr, raster_invalidated);
	wlr_surface_invalidation_manager_v1_send_surface_invalidation(sr->surface);
}

const struct wlr_addon_interface surface_raster_addon_impl = {
	.name = "wlr_raster_surface",
	.destroy = surface_raster_handle_addon_destroy,
};

// Look up the surface_raster addon attached to a surface, if any.
static struct surface_raster *get_surface_raster(struct wlr_surface *surface) {
	struct wlr_addon *addon = wlr_addon_find(&surface->addons, NULL,
		&surface_raster_addon_impl);
	if (addon == NULL) {
		return NULL;
	}
	struct surface_raster *sr = wl_container_of(addon, sr, addon);
	return sr;
}
// Because wlr_raster doesn't lock the buffer itself, we need something extra
// to keep client buffer locked when operating in legacy mode.
struct client_buffer_compat {
	struct wlr_client_buffer *buffer; // buffer held locked for the raster's lifetime
	struct wl_listener destroy;       // fires on the raster's destroy event
};

// The raster died: release the compat lock on the client buffer.
static void client_buffer_compat_raster_destroy(struct wl_listener *listener, void *data) {
	struct client_buffer_compat *compat = wl_container_of(listener, compat, destroy);
	wlr_buffer_unlock(&compat->buffer->base);
	wl_list_remove(&compat->destroy.link);
	free(compat);
}
// Return a locked raster for the surface's current content, creating or
// updating the per-surface raster as needed. Returns NULL when the surface
// has no content (or on allocation failure). Caller must wlr_raster_unlock().
struct wlr_raster *wlr_raster_from_surface(struct wlr_surface *surface) {
	// Propagate explicit-sync acquire info into the raster, if the client
	// uses linux-drm-syncobj-v1.
	struct wlr_linux_drm_syncobj_surface_v1_state *syncobj_surface_state =
		wlr_linux_drm_syncobj_v1_get_surface_state(surface);
	struct wlr_raster_create_options options = {0};
	if (syncobj_surface_state) {
		options.wait_timeline = syncobj_surface_state->acquire_timeline;
		options.wait_point = syncobj_surface_state->acquire_point;
	}
	if (surface->compositor->renderer) {
		// use legacy wlr_client_buffer
		if (!surface->buffer) {
			return NULL;
		}
		struct client_buffer_compat *compat = calloc(1, sizeof(*compat));
		if (!compat) {
			return NULL;
		}
		struct wlr_raster *raster = wlr_raster_create(&surface->buffer->base, &options);
		if (!raster) {
			free(compat);
			return NULL;
		}
		// Keep the client buffer locked for the raster's whole lifetime.
		compat->destroy.notify = client_buffer_compat_raster_destroy;
		wl_signal_add(&raster->events.destroy, &compat->destroy);
		compat->buffer = surface->buffer;
		wlr_buffer_lock(&surface->buffer->base);
		return raster;
	}
	// Modern path: track the raster on the surface via an addon.
	struct surface_raster *surface_raster = get_surface_raster(surface);
	if (!surface_raster) {
		surface_raster = calloc(1, sizeof(*surface_raster));
		if (!surface_raster) {
			return NULL;
		}
		surface_raster->surface = surface;
		wlr_addon_init(&surface_raster->addon, &surface->addons, NULL,
			&surface_raster_addon_impl);
		// Listeners start disarmed (linked to themselves) until a raster exists.
		surface_raster->buffer_prerelease.notify = surface_raster_handle_buffer_prerelease;
		wl_list_init(&surface_raster->buffer_prerelease.link);
		surface_raster->raster_invalidated.notify = surface_raster_handle_raster_invalidated;
		wl_list_init(&surface_raster->raster_invalidated.link);
	}
	if (!surface->current.buffer) {
		// surface is mapped but it hasn't committed a new buffer. We need to keep
		// using the old one
		if (wlr_surface_has_buffer(surface)) {
			if (surface_raster->raster) {
				return wlr_raster_lock(surface_raster->raster);
			} else {
				return NULL;
			}
		}
		// No content at all: disarm listeners and drop the stale raster.
		wl_list_remove(&surface_raster->buffer_prerelease.link);
		wl_list_init(&surface_raster->buffer_prerelease.link);
		wl_list_remove(&surface_raster->raster_invalidated.link);
		wl_list_init(&surface_raster->raster_invalidated.link);
		surface_raster_drop_raster(surface_raster);
		return NULL;
	}
	struct wlr_raster *raster;
	if (surface_raster->raster) {
		// make sure we haven't already seen this buffer
		if (surface_raster->raster->buffer == surface->current.buffer) {
			return wlr_raster_lock(surface_raster->raster);
		}
		// before we try to update the old raster, remove obsolete textures
		struct wlr_raster_source *source, *tmp_source;
		wl_list_for_each_safe(source, tmp_source, &surface_raster->raster->sources, link) {
			struct wlr_texture *texture = source->texture;
			// A texture is obsolete when no current output uses its renderer.
			bool found = false;
			struct wlr_surface_output *output;
			wl_list_for_each(output, &surface->current_outputs, link) {
				if (output->output->renderer == texture->renderer) {
					found = true;
					break;
				}
			}
			if (!found) {
				raster_detach(surface_raster->raster, texture);
				wlr_texture_destroy(texture);
			}
		}
		raster = raster_update(surface_raster->raster,
			surface->current.buffer, &surface->buffer_damage, &options);
	} else {
		raster = wlr_raster_create(surface->current.buffer, &options);
	}
	if (!raster) {
		return NULL;
	}
	// Re-arm invalidation forwarding on the new raster, swap it in, and
	// watch for the buffer's prerelease so textures get uploaded in time.
	wl_list_remove(&surface_raster->raster_invalidated.link);
	wl_signal_add(&raster->events.invalidated, &surface_raster->raster_invalidated);
	surface_raster_drop_raster(surface_raster);
	surface_raster->raster = wlr_raster_lock(raster);
	wl_list_remove(&surface_raster->buffer_prerelease.link);
	wl_signal_add(&surface->current.buffer->events.prerelease, &surface_raster->buffer_prerelease);
	wlr_surface_consume(surface);
	return raster;
}

View file

@ -0,0 +1,204 @@
#include <assert.h>
#include <stdlib.h>
#include <wlr/types/wlr_surface_invalidation_v1.h>
#include <wlr/types/wlr_compositor.h>
#include <wlr/util/addon.h>
#include "surface-invalidation-v1-protocol.h"
#define SURFACE_INVALIDATION_MANAGER_VERSION 1
// One invalidated event sent to the client, tracked until the client
// acknowledges its serial with wp_surface_invalidation_v1.ack.
struct wlr_surface_invalidation_v1_configure {
	struct wl_list link; // struct wlr_surface_invalidation_v1.configures
	uint32_t serial; // serial carried by the invalidated event
	bool configured; // set once the client has ack'ed this serial
};
// Per-surface protocol object, attached to the wlr_surface as an addon so it
// can be found again when the compositor wants to send an invalidation.
struct wlr_surface_invalidation_v1 {
	struct wl_resource *resource; // cleared in the resource destroy handler
	struct wl_list configures; // struct wlr_surface_invalidation_v1_configure.link
	struct wlr_addon addon; // lives in wlr_surface.addons
};
// Unlink a pending configure from its surface's list and release its memory.
static void wlr_surface_invalidation_v1_configure_destroy(
		struct wlr_surface_invalidation_v1_configure *conf) {
	wl_list_remove(&conf->link);
	free(conf);
}
static const struct wp_surface_invalidation_v1_interface surface_inval_impl;
// Retrieve the wlr_surface_invalidation_v1 backing a protocol resource,
// asserting that the resource really implements our interface.
static struct wlr_surface_invalidation_v1 *surface_invalidation_v1_from_resource(
		struct wl_resource *resource) {
	assert(wl_resource_instance_of(resource, &wp_surface_invalidation_v1_interface,
		&surface_inval_impl));
	return wl_resource_get_user_data(resource);
}
// wl_resource destructor: detach the addon from the surface, drop every
// pending configure, and free the per-surface object.
static void surface_handle_resource_destroy(struct wl_resource *resource) {
	struct wlr_surface_invalidation_v1 *inval =
		surface_invalidation_v1_from_resource(resource);

	inval->resource = NULL;
	wlr_addon_finish(&inval->addon);

	struct wlr_surface_invalidation_v1_configure *conf, *conf_tmp;
	wl_list_for_each_safe(conf, conf_tmp, &inval->configures, link) {
		wlr_surface_invalidation_v1_configure_destroy(conf);
	}

	free(inval);
}
// Handler for wp_surface_invalidation_v1.ack: the client confirms it has
// processed the invalidated event carrying `serial`.
static void surface_inval_handle_ack(struct wl_client *client,
		struct wl_resource *resource, uint32_t serial) {
	struct wlr_surface_invalidation_v1 *surface =
		surface_invalidation_v1_from_resource(resource);

	// First find the ack'ed configure
	bool found = false;
	struct wlr_surface_invalidation_v1_configure *configure, *tmp_configure;
	wl_list_for_each(configure, &surface->configures, link) {
		if (configure->serial == serial) {
			found = true;
			break;
		}
	}
	if (!found) {
		// Unknown serial: silently ignored for now.
		/*
		TODO: What do we do here?
		wl_resource_post_error(resource,
			ZWLR_LAYER_SURFACE_V1_ERROR_INVALID_SURFACE_STATE,
			"wrong configure serial: %" PRIu32, serial);
		*/
		return;
	}
	configure->configured = true;

	// Then remove old configures from the list
	// NOTE(review): this loop walks from the list head and destroys every
	// configure it meets before reaching the ack'ed serial, so it is only
	// correct if the list is ordered oldest-first — verify that the send
	// path appends new configures at the tail rather than the head.
	wl_list_for_each_safe(configure, tmp_configure, &surface->configures, link) {
		if (configure->serial == serial) {
			break;
		}
		wlr_surface_invalidation_v1_configure_destroy(configure);
	}
}
// Generic destroy request handler shared by both protocol interfaces.
static void destroy_resource(struct wl_client *client,
		struct wl_resource *resource) {
	wl_resource_destroy(resource);
}
// Request dispatch table for wp_surface_invalidation_v1.
static const struct wp_surface_invalidation_v1_interface surface_inval_impl = {
	.destroy = destroy_resource,
	.ack = surface_inval_handle_ack,
};
// Called when the underlying wlr_surface goes away: destroying the resource
// runs surface_handle_resource_destroy, which frees everything.
static void surface_addon_handle_destroy(struct wlr_addon *addon) {
	struct wlr_surface_invalidation_v1 *surface = wl_container_of(addon, surface, addon);
	wl_resource_destroy(surface->resource);
}
// Addon vtable; also serves as the lookup key for wlr_addon_find().
static const struct wlr_addon_interface surface_addon_impl = {
	.name = "surface_invalidation_v1",
	.destroy = surface_addon_handle_destroy,
};
static const struct wp_surface_invalidation_manager_v1_interface manager_impl;
// Handler for wp_surface_invalidation_manager_v1.get_surface_invalidation:
// create the per-surface object and attach it to the surface as an addon.
static void manager_handle_get_surface_invalidation(struct wl_client *client,
		struct wl_resource *resource, uint32_t id, struct wl_resource *surface_resource) {
	struct wlr_surface *target = wlr_surface_from_resource(surface_resource);

	struct wlr_surface_invalidation_v1 *inval = calloc(1, sizeof(*inval));
	if (inval == NULL) {
		wl_client_post_no_memory(client);
		return;
	}

	inval->resource = wl_resource_create(client,
		&wp_surface_invalidation_v1_interface, 1, id);
	if (inval->resource == NULL) {
		free(inval);
		wl_client_post_no_memory(client);
		return;
	}

	wl_list_init(&inval->configures);
	wlr_addon_init(&inval->addon, &target->addons, NULL, &surface_addon_impl);

	wl_resource_set_implementation(inval->resource,
		&surface_inval_impl, inval, surface_handle_resource_destroy);
}
// Request dispatch table for wp_surface_invalidation_manager_v1.
static const struct wp_surface_invalidation_manager_v1_interface manager_impl = {
	.destroy = destroy_resource,
	.get_surface_invalidation = manager_handle_get_surface_invalidation,
};
// wl_global bind handler: instantiate a manager resource for the client.
static void manager_bind(struct wl_client *client, void *data,
		uint32_t version, uint32_t id) {
	struct wlr_surface_invalidation_manager_v1 *manager = data;

	struct wl_resource *manager_resource = wl_resource_create(client,
		&wp_surface_invalidation_manager_v1_interface, version, id);
	if (manager_resource == NULL) {
		wl_client_post_no_memory(client);
		return;
	}
	wl_resource_set_implementation(manager_resource, &manager_impl, manager, NULL);
}
// Display teardown: emit events.destroy first so listeners can clean up
// while the manager is still valid, then destroy the global and free.
static void handle_display_destroy(struct wl_listener *listener, void *data) {
	struct wlr_surface_invalidation_manager_v1 *manager =
		wl_container_of(listener, manager, display_destroy);
	wl_signal_emit_mutable(&manager->events.destroy, NULL);
	wl_global_destroy(manager->global);
	free(manager);
}
// Create the wp_surface_invalidation_manager_v1 global.  Returns NULL on
// allocation or global-creation failure; the manager frees itself when the
// display is destroyed.
struct wlr_surface_invalidation_manager_v1 *wlr_surface_invalidation_manager_v1_create(
		struct wl_display *display, uint32_t version) {
	assert(version <= SURFACE_INVALIDATION_MANAGER_VERSION);

	struct wlr_surface_invalidation_manager_v1 *manager = calloc(1, sizeof(*manager));
	if (manager == NULL) {
		return NULL;
	}

	manager->global = wl_global_create(display, &wp_surface_invalidation_manager_v1_interface,
		version, manager, manager_bind);
	if (manager->global == NULL) {
		free(manager);
		return NULL;
	}

	wl_signal_init(&manager->events.destroy);

	manager->display_destroy.notify = handle_display_destroy;
	wl_display_add_destroy_listener(display, &manager->display_destroy);

	return manager;
}
// Notify the client that the surface contents have been invalidated and must
// be redrawn.  No-op if the surface has no wp_surface_invalidation_v1 object.
void wlr_surface_invalidation_manager_v1_send_surface_invalidation(
		struct wlr_surface *wlr_surface) {
	struct wlr_addon *addon = wlr_addon_find(
		&wlr_surface->addons, NULL, &surface_addon_impl);
	if (!addon) {
		return;
	}
	struct wlr_surface_invalidation_v1 *surface =
		wl_container_of(addon, surface, addon);
	struct wl_display *display =
		wl_client_get_display(wl_resource_get_client(surface->resource));

	struct wlr_surface_invalidation_v1_configure *configure = calloc(1, sizeof(*configure));
	if (!configure) {
		wl_client_post_no_memory(wl_resource_get_client(surface->resource));
		return;
	}
	configure->serial = wl_display_next_serial(display);
	// Append at the tail so the list stays ordered oldest-first: the ack
	// handler walks from the head destroying every configure that precedes
	// the ack'ed serial, which is only correct with this ordering.
	// Inserting at the head (newest-first) would make acks destroy the
	// still-pending newer configures instead of the obsolete older ones.
	wl_list_insert(surface->configures.prev, &configure->link);
	wp_surface_invalidation_v1_send_invalidated(surface->resource, configure->serial);
}