From b84aadb5c13049135cdfb8a1c19c3436e70f54b9 Mon Sep 17 00:00:00 2001
From: Kenny Levinsen
Date: Wed, 1 Apr 2026 14:32:08 +0000
Subject: [PATCH 1/2] Add wlr_compositor_dmabuf_waiter

This helper monitors buffers for implicit and explicit work
synchronization, delaying the surface commit until either the fences
are ready to be consumed for synchronization (i.e., the fences have
materialized) or the work has completed, depending on the specified
waiter mode.

Through this, the compositor can avoid stalling on incomplete work,
staying fully interactive until the buffer is ready to be sampled.

This is a generalization of Simon's patch that added a waiter for
traditional implicit sync.

Based-on-patch-by: Simon Ser
---
 include/wlr/render/drm_syncobj.h        |  20 +-
 include/wlr/types/wlr_linux_dmabuf_v1.h |  48 ++++
 render/drm_syncobj.c                    |  45 ++--
 types/wlr_linux_dmabuf_v1.c             | 336 ++++++++++++++++++++++++
 4 files changed, 427 insertions(+), 22 deletions(-)

diff --git a/include/wlr/render/drm_syncobj.h b/include/wlr/render/drm_syncobj.h
index c7dd3b34c..954e60fa0 100644
--- a/include/wlr/render/drm_syncobj.h
+++ b/include/wlr/render/drm_syncobj.h
@@ -97,11 +97,7 @@ bool wlr_drm_syncobj_timeline_signal(struct wlr_drm_syncobj_timeline *timeline,
 /**
  * Asynchronously wait for a timeline point.
  *
- * Flags can be:
- *
- * - 0 to wait for the point to be signalled
- * - DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to only wait for a fence to
- *   materialize
+ * See wlr_drm_syncobj_timeline_eventfd for available flags.
  *
  * A callback must be provided that will be invoked when the waiter has finished.
  */
@@ -112,6 +108,20 @@ bool wlr_drm_syncobj_timeline_waiter_init(struct wlr_drm_syncobj_timeline_waiter
  * Cancel a timeline waiter.
  */
 void wlr_drm_syncobj_timeline_waiter_finish(struct wlr_drm_syncobj_timeline_waiter *waiter);
+/**
+ * Create an eventfd that becomes readable when a timeline point is ready.
+ *
+ * Flags can be:
+ *
+ * - 0 to wait for the point to be signaled
+ * - DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to only wait for a fence to
+ *   materialize
+ *
+ * The returned FD is owned by the caller and must be closed when no longer
+ * needed.
+ */
+int wlr_drm_syncobj_timeline_eventfd(struct wlr_drm_syncobj_timeline *timeline,
+	uint64_t point, uint32_t flags);
 /**
  * Export a timeline point as a sync_file FD.
  *
diff --git a/include/wlr/types/wlr_linux_dmabuf_v1.h b/include/wlr/types/wlr_linux_dmabuf_v1.h
index 2193f9141..3487ea43c 100644
--- a/include/wlr/types/wlr_linux_dmabuf_v1.h
+++ b/include/wlr/types/wlr_linux_dmabuf_v1.h
@@ -16,6 +16,7 @@
 #include
 #include
 
+struct wlr_compositor;
 struct wlr_surface;
 
 struct wlr_dmabuf_v1_buffer {
@@ -134,4 +135,51 @@ struct wlr_linux_dmabuf_feedback_v1_init_options {
 bool wlr_linux_dmabuf_feedback_v1_init_with_options(struct wlr_linux_dmabuf_feedback_v1 *feedback,
 	const struct wlr_linux_dmabuf_feedback_v1_init_options *options);
 
+enum wlr_surface_dmabuf_waiter_mode {
+	WLR_SURFACE_DMABUF_WAITER_MODE_AVAILABLE,
+	WLR_SURFACE_DMABUF_WAITER_MODE_COMPLETE,
+};
+
+/**
+ * A helper to wait for client buffers to be ready.
+ *
+ * When attached to a surface, this helper will delay commits until the
+ * relevant GPU fences are materialized or work has completed, depending on
+ * mode. When set to WLR_SURFACE_DMABUF_WAITER_MODE_COMPLETE, this means that
+ * wlr_surface.events.commit will only fire when the GPU buffers attached to
+ * that commit are ready to be read.
+ */
+struct wlr_surface_dmabuf_waiter {
+	struct wlr_surface *surface;
+	enum wlr_surface_dmabuf_waiter_mode mode;
+
+	struct {
+		struct wl_list commits; // wlr_surface_dmabuf_waiter_commit.link
+		struct wl_listener client_commit;
+	} WLR_PRIVATE;
+};
+
+/**
+ * Initialize a buffer waiter for a surface.
+ *
+ * Callers must call wlr_surface_dmabuf_waiter_finish() to unregister the waiter.
+ */
+void wlr_surface_dmabuf_waiter_init(struct wlr_surface_dmabuf_waiter *waiter,
+	struct wlr_surface *surface, enum wlr_surface_dmabuf_waiter_mode mode);
+
+/**
+ * Clean up a buffer waiter.
+ *
+ * Any pending commit waiting on GPU work to complete will be applied
+ * immediately.
+ */
+void wlr_surface_dmabuf_waiter_finish(struct wlr_surface_dmabuf_waiter *waiter);
+
+/**
+ * Initialize a compositor-wide buffer waiter, which will listen for new
+ * surfaces and attach buffer waiters to them.
+ */
+void wlr_compositor_dmabuf_waiter_create(struct wlr_compositor *compositor,
+	enum wlr_surface_dmabuf_waiter_mode mode);
+
 #endif
diff --git a/render/drm_syncobj.c b/render/drm_syncobj.c
index 15f71c536..d7d8b9a86 100644
--- a/render/drm_syncobj.c
+++ b/render/drm_syncobj.c
@@ -185,6 +185,31 @@ bool wlr_drm_syncobj_timeline_signal(struct wlr_drm_syncobj_timeline *timeline,
 	return true;
 }
 
+int wlr_drm_syncobj_timeline_eventfd(struct wlr_drm_syncobj_timeline *timeline,
+		uint64_t point, uint32_t flags) {
+	int ev_fd;
+#if HAVE_EVENTFD
+	ev_fd = eventfd(0, EFD_CLOEXEC);
+	if (ev_fd < 0) {
+		wlr_log_errno(WLR_ERROR, "eventfd() failed");
+	}
+#else
+	ev_fd = -1;
+	wlr_log(WLR_ERROR, "eventfd() is unavailable");
+#endif
+	if (ev_fd < 0) {
+		return -1;
+	}
+
+	if (drmSyncobjEventfd(timeline->drm_fd, timeline->handle, point, ev_fd, flags) != 0) {
+		wlr_log_errno(WLR_ERROR, "drmSyncobjEventfd() failed");
+		close(ev_fd);
+		return -1;
+	}
+
+	return ev_fd;
+}
+
 static int handle_eventfd_ready(int ev_fd, uint32_t mask, void *data) {
 	struct wlr_drm_syncobj_timeline_waiter *waiter = data;
 
@@ -208,27 +233,13 @@ bool wlr_drm_syncobj_timeline_waiter_init(struct wlr_drm_syncobj_timeline_waiter
 		struct wl_event_loop *loop, wlr_drm_syncobj_timeline_ready_callback callback) {
 	assert(callback);
 
-	int ev_fd;
-#if HAVE_EVENTFD
-	ev_fd = eventfd(0, EFD_CLOEXEC);
-	if (ev_fd < 0) {
-		wlr_log_errno(WLR_ERROR, "eventfd() failed");
-	}
-#else
-	ev_fd = -1;
-	wlr_log(WLR_ERROR, "eventfd() is unavailable");
-#endif
+	int ev_fd = wlr_drm_syncobj_timeline_eventfd(timeline, point, flags);
 	if (ev_fd < 0) {
 		return false;
 	}
 
-	if (drmSyncobjEventfd(timeline->drm_fd, timeline->handle, point, ev_fd, flags) != 0) {
-		wlr_log_errno(WLR_ERROR, "drmSyncobjEventfd() failed");
-		close(ev_fd);
-		return false;
-	}
-
-	struct wl_event_source *source = wl_event_loop_add_fd(loop, ev_fd, WL_EVENT_READABLE, handle_eventfd_ready, waiter);
+	struct wl_event_source *source = wl_event_loop_add_fd(loop, ev_fd,
+		WL_EVENT_READABLE, handle_eventfd_ready, waiter);
 	if (source == NULL) {
 		wlr_log(WLR_ERROR, "Failed to add FD to event loop");
 		close(ev_fd);
diff --git a/types/wlr_linux_dmabuf_v1.c b/types/wlr_linux_dmabuf_v1.c
index 3165e8805..832de09d8 100644
--- a/types/wlr_linux_dmabuf_v1.c
+++ b/types/wlr_linux_dmabuf_v1.c
@@ -1,15 +1,18 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -1173,3 +1176,336 @@ error:
 	wlr_linux_dmabuf_feedback_v1_finish(feedback);
 	return false;
 }
+
+struct wlr_surface_dmabuf_waiter_commit {
+	struct wlr_surface_dmabuf_waiter *waiter;
+	uint32_t surface_lock_seq;
+	struct wl_list link; // wlr_surface_dmabuf_waiter.commits
+
+	int fds[WLR_DMABUF_MAX_PLANES];
+	struct wl_event_source *event_sources[WLR_DMABUF_MAX_PLANES];
+	bool owned_fds;
+
+	struct wl_listener surface_destroy;
+};
+
+static void dmabuf_waiter_commit_destroy(
+		struct wlr_surface_dmabuf_waiter_commit *commit) {
+	for (size_t i = 0; i < WLR_DMABUF_MAX_PLANES; i++) {
+		if (commit->event_sources[i] != NULL) {
+			wl_event_source_remove(commit->event_sources[i]);
+		}
+		if (commit->owned_fds && commit->fds[i] != -1) {
+			close(commit->fds[i]);
+		}
+	}
+
+	wlr_surface_unlock_cached(commit->waiter->surface,
+		commit->surface_lock_seq);
+
+	wl_list_remove(&commit->surface_destroy.link);
+	wl_list_remove(&commit->link);
+	free(commit);
+}
+
+static void dmabuf_waiter_commit_handle_surface_destroy(
+		struct wl_listener *listener, void *data) {
+	struct wlr_surface_dmabuf_waiter_commit *commit =
+		wl_container_of(listener, commit, surface_destroy);
+	dmabuf_waiter_commit_destroy(commit);
+}
+
+static struct wlr_surface_dmabuf_waiter_commit *dmabuf_waiter_commit_create(
+		struct wlr_surface_dmabuf_waiter *waiter) {
+	struct wlr_surface_dmabuf_waiter_commit *commit = calloc(1, sizeof(*commit));
+	if (commit == NULL) {
+		wlr_log_errno(WLR_ERROR, "Allocation failed");
+		return NULL;
+	}
+
+	for (int i = 0; i < WLR_DMABUF_MAX_PLANES; i++) {
+		commit->fds[i] = -1;
+	}
+
+	commit->waiter = waiter;
+	wl_list_insert(&waiter->commits, &commit->link);
+
+	commit->surface_destroy.notify =
+		dmabuf_waiter_commit_handle_surface_destroy;
+	wl_signal_add(&waiter->surface->events.destroy,
+		&commit->surface_destroy);
+
+	commit->surface_lock_seq = wlr_surface_lock_pending(waiter->surface);
+
+	return commit;
+}
+
+static int dmabuf_waiter_fd_event(int fd, uint32_t mask, void *data) {
+	struct wlr_surface_dmabuf_waiter_commit *commit = data;
+
+	if (mask & (WL_EVENT_HANGUP | WL_EVENT_ERROR)) {
+		wlr_log(WLR_ERROR, "Got hangup/error while polling on DMA-BUF");
+	}
+
+	bool still_pending = false;
+	for (size_t i = 0; i < WLR_DMABUF_MAX_PLANES; i++) {
+		if (commit->fds[i] == fd) {
+			wl_event_source_remove(commit->event_sources[i]);
+			commit->event_sources[i] = NULL;
+			if (commit->owned_fds) {
+				close(commit->fds[i]);
+			}
+			commit->fds[i] = -1;
+		} else if (commit->event_sources[i] != NULL) {
+			still_pending = true;
+		}
+	}
+
+	if (!still_pending) {
+		dmabuf_waiter_commit_destroy(commit);
+	}
+	return 0;
+}
+
+static bool dmabuf_waiter_commit_add_fd(
+		struct wlr_surface_dmabuf_waiter_commit *commit,
+		struct wl_event_loop *event_loop,
+		int fd) {
+	size_t slot = 0;
+	while (slot < WLR_DMABUF_MAX_PLANES && commit->event_sources[slot] != NULL) {
+		slot++;
+	}
+	assert(slot < WLR_DMABUF_MAX_PLANES);
+	commit->fds[slot] = fd;
+
+	struct wl_event_source *source = wl_event_loop_add_fd(event_loop, fd,
+		WL_EVENT_READABLE, dmabuf_waiter_fd_event, commit);
+	if (source == NULL) {
+		wlr_log(WLR_ERROR, "wl_event_loop_add_fd() failed");
+		return false;
+	}
+
+	commit->event_sources[slot] = source;
+	return true;
+}
+
+static void dmabuf_waiter_wait_syncobj(
+		struct wlr_surface_dmabuf_waiter *waiter,
+		struct wl_event_loop *event_loop,
+		struct wlr_linux_drm_syncobj_surface_v1_state *syncobj_state) {
+	struct wlr_drm_syncobj_timeline *timeline =
+		syncobj_state->acquire_timeline;
+	uint64_t point = syncobj_state->acquire_point;
+
+	uint32_t check_flags, eventfd_flags;
+	if (waiter->mode == WLR_SURFACE_DMABUF_WAITER_MODE_COMPLETE) {
+		check_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
+		eventfd_flags = 0;
+	} else {
+		check_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE;
+		eventfd_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE;
+	}
+
+	bool already_ready = false;
+	if (!wlr_drm_syncobj_timeline_check(timeline, point,
+			check_flags, &already_ready)) {
+		wl_resource_post_no_memory(waiter->surface->resource);
+		return;
+	} else if (already_ready) {
+		return;
+	}
+
+	int ev_fd = wlr_drm_syncobj_timeline_eventfd(timeline, point,
+		eventfd_flags);
+	if (ev_fd < 0) {
+		wl_resource_post_no_memory(waiter->surface->resource);
+		return;
+	}
+
+	struct wlr_surface_dmabuf_waiter_commit *commit =
+		dmabuf_waiter_commit_create(waiter);
+	if (commit == NULL) {
+		close(ev_fd);
+		return;
+	}
+
+	commit->owned_fds = true;
+	if (!dmabuf_waiter_commit_add_fd(commit, event_loop, ev_fd)) {
+		dmabuf_waiter_commit_destroy(commit);
+	}
+}
+
+static void dmabuf_waiter_wait_dmabuf(
+		struct wlr_surface_dmabuf_waiter *waiter,
+		struct wl_event_loop *event_loop) {
+	if (waiter->mode != WLR_SURFACE_DMABUF_WAITER_MODE_COMPLETE) {
+		// Nothing to do here, implicit sync is always "available"
+		return;
+	}
+
+	struct wlr_dmabuf_attributes dmabuf = {0};
+	if (!wlr_buffer_get_dmabuf(waiter->surface->pending.buffer, &dmabuf)) {
+		return;
+	}
+
+	struct pollfd pollfds[WLR_DMABUF_MAX_PLANES];
+	for (int i = 0; i < dmabuf.n_planes; i++) {
+		pollfds[i] = (struct pollfd){
+			.fd = dmabuf.fd[i],
+			.events = POLLIN,
+		};
+	}
+	if (poll(pollfds, dmabuf.n_planes, 0) < 0) {
+		wlr_log_errno(WLR_ERROR, "poll() failed");
+		return;
+	}
+
+	bool need_wait = false;
+	for (int i = 0; i < dmabuf.n_planes; i++) {
+		if (pollfds[i].revents & (POLLHUP | POLLERR)) {
+			wlr_log(WLR_ERROR,
+				"Got hangup/error while polling on DMA-BUF");
+			return;
+		}
+		if (!(pollfds[i].revents & POLLIN)) {
+			need_wait = true;
+		}
+	}
+	if (!need_wait) {
+		return;
+	}
+
+	struct wlr_surface_dmabuf_waiter_commit *commit =
+		dmabuf_waiter_commit_create(waiter);
+	if (commit == NULL) {
+		return;
+	}
+
+	// wlr_compositor ensures the wlr_buffer will remain alive (IOW, the
+	// DMA-BUF FDs will remain opened) while we have a lock
+	for (int i = 0; i < dmabuf.n_planes; i++) {
+		if (pollfds[i].revents & POLLIN) {
+			continue;
+		}
+		if (!dmabuf_waiter_commit_add_fd(commit, event_loop,
+				dmabuf.fd[i])) {
+			dmabuf_waiter_commit_destroy(commit);
+			return;
+		}
+	}
+}
+
+static void dmabuf_waiter_handle_client_commit(struct wl_listener *listener,
+		void *data) {
+	struct wlr_surface_dmabuf_waiter *waiter =
+		wl_container_of(listener, waiter, client_commit);
+	struct wlr_surface *surface = waiter->surface;
+
+	if (!(surface->pending.committed & WLR_SURFACE_STATE_BUFFER) ||
+			surface->pending.buffer == NULL) {
+		return;
+	}
+
+	struct wl_client *client = wl_resource_get_client(surface->resource);
+	struct wl_display *display = wl_client_get_display(client);
+	struct wl_event_loop *event_loop = wl_display_get_event_loop(display);
+
+	struct wlr_linux_drm_syncobj_surface_v1_state *syncobj_state =
+		wlr_linux_drm_syncobj_v1_get_surface_state(surface);
+
+	if (syncobj_state != NULL && syncobj_state->acquire_timeline != NULL) {
+		dmabuf_waiter_wait_syncobj(waiter, event_loop, syncobj_state);
+	} else {
+		dmabuf_waiter_wait_dmabuf(waiter, event_loop);
+	}
+}
+
+void wlr_surface_dmabuf_waiter_init(struct wlr_surface_dmabuf_waiter *waiter,
+		struct wlr_surface *surface,
+		enum wlr_surface_dmabuf_waiter_mode mode) {
+	assert(waiter->surface == NULL);
+
+	waiter->surface = surface;
+	waiter->mode = mode;
+	wl_list_init(&waiter->commits);
+
+	waiter->client_commit.notify = dmabuf_waiter_handle_client_commit;
+	wl_signal_add(&surface->events.client_commit, &waiter->client_commit);
+}
+
+void wlr_surface_dmabuf_waiter_finish(
+		struct wlr_surface_dmabuf_waiter *waiter) {
+	struct wlr_surface_dmabuf_waiter_commit *commit, *tmp;
+	wl_list_for_each_safe(commit, tmp, &waiter->commits, link) {
+		dmabuf_waiter_commit_destroy(commit);
+	}
+
+	wl_list_remove(&waiter->commits);
+	wl_list_remove(&waiter->client_commit.link);
+}
+
+struct wlr_compositor_dmabuf_waiter {
+	enum wlr_surface_dmabuf_waiter_mode mode;
+	struct wl_listener new_surface;
+	struct wl_listener destroy;
+};
+
+struct wlr_compositor_dmabuf_waiter_surface {
+	struct wlr_surface_dmabuf_waiter base;
+	struct wl_listener destroy;
+};
+
+static void compositor_dmabuf_waiter_surface_handle_destroy(
+		struct wl_listener *listener, void *data) {
+	struct wlr_compositor_dmabuf_waiter_surface *waiter_surface =
+		wl_container_of(listener, waiter_surface, destroy);
+	wlr_surface_dmabuf_waiter_finish(&waiter_surface->base);
+	wl_list_remove(&waiter_surface->destroy.link);
+	free(waiter_surface);
+}
+
+static void compositor_dmabuf_waiter_handle_new_surface(
+		struct wl_listener *listener, void *data) {
+	struct wlr_compositor_dmabuf_waiter *waiter =
+		wl_container_of(listener, waiter, new_surface);
+	struct wlr_surface *surface = data;
+
+	struct wlr_compositor_dmabuf_waiter_surface *waiter_surface =
+		calloc(1, sizeof(*waiter_surface));
+	if (waiter_surface == NULL) {
+		wlr_log_errno(WLR_ERROR, "Allocation failed");
+		return;
+	}
+
+	wlr_surface_dmabuf_waiter_init(&waiter_surface->base, surface,
+		waiter->mode);
+
+	waiter_surface->destroy.notify =
+		compositor_dmabuf_waiter_surface_handle_destroy;
+	wl_signal_add(&surface->events.destroy, &waiter_surface->destroy);
+}
+
+static void compositor_dmabuf_waiter_handle_destroy(
+		struct wl_listener *listener, void *data) {
+	struct wlr_compositor_dmabuf_waiter *waiter =
+		wl_container_of(listener, waiter, destroy);
+	wl_list_remove(&waiter->new_surface.link);
+	wl_list_remove(&waiter->destroy.link);
+	free(waiter);
+}
+
+void wlr_compositor_dmabuf_waiter_create(struct wlr_compositor *compositor,
+		enum wlr_surface_dmabuf_waiter_mode mode) {
+	struct wlr_compositor_dmabuf_waiter *waiter = calloc(1, sizeof(*waiter));
+	if (waiter == NULL) {
+		wlr_log_errno(WLR_ERROR, "Allocation failed");
+		return;
+	}
+
+	waiter->mode = mode;
+
+	waiter->new_surface.notify = compositor_dmabuf_waiter_handle_new_surface;
+	wl_signal_add(&compositor->events.new_surface, &waiter->new_surface);
+	waiter->destroy.notify = compositor_dmabuf_waiter_handle_destroy;
+	wl_signal_add(&compositor->events.destroy, &waiter->destroy);
+}

From d75f606cfc5a6bb11829bcf2450152730c72a4d7 Mon Sep 17 00:00:00 2001
From: Kenny Levinsen
Date: Wed, 1 Apr 2026 14:34:54 +0000
Subject: [PATCH 2/2] linux_drm_syncobj_v1: Remove implicit wait

This responsibility has moved to the separate dmabuf waiter helper.

This is a breaking change: users of the syncobj manager must now use
the dmabuf waiter to wait, at the very least, for fence availability.
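As a rough sketch of the migration (assuming a compositor that already
holds a struct wlr_compositor *compositor), restoring the previous
behavior of waiting for acquire fences to materialize would look
something like:

	/* Hypothetical call site during compositor setup; uses the
	 * helper added in the previous patch. */
	wlr_compositor_dmabuf_waiter_create(compositor,
		WLR_SURFACE_DMABUF_WAITER_MODE_AVAILABLE);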
---
 include/wlr/types/wlr_linux_drm_syncobj_v1.h |  4 ++
 types/wlr_linux_drm_syncobj_v1.c             | 69 --------------------
 2 files changed, 4 insertions(+), 69 deletions(-)

diff --git a/include/wlr/types/wlr_linux_drm_syncobj_v1.h b/include/wlr/types/wlr_linux_drm_syncobj_v1.h
index 7fd55ec2e..ce4c2cc70 100644
--- a/include/wlr/types/wlr_linux_drm_syncobj_v1.h
+++ b/include/wlr/types/wlr_linux_drm_syncobj_v1.h
@@ -43,6 +43,10 @@ struct wlr_linux_drm_syncobj_manager_v1 {
  * The compositor must be prepared to handle fences coming from clients and to
  * send release fences correctly. In particular, both the renderer and the
  * backend need to support explicit synchronization.
+ *
+ * Fences provided here may not yet be ready to consume. See
+ * wlr_compositor_dmabuf_waiter_create for a way to wait for fences to be
+ * materialized or completed before application.
  */
 struct wlr_linux_drm_syncobj_manager_v1 *wlr_linux_drm_syncobj_manager_v1_create(
 	struct wl_display *display, uint32_t version, int drm_fd);
diff --git a/types/wlr_linux_drm_syncobj_v1.c b/types/wlr_linux_drm_syncobj_v1.c
index 53fc2fd43..8f52f7b81 100644
--- a/types/wlr_linux_drm_syncobj_v1.c
+++ b/types/wlr_linux_drm_syncobj_v1.c
@@ -27,14 +27,6 @@ struct wlr_linux_drm_syncobj_surface_v1 {
 	struct wl_listener client_commit;
 };
 
-struct wlr_linux_drm_syncobj_surface_v1_commit {
-	struct wlr_surface *surface;
-	struct wlr_drm_syncobj_timeline_waiter waiter;
-	uint32_t cached_seq;
-
-	struct wl_listener surface_destroy;
-};
-
 static const struct wp_linux_drm_syncobj_manager_v1_interface manager_impl;
 static const struct wp_linux_drm_syncobj_timeline_v1_interface timeline_impl;
 static const struct wp_linux_drm_syncobj_surface_v1_interface surface_impl;
@@ -212,61 +204,6 @@ static struct wlr_linux_drm_syncobj_surface_v1 *surface_from_wlr_surface(
 	return surface;
 }
 
-static void surface_commit_destroy(struct wlr_linux_drm_syncobj_surface_v1_commit *commit) {
-	wlr_surface_unlock_cached(commit->surface, commit->cached_seq);
-	wl_list_remove(&commit->surface_destroy.link);
-	wlr_drm_syncobj_timeline_waiter_finish(&commit->waiter);
-	free(commit);
-}
-
-static void surface_commit_handle_waiter_ready(struct wlr_drm_syncobj_timeline_waiter *waiter) {
-	struct wlr_linux_drm_syncobj_surface_v1_commit *commit =
-		wl_container_of(waiter, commit, waiter);
-	surface_commit_destroy(commit);
-}
-
-static void surface_commit_handle_surface_destroy(struct wl_listener *listener,
-		void *data) {
-	struct wlr_linux_drm_syncobj_surface_v1_commit *commit =
-		wl_container_of(listener, commit, surface_destroy);
-	surface_commit_destroy(commit);
-}
-
-// Block the surface commit until the fence materializes
-static bool lock_surface_commit(struct wlr_linux_drm_syncobj_surface_v1 *surface,
-		struct wlr_drm_syncobj_timeline *timeline, uint64_t point) {
-	uint32_t flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE;
-
-	bool already_materialized = false;
-	if (!wlr_drm_syncobj_timeline_check(timeline, point, flags, &already_materialized)) {
-		return false;
-	} else if (already_materialized) {
-		return true;
-	}
-
-	struct wlr_linux_drm_syncobj_surface_v1_commit *commit = calloc(1, sizeof(*commit));
-	if (commit == NULL) {
-		return false;
-	}
-
-	struct wl_client *client = wl_resource_get_client(surface->resource);
-	struct wl_display *display = wl_client_get_display(client);
-	struct wl_event_loop *loop = wl_display_get_event_loop(display);
-	if (!wlr_drm_syncobj_timeline_waiter_init(&commit->waiter, timeline, point,
-			flags, loop, surface_commit_handle_waiter_ready)) {
-		free(commit);
-		return false;
-	}
-
-	commit->surface = surface->surface;
-	commit->cached_seq = wlr_surface_lock_pending(surface->surface);
-
-	commit->surface_destroy.notify = surface_commit_handle_surface_destroy;
-	wl_signal_add(&surface->surface->events.destroy, &commit->surface_destroy);
-
-	return true;
-}
-
 static void surface_handle_client_commit(struct wl_listener *listener,
 		void *data) {
 	struct wlr_linux_drm_syncobj_surface_v1 *surface =
@@ -311,12 +248,6 @@ static void surface_handle_client_commit(struct wl_listener *listener,
 			"Acquire and release points conflict");
 		return;
 	}
-
-	if (surface->pending.acquire_timeline != NULL && !lock_surface_commit(
-			surface, surface->pending.acquire_timeline, surface->pending.acquire_point)) {
-		wl_resource_post_no_memory(surface->resource);
-		return;
-	}
 }
 
 static void manager_handle_get_surface(struct wl_client *client,