render,types/wlr_shm: Offload slow cleanup ops

Defer close()/munmap() calls to a cleanup thread to avoid blocking
when they are slow. This is most noticeable on low-end hardware with
large screens, where large surfaces can spend considerable time
invalidating MMU mappings on the GPU or clearing shmem pages.
This commit is contained in:
Kurt Kartaltepe 2024-07-22 19:09:27 -07:00
parent 015bb8512e
commit 5cc737d599
6 changed files with 130 additions and 4 deletions

10
include/util/cleanup.h Normal file
View file

@ -0,0 +1,10 @@
#ifndef UTIL_CLEANUP_H
#define UTIL_CLEANUP_H

/**
 * A deferred cleanup task: task is invoked with data on the cleanup thread.
 */
typedef void (*wlr_task_cb)(void *data);

struct wlr_task {
	wlr_task_cb task; // Callback to run on the cleanup thread
	void *data;       // Opaque argument passed to task
};

// Start the cleanup worker thread if it is not already running.
void wlr_cleanup_queue_init(void);
// Drain pending tasks, then stop and join the worker thread.
void wlr_cleanup_queue_finish(void);
// Queue t to run on the cleanup thread. If the queue is full, t runs
// synchronously on the caller's thread (backpressure).
void wlr_cleanup_defer(struct wlr_task t);

#endif // UTIL_CLEANUP_H

View file

@ -114,6 +114,7 @@ pixman = dependency('pixman-1',
) )
math = cc.find_library('m') math = cc.find_library('m')
rt = cc.find_library('rt') rt = cc.find_library('rt')
threads = dependency('threads')
wlr_files = [] wlr_files = []
wlr_deps = [ wlr_deps = [
@ -123,6 +124,7 @@ wlr_deps = [
pixman, pixman,
math, math,
rt, rt,
threads,
] ]
subdir('protocol') subdir('protocol')

View file

@ -1,15 +1,31 @@
#include <fcntl.h> #include <fcntl.h>
#include <stdlib.h>
#include <unistd.h> #include <unistd.h>
#include <wlr/render/dmabuf.h> #include <wlr/render/dmabuf.h>
#include <wlr/util/log.h> #include <wlr/util/log.h>
#include "render/dmabuf.h" #include "render/dmabuf.h"
#include "util/cleanup.h"
// Deferred task: closes every plane fd held by a heap-allocated copy of
// dmabuf attributes, then frees the copy itself.
static void defer_close_dmabuf(void *data) {
	struct wlr_dmabuf_attributes *attribs = data;
	int n = attribs->n_planes;
	for (int i = 0; i < n; i++) {
		close(attribs->fd[i]);
	}
	free(attribs);
}
void wlr_dmabuf_attributes_finish(struct wlr_dmabuf_attributes *attribs) {
struct wlr_dmabuf_attributes *attribs_defer = calloc(1, sizeof(struct wlr_dmabuf_attributes));
attribs_defer->n_planes = attribs->n_planes;
for (int i = 0; i < attribs->n_planes; ++i) {
attribs_defer->fd[i] = attribs->fd[i];
attribs->fd[i] = -1; attribs->fd[i] = -1;
} }
attribs->n_planes = 0; attribs->n_planes = 0;
wlr_cleanup_defer((struct wlr_task){&defer_close_dmabuf, attribs_defer});
} }
bool wlr_dmabuf_attributes_copy(struct wlr_dmabuf_attributes *dst, bool wlr_dmabuf_attributes_copy(struct wlr_dmabuf_attributes *dst,

View file

@ -14,6 +14,7 @@
#include <wlr/types/wlr_shm.h> #include <wlr/types/wlr_shm.h>
#include <wlr/util/log.h> #include <wlr/util/log.h>
#include "render/pixel_format.h" #include "render/pixel_format.h"
#include "util/cleanup.h"
#ifdef __STDC_NO_ATOMICS__ #ifdef __STDC_NO_ATOMICS__
#error "C11 atomics are required" #error "C11 atomics are required"
@ -116,6 +117,12 @@ static struct wlr_shm_mapping *mapping_create(int fd, size_t size) {
return mapping; return mapping;
} }
// Deferred task: unmaps a dropped shm mapping and frees its bookkeeping
// struct on the cleanup thread (munmap can be slow on large mappings).
static void defer_munmap(void *data) {
	struct wlr_shm_mapping *mapping = data;
	void *addr = mapping->data;
	size_t len = mapping->size;
	free(mapping);
	munmap(addr, len);
}
static void mapping_consider_destroy(struct wlr_shm_mapping *mapping) { static void mapping_consider_destroy(struct wlr_shm_mapping *mapping) {
if (!mapping->dropped) { if (!mapping->dropped) {
return; return;
@ -127,8 +134,7 @@ static void mapping_consider_destroy(struct wlr_shm_mapping *mapping) {
} }
} }
munmap(mapping->data, mapping->size); wlr_cleanup_defer((struct wlr_task){&defer_munmap, mapping});
free(mapping);
} }
/** /**
@ -397,13 +403,18 @@ static const struct wl_shm_pool_interface pool_impl = {
.resize = pool_handle_resize, .resize = pool_handle_resize,
}; };
// Deferred task: the fd to close is smuggled through the pointer-sized
// data argument (cast via uintptr_t, never dereferenced).
static void defer_close_fd(void *data) {
	close((int)(uintptr_t)data);
}
// Destroys the pool once nothing references it: the wl_shm_pool resource
// is gone and no buffers created from it remain. The pool fd is closed on
// the cleanup thread to avoid blocking here.
static void pool_consider_destroy(struct wlr_shm_pool *pool) {
	if (pool->resource != NULL) {
		return;
	}
	if (!wl_list_empty(&pool->buffers)) {
		return;
	}
	mapping_drop(pool->mapping);
	wlr_cleanup_defer((struct wlr_task){&defer_close_fd, (void *)(uintptr_t)pool->fd});
	free(pool);
}

86
util/cleanup.c Normal file
View file

@ -0,0 +1,86 @@
#include "util/cleanup.h"
#include <pthread.h>
#include <stdbool.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>
// Maximum number of tasks buffered before producers fall back to running
// tasks synchronously (backpressure).
#define MAX_TASKS 16
// Fixed-size FIFO of deferred tasks plus the worker-thread handles.
struct work_queue {
struct wlr_task list[MAX_TASKS]; // Pending tasks, oldest first (guarded by lock)
int size; // Number of valid entries in list (guarded by lock)
bool running; // True once the worker thread has been started
pthread_mutex_t lock; // Protects list and size
pthread_t thread; // Worker thread running cleanup_thread()
int efd; // eventfd counting queued tasks; wakes the worker
};
// Single process-wide queue shared by all users of wlr_cleanup_defer().
static struct work_queue queue;
// Worker loop: sleeps on the eventfd, drains queued tasks in FIFO order,
// and runs their callbacks outside the lock so slow close()/munmap()
// calls never block producers.
static void *cleanup_thread(void *unused)
{
	struct wlr_task work[MAX_TASKS];
	eventfd_t v;
	(void)unused;
	do {
		// v is the number of eventfd_write(efd, 1) calls since the last
		// read, i.e. the number of newly queued tasks.
		eventfd_read(queue.efd, &v);
		pthread_mutex_lock(&queue.lock);
		// Fix: clamp the batch under the lock. The original indexed
		// work[] and queue.list[i - v] straight from the 64-bit counter;
		// a miscounted wakeup would overflow work[MAX_TASKS], and the
		// int/uint64 mix made the index arithmetic unsigned.
		int batch = v > (eventfd_t)queue.size ? queue.size : (int)v;
		for (int i = 0; i < batch; i++) {
			work[i] = queue.list[i];
		}
		for (int i = batch; i < queue.size; i++) {
			queue.list[i - batch] = queue.list[i];
		}
		queue.size -= batch;
		pthread_mutex_unlock(&queue.lock);
		for (int i = 0; i < batch; i++) {
			work[i].task(work[i].data);
		}
		// queue.running is only cleared by done(), which executes on this
		// very thread, so this unlocked read is not a cross-thread race.
	} while (queue.running);
	return NULL;
}
// Queues t to run on the cleanup thread; lazily starts the thread on
// first use. When the queue is full, runs t synchronously on the
// caller's thread as backpressure.
void wlr_cleanup_defer(struct wlr_task t)
{
	wlr_cleanup_queue_init();
	pthread_mutex_lock(&queue.lock);
	if (queue.size == MAX_TASKS) {
		// In case too much work is queued, apply backpressure.
		pthread_mutex_unlock(&queue.lock);
		// Fix: `return t.task(t.data);` returned a void expression from
		// a void function, which violates C11 6.8.6.4.
		t.task(t.data);
		return;
	}
	queue.list[queue.size] = t;
	queue.size++;
	pthread_mutex_unlock(&queue.lock);
	// Wake the worker; the eventfd counter accumulates one per task.
	eventfd_write(queue.efd, 1);
}
extern void wlr_cleanup_queue_init(void)
{
if (!queue.running) {
queue = (struct work_queue){0};
queue.running = true;
queue.efd = eventfd(0, 0);
pthread_mutex_init(&queue.lock, NULL);
pthread_create(&queue.thread, NULL, cleanup_thread, NULL);
}
}
// Sentinel task: runs on the cleanup thread itself, so clearing
// queue.running here is observed by cleanup_thread's loop test without a
// cross-thread race.
static void done(void *unused) {
	(void)unused;
	queue.running = false;
}

// Drains all pending tasks, stops the worker thread, and releases the
// queue's resources. Safe to call when the queue was never started.
void wlr_cleanup_queue_finish(void)
{
	if (!queue.running) {
		return;
	}
	// Notify the cleanup thread that it's done; the worker exits after
	// processing the batch containing this sentinel.
	wlr_cleanup_defer((struct wlr_task){&done, NULL});
	pthread_join(queue.thread, NULL);
	close(queue.efd);
	// Fix: the original leaked the mutex on every finish/init cycle.
	pthread_mutex_destroy(&queue.lock);
	queue = (struct work_queue){0};
}

View file

@ -13,4 +13,5 @@ wlr_files += files(
'token.c', 'token.c',
'transform.c', 'transform.c',
'utf8.c', 'utf8.c',
'cleanup.c',
) )