Merge branch 'pixman_dmabuf' into 'master'

Implement dmabufs in pixman renderer

Closes #3844

See merge request wlroots/wlroots!4714
This commit is contained in:
David Turner 2025-07-11 03:13:12 +00:00
commit d848f49bac
13 changed files with 156 additions and 46 deletions

View file

@ -32,6 +32,9 @@ install_subdir('wlr',
foreach name, have : internal_features
internal_config.set10('HAVE_' + name.underscorify().to_upper(), have)
endforeach
internal_config.set10('HAVE_LINUX_DMA_BUF_H', cc.has_header('linux/dma-buf.h'))
wlr_files += configure_file(
output: 'config.h',
configuration: internal_config,

View file

@ -41,7 +41,6 @@ struct wlr_gles2_renderer {
struct wlr_renderer wlr_renderer;
struct wlr_egl *egl;
int drm_fd;
struct wlr_drm_format_set shm_texture_formats;

View file

@ -39,8 +39,6 @@ struct wlr_vk_device {
VkPhysicalDevice phdev;
VkDevice dev;
int drm_fd;
bool sync_file_import_export;
bool implicit_sync_interop;
bool sampler_ycbcr_conversion;

View file

@ -25,7 +25,6 @@ struct wlr_renderer_impl {
const struct wlr_drm_format_set *(*get_render_formats)(
struct wlr_renderer *renderer);
void (*destroy)(struct wlr_renderer *renderer);
int (*get_drm_fd)(struct wlr_renderer *renderer);
struct wlr_texture *(*texture_from_buffer)(struct wlr_renderer *renderer,
struct wlr_buffer *buffer);
struct wlr_render_pass *(*begin_buffer_pass)(struct wlr_renderer *renderer,

View file

@ -13,7 +13,6 @@
#include <wlr/render/wlr_renderer.h>
struct wlr_renderer *wlr_pixman_renderer_create(void);
bool wlr_renderer_is_pixman(struct wlr_renderer *wlr_renderer);
bool wlr_texture_is_pixman(struct wlr_texture *texture);

View file

@ -26,8 +26,8 @@ struct wlr_fbox;
* A renderer for basic 2D operations.
*/
struct wlr_renderer {
// Capabilities required for the buffer used as a render target (bitmask of
// enum wlr_buffer_cap)
// Capabilities required for the buffers used as texture sources and
// render target (bitmask of enum wlr_buffer_cap)
uint32_t render_buffer_caps;
struct {
@ -59,6 +59,7 @@ struct wlr_renderer {
struct {
const struct wlr_renderer_impl *impl;
int drm_fd;
} WLR_PRIVATE;
};

View file

@ -26,6 +26,12 @@ struct wlr_dmabuf_v1_buffer {
struct {
struct wl_listener release;
// Cache mapped address while ptr_data_access is open
void *addr;
// WLR_BUFFER_DATA_PTR_ACCESS_* flags describing the type of
// the current ptr_data_access
uint32_t access_flags;
} WLR_PRIVATE;
};

View file

@ -182,17 +182,6 @@ static const struct wlr_drm_format_set *gles2_get_render_formats(
return wlr_egl_get_dmabuf_render_formats(renderer->egl);
}
// Return the DRM FD for this renderer, duplicating it from the EGL
// display on first use and caching the result for later calls.
static int gles2_get_drm_fd(struct wlr_renderer *wlr_renderer) {
	struct wlr_gles2_renderer *renderer = gles2_get_renderer(wlr_renderer);
	if (renderer->drm_fd >= 0) {
		return renderer->drm_fd;
	}
	renderer->drm_fd = wlr_egl_dup_drm_fd(renderer->egl);
	return renderer->drm_fd;
}
struct wlr_egl *wlr_gles2_renderer_get_egl(struct wlr_renderer *wlr_renderer) {
struct wlr_gles2_renderer *renderer =
gles2_get_renderer(wlr_renderer);
@ -231,8 +220,8 @@ static void gles2_destroy(struct wlr_renderer *wlr_renderer) {
wlr_drm_format_set_finish(&renderer->shm_texture_formats);
if (renderer->drm_fd >= 0) {
close(renderer->drm_fd);
if (wlr_renderer->drm_fd >= 0) {
close(wlr_renderer->drm_fd);
}
free(renderer);
@ -356,7 +345,6 @@ static const struct wlr_renderer_impl renderer_impl = {
.destroy = gles2_destroy,
.get_texture_formats = gles2_get_texture_formats,
.get_render_formats = gles2_get_render_formats,
.get_drm_fd = gles2_get_drm_fd,
.texture_from_buffer = gles2_texture_from_buffer,
.begin_buffer_pass = gles2_begin_buffer_pass,
.render_timer_create = gles2_render_timer_create,
@ -533,7 +521,7 @@ struct wlr_renderer *wlr_gles2_renderer_create(struct wlr_egl *egl) {
renderer->egl = egl;
renderer->exts_str = exts_str;
renderer->drm_fd = -1;
renderer->wlr_renderer.drm_fd = -1;
wlr_log(WLR_INFO, "Creating GLES2 renderer");
wlr_log(WLR_INFO, "Using %s", glGetString(GL_VERSION));
@ -683,7 +671,8 @@ struct wlr_renderer *wlr_gles2_renderer_create(struct wlr_egl *egl) {
get_gles2_shm_formats(renderer, &renderer->shm_texture_formats);
int drm_fd = wlr_renderer_get_drm_fd(&renderer->wlr_renderer);
int drm_fd = wlr_egl_dup_drm_fd(renderer->egl);
renderer->wlr_renderer.drm_fd = drm_fd;
uint64_t cap_syncobj_timeline;
if (drm_fd >= 0 && drmGetCap(drm_fd, DRM_CAP_SYNCOBJ_TIMELINE, &cap_syncobj_timeline) == 0) {
renderer->wlr_renderer.features.timeline = egl->procs.eglDupNativeFenceFDANDROID &&

View file

@ -2,6 +2,7 @@
#include <drm_fourcc.h>
#include <pixman.h>
#include <stdlib.h>
#include <unistd.h>
#include <wayland-util.h>
#include <wlr/render/interface.h>
#include <wlr/util/box.h>
@ -252,6 +253,11 @@ static struct wlr_texture *pixman_texture_from_buffer(
&data, &drm_format, &stride)) {
return NULL;
}
// This looks bad, because we're saying "end access" but also storing the
// pointer for later use. However, we only access the texture data from
// render_pass_add_texture() which does a begin/end access, and
// begin_pixman_data_ptr_access() will handle if the data pointer changes
// between accesses. So everything should be fine.
wlr_buffer_end_data_ptr_access(buffer);
struct wlr_pixman_texture *texture = pixman_texture_create(renderer,
@ -335,8 +341,9 @@ struct wlr_renderer *wlr_pixman_renderer_create(void) {
const uint32_t *formats = get_pixman_drm_formats(&len);
for (size_t i = 0; i < len; ++i) {
wlr_drm_format_set_add(&renderer->drm_formats, formats[i],
DRM_FORMAT_MOD_INVALID);
// Only support linear buffers. MOD_INVALID could mean the driver
// can do whatever it thinks appropriate, but pixman definitely
// only supports linear.
wlr_drm_format_set_add(&renderer->drm_formats, formats[i],
DRM_FORMAT_MOD_LINEAR);
}

View file

@ -1155,6 +1155,11 @@ static void vulkan_destroy(struct wlr_renderer *wlr_renderer) {
struct wlr_vk_instance *ini = dev->instance;
vulkan_device_destroy(dev);
vulkan_instance_destroy(ini);
if (wlr_renderer->drm_fd > 0) {
close(wlr_renderer->drm_fd);
}
free(renderer);
}
@ -1404,11 +1409,6 @@ destroy_image:
return false;
}
static int vulkan_get_drm_fd(struct wlr_renderer *wlr_renderer) {
struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
return renderer->dev->drm_fd;
}
static struct wlr_render_pass *vulkan_begin_buffer_pass(struct wlr_renderer *wlr_renderer,
struct wlr_buffer *buffer, const struct wlr_buffer_pass_options *options) {
struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
@ -1433,7 +1433,6 @@ static const struct wlr_renderer_impl renderer_impl = {
.get_texture_formats = vulkan_get_texture_formats,
.get_render_formats = vulkan_get_render_formats,
.destroy = vulkan_destroy,
.get_drm_fd = vulkan_get_drm_fd,
.texture_from_buffer = vulkan_texture_from_buffer,
.begin_buffer_pass = vulkan_begin_buffer_pass,
};
@ -2455,7 +2454,8 @@ struct wlr_renderer *vulkan_renderer_create_for_device(struct wlr_vk_device *dev
wl_list_init(&renderer->pipeline_layouts);
uint64_t cap_syncobj_timeline;
if (dev->drm_fd >= 0 && drmGetCap(dev->drm_fd, DRM_CAP_SYNCOBJ_TIMELINE, &cap_syncobj_timeline) == 0) {
if (renderer->wlr_renderer.drm_fd >= 0 &&
drmGetCap(renderer->wlr_renderer.drm_fd, DRM_CAP_SYNCOBJ_TIMELINE, &cap_syncobj_timeline) == 0) {
renderer->wlr_renderer.features.timeline = dev->sync_file_import_export && cap_syncobj_timeline != 0;
}
@ -2526,9 +2526,11 @@ struct wlr_renderer *wlr_vk_renderer_create_with_drm_fd(int drm_fd) {
// Do not use the drm_fd that was passed in: we should prefer the render
// node even if a primary node was provided
dev->drm_fd = vulkan_open_phdev_drm_fd(phdev);
int render_drm_fd = vulkan_open_phdev_drm_fd(phdev);
return vulkan_renderer_create_for_device(dev);
struct wlr_renderer *wlr_renderer = vulkan_renderer_create_for_device(dev);
wlr_renderer->drm_fd = render_drm_fd;
return wlr_renderer;
}
VkInstance wlr_vk_renderer_get_instance(struct wlr_renderer *renderer) {

View file

@ -458,7 +458,6 @@ struct wlr_vk_device *vulkan_device_create(struct wlr_vk_instance *ini,
dev->phdev = phdev;
dev->instance = ini;
dev->drm_fd = -1;
// For dmabuf import we require at least the external_memory_fd,
// external_memory_dma_buf, queue_family_foreign,
@ -665,10 +664,6 @@ void vulkan_device_destroy(struct wlr_vk_device *dev) {
vkDestroyDevice(dev->dev, NULL);
}
if (dev->drm_fd > 0) {
close(dev->drm_fd);
}
wlr_drm_format_set_finish(&dev->dmabuf_render_formats);
wlr_drm_format_set_finish(&dev->dmabuf_texture_formats);
wlr_drm_format_set_finish(&dev->shm_texture_formats);

View file

@ -84,7 +84,7 @@ bool wlr_renderer_init_wl_display(struct wlr_renderer *r,
return false;
}
if (wlr_renderer_get_texture_formats(r, WLR_BUFFER_CAP_DMABUF) != NULL &&
if (wlr_renderer_get_texture_formats(r, r->render_buffer_caps) != NULL &&
wlr_renderer_get_drm_fd(r) >= 0 &&
wlr_linux_dmabuf_v1_create_with_renderer(wl_display, 4, r) == NULL) {
return false;
@ -267,6 +267,14 @@ static struct wlr_renderer *renderer_autocreate(struct wlr_backend *backend, int
if ((is_auto && !has_render_node(backend)) || strcmp(renderer_name, "pixman") == 0) {
renderer = wlr_pixman_renderer_create();
if (open_preferred_drm_fd(backend, &drm_fd, &own_drm_fd)) {
wlr_log(WLR_DEBUG, "Creating pixman renderer with DRM FD %d", drm_fd);
renderer->drm_fd = drm_fd;
} else {
wlr_log(WLR_DEBUG, "Creating pixman renderer without DRM");
renderer->drm_fd = -1;
}
if (renderer) {
goto out;
} else {
@ -297,11 +305,8 @@ struct wlr_renderer *wlr_renderer_autocreate(struct wlr_backend *backend) {
return renderer_autocreate(backend, -1);
}
int wlr_renderer_get_drm_fd(struct wlr_renderer *r) {
if (!r->impl->get_drm_fd) {
return -1;
}
return r->impl->get_drm_fd(r);
// Return the DRM FD stored on the renderer (set at creation time), or -1
// if the renderer has none.
int wlr_renderer_get_drm_fd(struct wlr_renderer *renderer) {
return renderer->drm_fd;
}
struct wlr_render_pass *wlr_renderer_begin_buffer_pass(struct wlr_renderer *renderer,

View file

@ -1,7 +1,13 @@
#include <assert.h>
#include "config.h"
#include <drm_fourcc.h>
#include <fcntl.h>
#include <stdlib.h>
#include <poll.h>
#if HAVE_LINUX_DMA_BUF_H
#include <linux/dma-buf.h>
#include <sys/ioctl.h>
#endif
#include <sys/mman.h>
#include <unistd.h>
#include <wlr/backend.h>
@ -119,9 +125,109 @@ static bool buffer_get_dmabuf(struct wlr_buffer *wlr_buffer,
return true;
}
// Map a single-plane dmabuf for CPU access. Waits on the buffer's implicit
// fences, brackets the access with DMA_BUF_IOCTL_SYNC for cache coherency
// (when available), and caches the mapping in buffer->addr until
// buffer_end_data_ptr_access() tears it down.
//
// Returns false for multi-planar buffers or when mmap() fails; on success
// *data/*format/*stride describe plane 0.
static bool buffer_begin_data_ptr_access(struct wlr_buffer *wlr_buffer,
		uint32_t flags, void **data, uint32_t *format, size_t *stride) {
	struct wlr_dmabuf_v1_buffer *buffer =
		dmabuf_v1_buffer_from_buffer(wlr_buffer);

	if (buffer->attributes.n_planes != 1) {
		// The current data_ptr_access interface can't support buffers
		// split across multiple planes.
		wlr_log(WLR_DEBUG, "Can't do data access on multi-planar dmabuf");
		return false;
	}

	*format = buffer->attributes.format;
	*stride = buffer->attributes.stride[0];
	int fd = buffer->attributes.fd[0];

	// Poll fences on the dmabuf, check it's finished rendering
	struct pollfd fence_poll = {
		.fd = fd,
		.events = POLLIN,
	};
	if (poll(&fence_poll, 1, -1) < 0) {
		wlr_log(WLR_ERROR, "Fence poll failed: %s", strerror(errno));
	}

#if HAVE_LINUX_DMA_BUF_H
	struct dma_buf_sync sync = {
		.flags = DMA_BUF_SYNC_START,
	};
	if (flags & WLR_BUFFER_DATA_PTR_ACCESS_READ) {
		sync.flags |= DMA_BUF_SYNC_READ;
	}
	if (flags & WLR_BUFFER_DATA_PTR_ACCESS_WRITE) {
		sync.flags |= DMA_BUF_SYNC_WRITE;
	}
#endif

	int mmap_flags = 0;
	if (flags & WLR_BUFFER_DATA_PTR_ACCESS_READ) {
		mmap_flags |= PROT_READ;
	}
	if (flags & WLR_BUFFER_DATA_PTR_ACCESS_WRITE) {
		mmap_flags |= PROT_WRITE;
	}

	if (!buffer->addr) {
		// Use size_t: stride * height can overflow a signed int for
		// large buffers.
		size_t size = (size_t)*stride * buffer->attributes.height;
		buffer->addr = mmap(NULL, size, mmap_flags,
			MAP_SHARED, fd, buffer->attributes.offset[0]);
		if (buffer->addr == MAP_FAILED) {
			wlr_log(WLR_ERROR, "Failed to map linux_dmabuf: %s",
				strerror(errno));
			// Reset to NULL so a later access retries the mapping
			// instead of treating the MAP_FAILED sentinel as a
			// cached address.
			buffer->addr = NULL;
			*data = NULL;
			return false;
		}
		buffer->access_flags = flags;
		// NOTE(review): a cached mapping is reused without comparing
		// `flags` against `access_flags`; if a caller widens access
		// (read-only map, then write) the mmap prot is insufficient —
		// confirm callers never do this.

#if HAVE_LINUX_DMA_BUF_H
		// dmabuf sync - this is for cache coherency
		if (ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync) < 0) {
			wlr_log(WLR_ERROR, "dmabuf sync start failed: %s",
				strerror(errno));
		}
#endif
	}

	*data = buffer->addr;
	return true;
}
// Tear down the CPU mapping created by buffer_begin_data_ptr_access():
// close the kernel-side dmabuf sync bracket (when available), unmap the
// buffer, and clear the cached address.
static void buffer_end_data_ptr_access(struct wlr_buffer *wlr_buffer) {
	struct wlr_dmabuf_v1_buffer *buffer =
		dmabuf_v1_buffer_from_buffer(wlr_buffer);

#if HAVE_LINUX_DMA_BUF_H
	// Mirror the DMA_BUF_SYNC_START issued when the access began, using
	// the flags recorded at that time.
	struct dma_buf_sync sync = {0};
	sync.flags = DMA_BUF_SYNC_END;
	if (buffer->access_flags & WLR_BUFFER_DATA_PTR_ACCESS_READ) {
		sync.flags |= DMA_BUF_SYNC_READ;
	}
	if (buffer->access_flags & WLR_BUFFER_DATA_PTR_ACCESS_WRITE) {
		sync.flags |= DMA_BUF_SYNC_WRITE;
	}
	if (ioctl(buffer->attributes.fd[0], DMA_BUF_IOCTL_SYNC, &sync) < 0) {
		wlr_log(WLR_ERROR, "dmabuf sync end failed: %s", strerror(errno));
	}
#endif

	if (munmap(buffer->addr,
			buffer->attributes.stride[0] * buffer->attributes.height) < 0) {
		wlr_log(WLR_ERROR, "Failed to munmap dmabuf: %s", strerror(errno));
	}
	buffer->addr = NULL;
}
// wlr_buffer implementation for imported linux-dmabuf buffers: supports
// dmabuf export plus best-effort CPU access via mmap (single-plane only,
// see buffer_begin_data_ptr_access).
static const struct wlr_buffer_impl buffer_impl = {
.destroy = buffer_destroy,
.get_dmabuf = buffer_get_dmabuf,
.begin_data_ptr_access = buffer_begin_data_ptr_access,
.end_data_ptr_access = buffer_end_data_ptr_access,
};
static void buffer_handle_release(struct wl_listener *listener, void *data) {
@ -1101,8 +1207,9 @@ bool wlr_linux_dmabuf_feedback_v1_init_with_options(struct wlr_linux_dmabuf_feed
feedback->main_device = renderer_dev;
uint32_t buffer_caps = WLR_BUFFER_CAP_DMABUF | WLR_BUFFER_CAP_DATA_PTR;
const struct wlr_drm_format_set *renderer_formats =
wlr_renderer_get_texture_formats(options->main_renderer, WLR_BUFFER_CAP_DMABUF);
wlr_renderer_get_texture_formats(options->main_renderer, buffer_caps);
if (renderer_formats == NULL) {
wlr_log(WLR_ERROR, "Failed to get renderer DMA-BUF texture formats");
goto error;