Merge branch 'vk-upload-thread' into 'master'

Draft: render/vulkan: add upload thread

See merge request wlroots/wlroots!4454
This commit is contained in:
Simon Ser 2024-01-25 11:00:38 +00:00
commit 9e53f89c1b
21 changed files with 378 additions and 88 deletions

View file

@ -39,9 +39,10 @@
#define WAIT_SESSION_TIMEOUT 10000 // ms
void wlr_backend_init(struct wlr_backend *backend,
const struct wlr_backend_impl *impl) {
const struct wlr_backend_impl *impl, struct wl_event_loop *loop) {
*backend = (struct wlr_backend){
.impl = impl,
.event_loop = loop,
};
wl_signal_init(&backend->events.destroy);
wl_signal_init(&backend->events.new_input);

View file

@ -208,7 +208,7 @@ struct wlr_backend *wlr_drm_backend_create(struct wl_display *display,
wlr_log_errno(WLR_ERROR, "Allocation failed");
return NULL;
}
wlr_backend_init(&drm->backend, &backend_impl);
wlr_backend_init(&drm->backend, &backend_impl, wl_display_get_event_loop(display));
drm->session = session;
wl_list_init(&drm->fbs);

View file

@ -13,7 +13,8 @@
bool init_drm_renderer(struct wlr_drm_backend *drm,
struct wlr_drm_renderer *renderer) {
renderer->wlr_rend = renderer_autocreate_with_drm_fd(drm->fd);
renderer->wlr_rend = renderer_autocreate_with_drm_fd(drm->fd,
wl_display_get_event_loop(drm->display));
if (!renderer->wlr_rend) {
wlr_log(WLR_ERROR, "Failed to create renderer");
return false;

View file

@ -72,7 +72,7 @@ struct wlr_backend *wlr_headless_backend_create(struct wl_display *display) {
return NULL;
}
wlr_backend_init(&backend->backend, &backend_impl);
wlr_backend_init(&backend->backend, &backend_impl, wl_display_get_event_loop(display));
backend->display = display;
wl_list_init(&backend->outputs);

View file

@ -199,7 +199,7 @@ struct wlr_backend *wlr_libinput_backend_create(struct wl_display *display,
wlr_log(WLR_ERROR, "Allocation failed: %s", strerror(errno));
return NULL;
}
wlr_backend_init(&backend->backend, &backend_impl);
wlr_backend_init(&backend->backend, &backend_impl, wl_display_get_event_loop(display));
wl_list_init(&backend->devices);

View file

@ -119,7 +119,7 @@ struct wlr_backend *wlr_multi_backend_create(struct wl_display *display) {
}
wl_list_init(&backend->backends);
wlr_backend_init(&backend->backend, &backend_impl);
wlr_backend_init(&backend->backend, &backend_impl, wl_display_get_event_loop(display));
wl_signal_init(&backend->events.backend_add);
wl_signal_init(&backend->events.backend_remove);

View file

@ -577,7 +577,7 @@ struct wlr_backend *wlr_wl_backend_create(struct wl_display *display,
return NULL;
}
wlr_backend_init(&wl->backend, &backend_impl);
wlr_backend_init(&wl->backend, &backend_impl, wl_display_get_event_loop(display));
wl->local_display = display;
wl_list_init(&wl->outputs);

View file

@ -400,7 +400,7 @@ struct wlr_backend *wlr_x11_backend_create(struct wl_display *display,
return NULL;
}
wlr_backend_init(&x11->backend, &backend_impl);
wlr_backend_init(&x11->backend, &backend_impl, wl_display_get_event_loop(display));
x11->wl_display = display;
wl_list_init(&x11->outputs);

View file

@ -1,6 +1,7 @@
#ifndef RENDER_VULKAN_H
#define RENDER_VULKAN_H
#include <pthread.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
@ -51,6 +52,7 @@ struct wlr_vk_device {
PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;
PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;
PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;
PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;
PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;
PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;
PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;
@ -254,6 +256,9 @@ struct wlr_vk_renderer {
VkSemaphore timeline_semaphore;
uint64_t timeline_point;
VkSemaphore upload_timeline_semaphore;
uint64_t upload_timeline_point;
size_t last_pool_size;
struct wl_list descriptor_pools; // wlr_vk_descriptor_pool.link
struct wl_list render_format_setups; // wlr_vk_render_format_setup.link
@ -268,6 +273,12 @@ struct wlr_vk_renderer {
// Pool of command buffers
struct wlr_vk_command_buffer command_buffers[VULKAN_COMMAND_BUFFERS_CAP];
struct {
pthread_t thread;
int worker_fd, control_fd;
struct wl_event_source *event_source;
} upload;
struct {
struct wlr_vk_command_buffer *cb;
uint64_t last_timeline_point;
@ -299,6 +310,18 @@ struct wlr_vk_texture_view {
struct wlr_vk_descriptor_pool *ds_pool;
};
// A pixel-upload job handed from the main thread to the upload worker over
// the socketpair, and echoed back on completion.
struct wlr_vk_upload_task {
	struct wlr_buffer *buffer; // locked source buffer; unlocked + data-ptr access ended on completion
	VkDeviceMemory memory; // staging buffer memory backing dst
	uint64_t timeline_point; // upload_timeline_semaphore value signalled once the copy is done
	char *dst; // destination in the persistently-mapped staging span
	const char *src; // client pixel data (valid while the buffer data-ptr access is held)
	uint32_t src_stride, dst_size; // source row stride; total bytes to be written to dst
	pixman_region32_t region; // damaged rects to copy, in source-buffer coordinates
	const struct wlr_pixel_format_info *format_info;
	int64_t start; // CLOCK_MONOTONIC timestamp (nsec) for the "TOTAL" profiling log
};
struct wlr_vk_pipeline *setup_get_or_create_pipeline(
struct wlr_vk_render_format_setup *setup,
const struct wlr_vk_pipeline_key *key);
@ -310,7 +333,8 @@ struct wlr_vk_texture_view *vulkan_texture_get_or_create_view(
const struct wlr_vk_pipeline_layout *layout);
// Creates a vulkan renderer for the given device.
struct wlr_renderer *vulkan_renderer_create_for_device(struct wlr_vk_device *dev);
struct wlr_renderer *vulkan_renderer_create_for_device(struct wlr_vk_device *dev,
struct wl_event_loop *loop);
// stage utility - for uploading/retrieving data
// Gets an command buffer in recording state which is guaranteed to be
@ -380,6 +404,9 @@ bool vulkan_read_pixels(struct wlr_vk_renderer *vk_renderer,
uint32_t width, uint32_t height, uint32_t src_x, uint32_t src_y,
uint32_t dst_x, uint32_t dst_y, void *data);
bool vulkan_init_upload_worker(struct wlr_vk_renderer *renderer,
struct wl_event_loop *loop);
// State (e.g. image texture) associated with a surface.
struct wlr_vk_texture {
struct wlr_texture wlr_texture;
@ -435,6 +462,7 @@ struct wlr_vk_shared_buffer {
VkBuffer buffer;
VkDeviceMemory memory;
VkDeviceSize buf_size;
void *map;
struct wl_array allocs; // struct wlr_vk_allocation
};

View file

@ -6,7 +6,7 @@
/**
* Automatically select and create a renderer suitable for the DRM FD.
*/
struct wlr_renderer *renderer_autocreate_with_drm_fd(int drm_fd);
struct wlr_renderer *renderer_autocreate_with_drm_fd(int drm_fd, struct wl_event_loop *loop);
/**
* Get the supported render formats. Buffers allocated with a format from this
* list may be attached via wlr_renderer_begin_with_buffer.

View file

@ -19,6 +19,7 @@ struct wlr_backend_impl;
*/
struct wlr_backend {
const struct wlr_backend_impl *impl;
struct wl_event_loop *event_loop;
struct {
/** Raised when destroyed */

View file

@ -24,7 +24,7 @@ struct wlr_backend_impl {
* to the provided struct wlr_backend_impl reference.
*/
void wlr_backend_init(struct wlr_backend *backend,
const struct wlr_backend_impl *impl);
const struct wlr_backend_impl *impl, struct wl_event_loop *loop);
/**
* Emit the destroy event and clean up common backend state.
*/

View file

@ -18,7 +18,8 @@ struct wlr_vk_image_attribs {
VkFormat format;
};
struct wlr_renderer *wlr_vk_renderer_create_with_drm_fd(int drm_fd);
struct wlr_renderer *wlr_vk_renderer_create_with_drm_fd(
struct wl_event_loop *loop, int drm_fd);
VkInstance wlr_vk_renderer_get_instance(struct wlr_renderer *renderer);
VkPhysicalDevice wlr_vk_renderer_get_physical_device(struct wlr_renderer *renderer);

View file

@ -51,7 +51,12 @@ struct wlr_buffer {
bool dropped;
size_t n_locks;
bool accessing_data_ptr;
size_t n_data_ptr_accesses;
uint32_t data_ptr_access_flags; // bitfield of wlr_buffer_data_ptr_access_flag
void *data_ptr_access_data;
uint32_t data_ptr_access_format;
size_t data_ptr_access_stride;
struct {
struct wl_signal destroy;

View file

@ -46,6 +46,7 @@ wlr_files += files(
)
wlr_deps += dep_vulkan
wlr_deps += dependency('threads')
features += { 'vulkan-renderer': true }
subdir('shaders')

View file

@ -300,27 +300,38 @@ static bool render_pass_submit(struct wlr_render_pass *wlr_pass) {
.semaphore = renderer->timeline_semaphore,
.value = stage_timeline_point,
};
VkSubmitInfo2KHR stage_submit = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR,
.commandBufferInfoCount = 1,
.pCommandBufferInfos = &stage_cb_info,
.signalSemaphoreInfoCount = 1,
.pSignalSemaphoreInfos = &stage_signal,
};
VkSemaphoreSubmitInfoKHR stage_wait;
VkSemaphoreSubmitInfoKHR stage_wait[2];
uint32_t stage_wait_len = 0;
if (renderer->upload_timeline_point > 0) {
stage_wait[stage_wait_len++] = (VkSemaphoreSubmitInfoKHR){
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR,
.semaphore = renderer->upload_timeline_semaphore,
.value = renderer->upload_timeline_point,
.stageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR,
};
}
if (renderer->stage.last_timeline_point > 0) {
stage_wait = (VkSemaphoreSubmitInfoKHR){
stage_wait[stage_wait_len++] = (VkSemaphoreSubmitInfoKHR){
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR,
.semaphore = renderer->timeline_semaphore,
.value = renderer->stage.last_timeline_point,
.stageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR,
};
stage_submit.waitSemaphoreInfoCount = 1;
stage_submit.pWaitSemaphoreInfos = &stage_wait;
}
VkSubmitInfo2KHR stage_submit = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR,
.commandBufferInfoCount = 1,
.pCommandBufferInfos = &stage_cb_info,
.waitSemaphoreInfoCount = stage_wait_len,
.pWaitSemaphoreInfos = stage_wait,
.signalSemaphoreInfoCount = 1,
.pSignalSemaphoreInfos = &stage_signal,
};
renderer->stage.last_timeline_point = stage_timeline_point;
uint64_t render_timeline_point = vulkan_end_command_buffer(render_cb, renderer);

View file

@ -178,6 +178,9 @@ static void shared_buffer_destroy(struct wlr_vk_renderer *r,
}
wl_array_release(&buffer->allocs);
if (buffer->map) {
vkUnmapMemory(r->dev->dev, buffer->memory);
}
if (buffer->buffer) {
vkDestroyBuffer(r->dev->dev, buffer->buffer, NULL);
}
@ -302,6 +305,12 @@ struct wlr_vk_buffer_span vulkan_get_stage_span(struct wlr_vk_renderer *r,
goto error;
}
res = vkMapMemory(r->dev->dev, buf->memory, 0, VK_WHOLE_SIZE, 0, &buf->map);
if (res != VK_SUCCESS) {
wlr_vk_error("vkMapMemory", res);
goto error;
}
struct wlr_vk_allocation *a = wl_array_add(&buf->allocs, sizeof(*a));
if (a == NULL) {
wlr_log_errno(WLR_ERROR, "Allocation failed");
@ -360,6 +369,7 @@ bool vulkan_submit_stage_wait(struct wlr_vk_renderer *renderer) {
return false;
}
// TODO
VkTimelineSemaphoreSubmitInfoKHR timeline_submit_info = {
.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
.signalSemaphoreValueCount = 1,
@ -1048,6 +1058,7 @@ static void vulkan_destroy(struct wlr_renderer *wlr_renderer) {
}
vkDestroySemaphore(dev->dev, renderer->timeline_semaphore, NULL);
vkDestroySemaphore(dev->dev, renderer->upload_timeline_semaphore, NULL);
vkDestroyPipelineLayout(dev->dev, renderer->output_pipe_layout, NULL);
vkDestroyDescriptorSetLayout(dev->dev, renderer->output_ds_layout, NULL);
vkDestroyCommandPool(dev->dev, renderer->command_pool, NULL);
@ -2145,7 +2156,8 @@ error:
return NULL;
}
struct wlr_renderer *vulkan_renderer_create_for_device(struct wlr_vk_device *dev) {
struct wlr_renderer *vulkan_renderer_create_for_device(struct wlr_vk_device *dev,
struct wl_event_loop *loop) {
struct wlr_vk_renderer *renderer;
VkResult res;
if (!(renderer = calloc(1, sizeof(*renderer)))) {
@ -2195,6 +2207,16 @@ struct wlr_renderer *vulkan_renderer_create_for_device(struct wlr_vk_device *dev
wlr_vk_error("vkCreateSemaphore", res);
goto error;
}
res = vkCreateSemaphore(dev->dev, &semaphore_info, NULL,
&renderer->upload_timeline_semaphore);
if (res != VK_SUCCESS) {
wlr_vk_error("vkCreateSemaphore", res);
goto error;
}
if (!vulkan_init_upload_worker(renderer, loop)) {
goto error;
}
return &renderer->wlr_renderer;
@ -2203,7 +2225,8 @@ error:
return NULL;
}
struct wlr_renderer *wlr_vk_renderer_create_with_drm_fd(int drm_fd) {
struct wlr_renderer *wlr_vk_renderer_create_with_drm_fd(struct wl_event_loop *loop,
int drm_fd) {
wlr_log(WLR_INFO, "The vulkan renderer is only experimental and "
"not expected to be ready for daily use");
wlr_log(WLR_INFO, "Run with VK_INSTANCE_LAYERS=VK_LAYER_KHRONOS_validation "
@ -2238,7 +2261,7 @@ struct wlr_renderer *wlr_vk_renderer_create_with_drm_fd(int drm_fd) {
return NULL;
}
return vulkan_renderer_create_for_device(dev);
return vulkan_renderer_create_for_device(dev, loop);
}
VkInstance wlr_vk_renderer_get_instance(struct wlr_renderer *renderer) {

View file

@ -2,9 +2,11 @@
#include <assert.h>
#include <drm_fourcc.h>
#include <fcntl.h>
#include <signal.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <unistd.h>
#include <wlr/render/wlr_texture.h>
@ -14,6 +16,8 @@
#include "render/pixel_format.h"
#include "render/vulkan.h"
#include "util/time.h"
static const struct wlr_texture_impl texture_impl;
bool wlr_texture_is_vk(struct wlr_texture *wlr_texture) {
@ -36,15 +40,202 @@ static VkImageAspectFlagBits mem_plane_aspect(unsigned i) {
}
}
// Copies the damaged rows of a client buffer into a tightly-packed staging
// span. vmap points at the start of the span, vdata at the source pixels;
// rects in region use source-buffer coordinates. size is the expected total
// number of bytes written (asserted at the end).
static void copy_pixels(char *vmap, const char *vdata, uint32_t tex_width,
		uint32_t stride, uint32_t size, const pixman_region32_t *region,
		const struct wlr_pixel_format_info *format_info) {
	int rects_len = 0;
	const pixman_box32_t *rects = pixman_region32_rectangles(region, &rects_len);

	char *map = vmap;
	for (int i = 0; i < rects_len; i++) {
		pixman_box32_t rect = rects[i];
		uint32_t width = rect.x2 - rect.x1;
		uint32_t height = rect.y2 - rect.y1;
		uint32_t src_x = rect.x1;
		uint32_t src_y = rect.y1;
		uint32_t packed_stride = (uint32_t)pixel_format_info_min_stride(format_info, width);

		// Advance to the first pixel of this rect in the source buffer
		const char *pdata = vdata; // data iterator
		pdata += stride * src_y;
		pdata += format_info->bytes_per_block * src_x;

		if (src_x == 0 && width == tex_width && stride == packed_stride) {
			// Full-width rows with no padding: single contiguous copy
			memcpy(map, pdata, packed_stride * height);
			map += packed_stride * height;
		} else {
			// Copy row by row, dropping the source stride padding.
			// (Loop variable renamed: the old inner "i" shadowed the outer one.)
			for (uint32_t y = 0; y < height; y++) {
				memcpy(map, pdata, packed_stride);
				pdata += stride;
				map += packed_stride;
			}
		}
	}
	assert((uint32_t)(map - vmap) == size);
}
// Reads exactly one task from fd, retrying on EINTR. Tasks are fixed-size
// and the socketpair is SOCK_STREAM, so a partial read is unexpected and
// treated as an error. Returns false on EOF or error (the worker uses this
// as its shutdown signal).
static bool read_upload_task(struct wlr_vk_upload_task *task, int fd) {
	while (true) {
		ssize_t n = read(fd, task, sizeof(*task));
		if (n == sizeof(*task)) {
			return true;
		}
		if (n < 0) {
			// Only inspect errno when the call actually failed: POSIX
			// permits a successful read() to modify errno.
			if (errno == EINTR) {
				continue;
			}
			wlr_log_errno(WLR_ERROR, "read() failed");
		} else if (n > 0) {
			wlr_log(WLR_ERROR, "Unexpected partial read");
		}
		return false;
	}
}
// Writes exactly one task to fd, retrying on EINTR. A partial write on the
// SOCK_STREAM socketpair is unexpected and treated as an error.
static bool write_upload_task(const struct wlr_vk_upload_task *task, int fd) {
	while (true) {
		ssize_t n = write(fd, task, sizeof(*task));
		if (n == sizeof(*task)) {
			return true;
		}
		if (n < 0) {
			// Only inspect errno when the call actually failed: POSIX
			// permits a successful write() to modify errno.
			if (errno == EINTR) {
				continue;
			}
			wlr_log_errno(WLR_ERROR, "write() failed");
		} else if (n > 0) {
			wlr_log(WLR_ERROR, "Unexpected partial write");
		}
		return false;
	}
}
// Worker-side handler: copies the damaged pixels into the mapped staging
// span, then signals the upload timeline semaphore from the CPU so the
// render-queue submission waiting on it can proceed.
static void process_upload_task(struct wlr_vk_renderer *renderer,
		struct wlr_vk_upload_task *task) {
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	int64_t start = timespec_to_nsec(&ts);

	copy_pixels(task->dst, task->src, task->buffer->width, task->src_stride,
		task->dst_size, &task->region, task->format_info);

	clock_gettime(CLOCK_MONOTONIC, &ts);
	int64_t dur_ns = timespec_to_nsec(&ts) - start;
	wlr_log(WLR_INFO, "UPLOAD: %f ms", (double)dur_ns / 1000 / 1000);

	VkSemaphoreSignalInfoKHR signal_info = {
		.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR,
		.semaphore = renderer->upload_timeline_semaphore,
		.value = task->timeline_point,
	};
	VkResult res = renderer->dev->api.vkSignalSemaphoreKHR(renderer->dev->dev, &signal_info);
	if (res != VK_SUCCESS) {
		// Fixed: this was mislabelled "vkMapMemory" — report the call
		// that actually failed.
		wlr_vk_error("vkSignalSemaphoreKHR", res);
	}
}
// Upload worker thread entry point: consumes tasks from worker_fd, performs
// the pixel copy, then echoes the task back so the main-thread event source
// can release its resources. Terminates when the socket is closed or a
// read/write fails.
static void *run_uploads(void *data) {
	struct wlr_vk_renderer *renderer = data;
	int fd = renderer->upload.worker_fd;

	struct wlr_vk_upload_task task = {0};
	while (read_upload_task(&task, fd)) {
		process_upload_task(renderer, &task);
		if (!write_upload_task(&task, fd)) {
			break;
		}
	}

	close(fd);
	return NULL;
}
// Main-thread completion handler: releases everything the task pinned
// (buffer data-ptr access + lock, damage region) and logs total latency
// since the upload was started.
static void handle_upload_task_complete(struct wlr_vk_renderer *renderer,
		struct wlr_vk_upload_task *task) {
	wlr_buffer_end_data_ptr_access(task->buffer);
	wlr_buffer_unlock(task->buffer);
	pixman_region32_fini(&task->region);

	struct timespec now;
	clock_gettime(CLOCK_MONOTONIC, &now);
	int64_t total_ns = timespec_to_nsec(&now) - task->start;
	wlr_log(WLR_INFO, "TOTAL: %f ms", (double)total_ns / 1000 / 1000);
}
// Event-loop callback on control_fd: a readable event means the worker
// echoed back a finished task. Errors and hangup are handled first and
// simply stop processing (the worker side owns shutdown).
static int handle_upload_fd_event(int fd, uint32_t mask, void *data) {
	struct wlr_vk_renderer *renderer = data;

	if (mask & WL_EVENT_ERROR) {
		wlr_log(WLR_ERROR, "Upload worker FD error");
		return 0;
	}
	if (mask & WL_EVENT_HANGUP) {
		return 0;
	}
	if (!(mask & WL_EVENT_READABLE)) {
		return 0;
	}

	struct wlr_vk_upload_task task = {0};
	if (read_upload_task(&task, fd)) {
		handle_upload_task_complete(renderer, &task);
	}
	return 0;
}
// Spawns the upload worker thread and wires its completion notifications
// into the compositor event loop. The two ends of a socketpair carry
// fixed-size wlr_vk_upload_task messages: control_fd belongs to the main
// thread, worker_fd to the upload thread.
bool vulkan_init_upload_worker(struct wlr_vk_renderer *renderer,
		struct wl_event_loop *loop) {
	int sockets[2];
	// SOCK_CLOEXEC: don't leak the pair into processes the compositor execs
	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, sockets) != 0) {
		// Fixed: this logged "pipe() failed" for a socketpair() call
		wlr_log_errno(WLR_ERROR, "socketpair() failed");
		return false;
	}
	renderer->upload.worker_fd = sockets[0];
	renderer->upload.control_fd = sockets[1];

	renderer->upload.event_source = wl_event_loop_add_fd(loop,
		renderer->upload.control_fd, WL_EVENT_READABLE,
		handle_upload_fd_event, renderer);
	if (renderer->upload.event_source == NULL) {
		wlr_log(WLR_ERROR, "wl_event_loop_add_fd() failed");
		goto error_fds;
	}

	// Block all signals in the new thread: let the main thread handle these
	sigset_t saved_sigset, new_sigset;
	sigfillset(&new_sigset);
	pthread_sigmask(SIG_BLOCK, &new_sigset, &saved_sigset);
	int ret = pthread_create(&renderer->upload.thread, NULL, run_uploads, renderer);
	pthread_sigmask(SIG_SETMASK, &saved_sigset, NULL);
	if (ret != 0) {
		// pthread_create() reports errors via its return value and does not
		// set errno, so wlr_log_errno() would print a stale error here
		wlr_log(WLR_ERROR, "pthread_create() failed: %s", strerror(ret));
		goto error_event_source;
	}

	return true;

error_event_source:
	wl_event_source_remove(renderer->upload.event_source);
error_fds:
	close(renderer->upload.worker_fd);
	close(renderer->upload.control_fd);
	return false;
}
// Will transition the texture to shaderReadOnlyOptimal layout for reading
// from fragment shader later on
static bool write_pixels(struct wlr_vk_texture *texture,
static bool start_upload(struct wlr_vk_texture *texture, struct wlr_buffer *buffer,
uint32_t stride, const pixman_region32_t *region, const void *vdata,
VkImageLayout old_layout, VkPipelineStageFlags src_stage,
VkAccessFlags src_access) {
VkResult res;
struct wlr_vk_renderer *renderer = texture->renderer;
VkDevice dev = texture->renderer->dev->dev;
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
int64_t start = timespec_to_nsec(&ts);
const struct wlr_pixel_format_info *format_info = drm_get_pixel_format_info(texture->format->drm);
assert(format_info);
@ -81,19 +272,9 @@ static bool write_pixels(struct wlr_vk_texture *texture,
return false;
}
void *vmap;
res = vkMapMemory(dev, span.buffer->memory, span.alloc.start,
bsize, 0, &vmap);
if (res != VK_SUCCESS) {
wlr_vk_error("vkMapMemory", res);
free(copies);
return false;
}
char *map = (char *)vmap;
uint64_t timeline_point = ++renderer->upload_timeline_point;
// upload data
uint32_t buf_off = span.alloc.start + (map - (char *)vmap);
uint32_t buf_off = span.alloc.start;
for (int i = 0; i < rects_len; i++) {
pixman_box32_t rect = rects[i];
uint32_t width = rect.x2 - rect.x1;
@ -102,22 +283,6 @@ static bool write_pixels(struct wlr_vk_texture *texture,
uint32_t src_y = rect.y1;
uint32_t packed_stride = (uint32_t)pixel_format_info_min_stride(format_info, width);
// write data into staging buffer span
const char *pdata = vdata; // data iterator
pdata += stride * src_y;
pdata += format_info->bytes_per_block * src_x;
if (src_x == 0 && width == texture->wlr_texture.width &&
stride == packed_stride) {
memcpy(map, pdata, packed_stride * height);
map += packed_stride * height;
} else {
for (unsigned i = 0u; i < height; ++i) {
memcpy(map, pdata, packed_stride);
pdata += stride;
map += packed_stride;
}
}
copies[i] = (VkBufferImageCopy) {
.imageExtent.width = width,
.imageExtent.height = height,
@ -134,12 +299,34 @@ static bool write_pixels(struct wlr_vk_texture *texture,
.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
};
buf_off += height * packed_stride;
}
assert((uint32_t)(map - (char *)vmap) == bsize);
vkUnmapMemory(dev, span.buffer->memory);
struct wlr_vk_upload_task task = {
.buffer = wlr_buffer_lock(buffer),
.memory = span.buffer->memory,
.timeline_point = timeline_point,
.dst = (char *)span.buffer->map + span.alloc.start,
.src = vdata,
.src_stride = stride,
.dst_size = bsize,
.format_info = format_info,
.start = start,
};
pixman_region32_init(&task.region);
pixman_region32_copy(&task.region, region);
#if 1
if (!write_upload_task(&task, renderer->upload.control_fd)) {
free(copies);
return false;
}
#else
process_upload_task(renderer, &task);
handle_upload_task_complete(renderer, &task);
#endif
clock_gettime(CLOCK_MONOTONIC, &ts);
start = timespec_to_nsec(&ts);
// record staging cb
// will be executed before next frame
@ -149,6 +336,10 @@ static bool write_pixels(struct wlr_vk_texture *texture,
return false;
}
clock_gettime(CLOCK_MONOTONIC, &ts);
int64_t dur_ns = timespec_to_nsec(&ts) - start;
wlr_log(WLR_INFO, "STARTUP: %f ms", (double)dur_ns / 1000 / 1000);
vulkan_change_layout(cb, texture->image,
old_layout, src_stage, src_access,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
@ -156,6 +347,7 @@ static bool write_pixels(struct wlr_vk_texture *texture,
vkCmdCopyBufferToImage(cb, span.buffer->buffer, texture->image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (uint32_t)rects_len, copies);
vulkan_change_layout(cb, texture->image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT,
@ -180,19 +372,21 @@ static bool vulkan_texture_update_from_buffer(struct wlr_texture *wlr_texture,
return false;
}
bool ok = true;
if (format != texture->format->drm) {
ok = false;
goto out;
goto error;
}
ok = write_pixels(texture, stride, damage, data, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
if (!start_upload(texture, buffer, stride, damage, data,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT)) {
goto error;
}
out:
return true;
error:
wlr_buffer_end_data_ptr_access(buffer);
return ok;
return false;
}
void vulkan_texture_destroy(struct wlr_vk_texture *texture) {
@ -390,7 +584,8 @@ static void texture_set_format(struct wlr_vk_texture *texture,
}
static struct wlr_texture *vulkan_texture_from_pixels(
struct wlr_vk_renderer *renderer, uint32_t drm_fmt, uint32_t stride,
struct wlr_vk_renderer *renderer, struct wlr_buffer *buffer,
uint32_t drm_fmt, uint32_t stride,
uint32_t width, uint32_t height, const void *data) {
VkResult res;
VkDevice dev = renderer->dev->dev;
@ -476,7 +671,8 @@ static struct wlr_texture *vulkan_texture_from_pixels(
pixman_region32_t region;
pixman_region32_init_rect(&region, 0, 0, width, height);
if (!write_pixels(texture, stride, &region, data, VK_IMAGE_LAYOUT_UNDEFINED,
if (!start_upload(texture, buffer, stride, &region, data,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0)) {
goto error;
}
@ -829,8 +1025,10 @@ struct wlr_texture *vulkan_texture_from_buffer(struct wlr_renderer *wlr_renderer
} else if (wlr_buffer_begin_data_ptr_access(buffer,
WLR_BUFFER_DATA_PTR_ACCESS_READ, &data, &format, &stride)) {
struct wlr_texture *tex = vulkan_texture_from_pixels(renderer,
format, stride, buffer->width, buffer->height, data);
wlr_buffer_end_data_ptr_access(buffer);
buffer, format, stride, buffer->width, buffer->height, data);
if (tex == NULL) {
wlr_buffer_end_data_ptr_access(buffer);
}
return tex;
} else {
return NULL;

View file

@ -617,6 +617,8 @@ struct wlr_vk_device *vulkan_device_create(struct wlr_vk_instance *ini,
load_device_proc(dev, "vkWaitSemaphoresKHR", &dev->api.vkWaitSemaphoresKHR);
load_device_proc(dev, "vkGetSemaphoreCounterValueKHR",
&dev->api.vkGetSemaphoreCounterValueKHR);
load_device_proc(dev, "vkSignalSemaphoreKHR",
&dev->api.vkSignalSemaphoreKHR);
load_device_proc(dev, "vkGetSemaphoreFdKHR", &dev->api.vkGetSemaphoreFdKHR);
load_device_proc(dev, "vkImportSemaphoreFdKHR", &dev->api.vkImportSemaphoreFdKHR);
load_device_proc(dev, "vkQueueSubmit2KHR", &dev->api.vkQueueSubmit2KHR);

View file

@ -221,7 +221,8 @@ static bool has_render_node(struct wlr_backend *backend) {
return has_render_node;
}
static struct wlr_renderer *renderer_autocreate(struct wlr_backend *backend, int drm_fd) {
static struct wlr_renderer *renderer_autocreate(struct wlr_backend *backend, int drm_fd,
struct wl_event_loop *loop) {
const char *renderer_options[] = {
"auto",
"gles2",
@ -258,7 +259,7 @@ static struct wlr_renderer *renderer_autocreate(struct wlr_backend *backend, int
log_creation_failure(is_auto, "Cannot create Vulkan renderer: no DRM FD available");
} else {
#if WLR_HAS_VULKAN_RENDERER
renderer = wlr_vk_renderer_create_with_drm_fd(drm_fd);
renderer = wlr_vk_renderer_create_with_drm_fd(loop, drm_fd);
#else
wlr_log(WLR_ERROR, "Cannot create Vulkan renderer: disabled at compile-time");
#endif
@ -289,14 +290,13 @@ out:
return renderer;
}
struct wlr_renderer *renderer_autocreate_with_drm_fd(int drm_fd) {
struct wlr_renderer *renderer_autocreate_with_drm_fd(int drm_fd, struct wl_event_loop *loop) {
assert(drm_fd >= 0);
return renderer_autocreate(NULL, drm_fd);
return renderer_autocreate(NULL, drm_fd, loop);
}
struct wlr_renderer *wlr_renderer_autocreate(struct wlr_backend *backend) {
return renderer_autocreate(backend, -1);
return renderer_autocreate(backend, -1, backend->event_loop);
}
int wlr_renderer_get_drm_fd(struct wlr_renderer *r) {

View file

@ -26,7 +26,7 @@ static void buffer_consider_destroy(struct wlr_buffer *buffer) {
return;
}
assert(!buffer->accessing_data_ptr);
assert(buffer->n_data_ptr_accesses == 0);
wl_signal_emit_mutable(&buffer->events.destroy, NULL);
wlr_addon_set_finish(&buffer->addons);
@ -74,21 +74,39 @@ bool wlr_buffer_get_dmabuf(struct wlr_buffer *buffer,
bool wlr_buffer_begin_data_ptr_access(struct wlr_buffer *buffer, uint32_t flags,
void **data, uint32_t *format, size_t *stride) {
assert(!buffer->accessing_data_ptr);
if (!buffer->impl->begin_data_ptr_access) {
return false;
}
if (!buffer->impl->begin_data_ptr_access(buffer, flags, data, format, stride)) {
return false;
if (buffer->n_data_ptr_accesses == 0) {
if (!buffer->impl->begin_data_ptr_access(buffer, flags, data, format, stride)) {
return false;
}
buffer->data_ptr_access_flags = flags;
buffer->data_ptr_access_data = *data;
buffer->data_ptr_access_format = *format;
buffer->data_ptr_access_stride = *stride;
} else {
if (buffer->data_ptr_access_flags != flags) {
return false;
}
*data = buffer->data_ptr_access_data;
*format = buffer->data_ptr_access_format;
*stride = buffer->data_ptr_access_stride;
}
buffer->accessing_data_ptr = true;
buffer->n_data_ptr_accesses++;
return true;
}
void wlr_buffer_end_data_ptr_access(struct wlr_buffer *buffer) {
assert(buffer->accessing_data_ptr);
buffer->impl->end_data_ptr_access(buffer);
buffer->accessing_data_ptr = false;
assert(buffer->n_data_ptr_accesses > 0);
buffer->n_data_ptr_accesses--;
if (buffer->n_data_ptr_accesses == 0) {
buffer->impl->end_data_ptr_access(buffer);
buffer->data_ptr_access_flags = 0;
buffer->data_ptr_access_data = NULL;
buffer->data_ptr_access_format = 0;
buffer->data_ptr_access_stride = 0;
}
}
bool wlr_buffer_get_shm(struct wlr_buffer *buffer,