Merge branch 'vulkan-buckets' into 'master'

render/vulkan: Better bucket size selection

See merge request wlroots/wlroots!4893
This commit is contained in:
Kenny Levinsen 2026-04-07 06:14:23 +00:00
commit 960c026fd3

View file

@ -214,6 +214,25 @@ static void shared_buffer_destroy(struct wlr_vk_renderer *r,
free(buffer); free(buffer);
} }
// Compute the smallest bucket size able to service a staging request of
// `size` bytes. Buckets are sized as powers of two and must offer at least
// twice the requested space; results never drop below min_stage_size.
static VkDeviceSize minimum_bucket(VkDeviceSize size) {
	// Demand 2x headroom in the bucket.
	VkDeviceSize wanted = size * 2;
	if (wanted < min_stage_size) {
		return min_stage_size;
	}
	// Round up to the next power of two: smear the highest set bit of
	// (wanted - 1) into every lower bit position, then add one.
	VkDeviceSize v = wanted - 1;
	for (unsigned shift = 1; shift < 64; shift <<= 1) {
		v |= v >> shift;
	}
	return v + 1;
}
struct wlr_vk_buffer_span vulkan_get_stage_span(struct wlr_vk_renderer *r, struct wlr_vk_buffer_span vulkan_get_stage_span(struct wlr_vk_renderer *r,
VkDeviceSize size, VkDeviceSize alignment) { VkDeviceSize size, VkDeviceSize alignment) {
// try to find free span // try to find free span
@ -261,16 +280,22 @@ struct wlr_vk_buffer_span vulkan_get_stage_span(struct wlr_vk_renderer *r,
goto error_alloc; goto error_alloc;
} }
// we didn't find a free buffer - create one // We allocate buffers in buckets of increasing size, each twice the
// size = clamp(max(size * 2, prev_size * 2), min_size, max_size) // previous and always able to hold at least 2x the allocation request.
VkDeviceSize bsize = size * 2; VkDeviceSize bsize = minimum_bucket(size);
bsize = bsize < min_stage_size ? min_stage_size : bsize; struct wl_list *insertion_target = NULL;
if (!wl_list_empty(&r->stage.buffers)) { wl_list_for_each_reverse(buf, &r->stage.buffers, link) {
struct wl_list *last_link = r->stage.buffers.prev; if (buf->buf_size >= bsize) {
struct wlr_vk_shared_buffer *prev = wl_container_of( if (buf->buf_size > bsize * 2) {
last_link, prev, link); // We have found a missing bucket size in the sequence.
VkDeviceSize last_size = 2 * prev->buf_size; insertion_target = &buf->link;
bsize = bsize < last_size ? last_size : bsize; break;
}
bsize *= 2;
}
}
if (insertion_target == NULL) {
insertion_target = &r->stage.buffers;
} }
if (bsize > max_stage_size) { if (bsize > max_stage_size) {
@ -342,7 +367,7 @@ struct wlr_vk_buffer_span vulkan_get_stage_span(struct wlr_vk_renderer *r,
} }
buf->buf_size = bsize; buf->buf_size = bsize;
wl_list_insert(&r->stage.buffers, &buf->link); wl_list_insert(insertion_target, &buf->link);
*a = (struct wlr_vk_allocation){ *a = (struct wlr_vk_allocation){
.start = 0, .start = 0,
@ -542,13 +567,22 @@ static void release_command_buffer_resources(struct wlr_vk_command_buffer *cb,
wlr_texture_destroy(&texture->wlr_texture); wlr_texture_destroy(&texture->wlr_texture);
} }
VkDeviceSize cur_size = min_stage_size;
struct wl_list *insertion_target = &renderer->stage.buffers;
struct wlr_vk_shared_buffer *buf, *buf_tmp; struct wlr_vk_shared_buffer *buf, *buf_tmp;
wl_list_for_each_safe(buf, buf_tmp, &cb->stage_buffers, link) { wl_list_for_each_reverse_safe(buf, buf_tmp, &cb->stage_buffers, link) {
buf->allocs.size = 0; buf->allocs.size = 0;
buf->last_used_ms = now; buf->last_used_ms = now;
wl_list_remove(&buf->link); wl_list_remove(&buf->link);
wl_list_insert(&renderer->stage.buffers, &buf->link);
// Sorted insert
while (insertion_target->prev != &renderer->stage.buffers && buf->buf_size > cur_size) {
insertion_target = insertion_target->prev;
struct wlr_vk_shared_buffer *tbuf = wl_container_of(insertion_target, tbuf, link);
cur_size = tbuf->buf_size;
}
wl_list_insert(insertion_target, &buf->link);
} }
if (cb->color_transform) { if (cb->color_transform) {