pipewire/spa/plugins/vulkan/vulkan-utils.c
columbarius ea2a2c47c7 vulkan: split vulkan-utils into generic and compute part
This commit should just shuffle code around with no functional changes.
The goal is to ease development of simple vulkan nodes by providing
generic helpers.
2023-08-31 10:08:06 +00:00

300 lines
7.9 KiB
C

/* Spa */
/* SPDX-FileCopyrightText: Copyright © 2019 Wim Taymans */
/* SPDX-License-Identifier: MIT */
#include <vulkan/vulkan.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#if !defined(__FreeBSD__) && !defined(__MidnightBSD__)
#include <alloca.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <time.h>
#include <spa/utils/result.h>
#include <spa/utils/string.h>
#include <spa/support/log.h>
#include <spa/debug/mem.h>
#include "vulkan-utils.h"
//#define ENABLE_VALIDATION
/*
 * Translate a VkResult into a positive errno-style code.
 *
 * Success-like results (including the event-state queries) map to 0;
 * any result not explicitly listed is reported as a generic EIO.
 */
static int vkresult_to_errno(VkResult result)
{
	switch (result) {
	/* success and success-like event states */
	case VK_SUCCESS:
	case VK_EVENT_SET:
	case VK_EVENT_RESET:
		return 0;
	/* operation not finished or resource currently in use */
	case VK_NOT_READY:
	case VK_INCOMPLETE:
	case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
		return EBUSY;
	case VK_TIMEOUT:
		return ETIMEDOUT;
	/* every flavor of memory exhaustion or fragmentation */
	case VK_ERROR_OUT_OF_HOST_MEMORY:
	case VK_ERROR_OUT_OF_DEVICE_MEMORY:
	case VK_ERROR_MEMORY_MAP_FAILED:
	case VK_ERROR_OUT_OF_POOL_MEMORY:
	case VK_ERROR_FRAGMENTED_POOL:
#ifdef VK_ERROR_FRAGMENTATION_EXT
	case VK_ERROR_FRAGMENTATION_EXT:
#endif
		return ENOMEM;
	case VK_ERROR_INITIALIZATION_FAILED:
		return EIO;
	/* the device or surface went away underneath us */
	case VK_ERROR_DEVICE_LOST:
	case VK_ERROR_SURFACE_LOST_KHR:
#ifdef VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT
	case VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT:
#endif
		return ENODEV;
	/* a requested layer, extension or feature is missing */
	case VK_ERROR_LAYER_NOT_PRESENT:
	case VK_ERROR_EXTENSION_NOT_PRESENT:
	case VK_ERROR_FEATURE_NOT_PRESENT:
		return ENOENT;
	/* the implementation cannot support what was asked for */
	case VK_ERROR_INCOMPATIBLE_DRIVER:
	case VK_ERROR_FORMAT_NOT_SUPPORTED:
	case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR:
		return ENOTSUP;
	case VK_ERROR_TOO_MANY_OBJECTS:
		return ENFILE;
	/* swapchain no longer matches the surface */
	case VK_SUBOPTIMAL_KHR:
	case VK_ERROR_OUT_OF_DATE_KHR:
		return EIO;
	/* caller handed the implementation something invalid */
	case VK_ERROR_INVALID_EXTERNAL_HANDLE:
	case VK_ERROR_INVALID_SHADER_NV:
#ifdef VK_ERROR_VALIDATION_FAILED_EXT
	case VK_ERROR_VALIDATION_FAILED_EXT:
#endif
#ifdef VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT
	case VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT:
#endif
#ifdef VK_ERROR_INVALID_DEVICE_ADDRESS_EXT
	case VK_ERROR_INVALID_DEVICE_ADDRESS_EXT:
#endif
		return EINVAL;
#ifdef VK_ERROR_NOT_PERMITTED_EXT
	case VK_ERROR_NOT_PERMITTED_EXT:
		return EPERM;
#endif
	default:
		return EIO;
	}
}
/*
 * Create the VkInstance for this node, enabling the external-memory
 * capabilities extension and, when built with ENABLE_VALIDATION, the
 * Khronos validation layer if it is available on the system.
 *
 * Returns 0 on success or a negative errno-style code.
 */
static int createInstance(struct vulkan_base *s)
{
	static const VkApplicationInfo applicationInfo = {
		.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
		.pApplicationName = "PipeWire",
		.applicationVersion = 0,
		.pEngineName = "PipeWire Vulkan Engine",
		.engineVersion = 0,
		.apiVersion = VK_API_VERSION_1_1
	};
	static const char * const extensions[] = {
		VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME
	};
	/* NULL-terminated so the list is never empty, even without validation */
	static const char * const checkLayers[] = {
#ifdef ENABLE_VALIDATION
		"VK_LAYER_KHRONOS_validation",
#endif
		NULL
	};
	uint32_t i, j, layerCount = 0, n_layers = 0;
	/* at most one entry per requested layer can be enabled */
	const char *layers[SPA_N_ELEMENTS(checkLayers)];

	VK_CHECK_RESULT(vkEnumerateInstanceLayerProperties(&layerCount, NULL));
	/* a zero-length VLA is undefined behavior, so reserve at least one slot */
	VkLayerProperties availableLayers[layerCount > 0 ? layerCount : 1];
	/* second call may return VK_INCOMPLETE if the set shrank; tolerated */
	vkEnumerateInstanceLayerProperties(&layerCount, availableLayers);

	/* enable only the requested layers that are actually installed;
	 * spa_streq() rejects the NULL terminator entry */
	for (i = 0; i < layerCount; i++) {
		for (j = 0; j < SPA_N_ELEMENTS(checkLayers); j++) {
			if (spa_streq(availableLayers[i].layerName, checkLayers[j]) &&
			    n_layers < SPA_N_ELEMENTS(layers))
				layers[n_layers++] = checkLayers[j];
		}
	}

	const VkInstanceCreateInfo createInfo = {
		.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
		.pApplicationInfo = &applicationInfo,
		.enabledExtensionCount = SPA_N_ELEMENTS(extensions),
		.ppEnabledExtensionNames = extensions,
		.enabledLayerCount = n_layers,
		.ppEnabledLayerNames = layers,
	};
	VK_CHECK_RESULT(vkCreateInstance(&createInfo, NULL, &s->instance));
	return 0;
}
/*
 * Pick the physical device to run on.
 *
 * No selection heuristics are applied: the first enumerated device is
 * used. Returns 0 on success, -ENODEV when no device is present, or a
 * negative errno-style code when enumeration fails.
 */
static int findPhysicalDevice(struct vulkan_base *s)
{
	uint32_t deviceCount = 0;
	VkPhysicalDevice *devices;

	/* check the count query so deviceCount is never read uninitialized */
	VK_CHECK_RESULT(vkEnumeratePhysicalDevices(s->instance, &deviceCount, NULL));
	if (deviceCount == 0)
		return -ENODEV;

	devices = alloca(deviceCount * sizeof(VkPhysicalDevice));
	/* may return VK_INCOMPLETE if the device set changed; tolerated */
	vkEnumeratePhysicalDevices(s->instance, &deviceCount, devices);

	s->physicalDevice = devices[0];
	return 0;
}
static int getComputeQueueFamilyIndex(struct vulkan_base *s, uint32_t queueFlags, uint32_t *queueFamilyIndex)
{
uint32_t i, queueFamilyCount;
VkQueueFamilyProperties *queueFamilies;
vkGetPhysicalDeviceQueueFamilyProperties(s->physicalDevice, &queueFamilyCount, NULL);
queueFamilies = alloca(queueFamilyCount * sizeof(VkQueueFamilyProperties));
vkGetPhysicalDeviceQueueFamilyProperties(s->physicalDevice, &queueFamilyCount, queueFamilies);
for (i = 0; i < queueFamilyCount; i++) {
VkQueueFamilyProperties props = queueFamilies[i];
if (props.queueCount > 0 && ((props.queueFlags & queueFlags) == queueFlags))
break;
}
if (i == queueFamilyCount)
return -ENODEV;
*queueFamilyIndex = i;
return 0;
}
/*
 * Create the logical device with one queue from the first family that
 * matches info->queueFlags, enabling the external-memory fd extensions
 * used for dmabuf/fd buffer sharing. On success the queue handle is
 * fetched into s->queue. Returns 0 or a negative errno-style code.
 */
static int createDevice(struct vulkan_base *s, struct vulkan_base_info *info)
{
	CHECK(getComputeQueueFamilyIndex(s, info->queueFlags, &s->queueFamilyIndex));

	const VkDeviceQueueCreateInfo queueCreateInfo = {
		.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
		.queueFamilyIndex = s->queueFamilyIndex,
		.queueCount = 1,
		.pQueuePriorities = (const float[]) { 1.0f }
	};
	static const char * const extensions[] = {
		VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
		VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME
	};
	const VkDeviceCreateInfo deviceCreateInfo = {
		.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
		.queueCreateInfoCount = 1,
		.pQueueCreateInfos = &queueCreateInfo,
		/* derive the count from the array so they cannot drift apart */
		.enabledExtensionCount = SPA_N_ELEMENTS(extensions),
		.ppEnabledExtensionNames = extensions,
	};
	VK_CHECK_RESULT(vkCreateDevice(s->physicalDevice, &deviceCreateInfo, NULL, &s->device));

	vkGetDeviceQueue(s->device, s->queueFamilyIndex, 0, &s->queue);
	return 0;
}
/*
 * Create a command pool on the device's queue family. Buffers allocated
 * from it can be reset individually
 * (VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT).
 *
 * Returns 0 on success or a negative errno-style code.
 */
int vulkan_commandPool_create(struct vulkan_base *s, VkCommandPool *commandPool)
{
	const VkCommandPoolCreateInfo poolInfo = {
		.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
		.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
		.queueFamilyIndex = s->queueFamilyIndex,
	};

	VK_CHECK_RESULT(vkCreateCommandPool(s->device, &poolInfo, NULL, commandPool));
	return 0;
}
/*
 * Allocate a single primary command buffer from the given pool.
 *
 * Returns 0 on success or a negative errno-style code.
 */
int vulkan_commandBuffer_create(struct vulkan_base *s, VkCommandPool commandPool, VkCommandBuffer *commandBuffer)
{
	const VkCommandBufferAllocateInfo allocInfo = {
		.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
		.commandPool = commandPool,
		.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
		.commandBufferCount = 1,
	};

	VK_CHECK_RESULT(vkAllocateCommandBuffers(s->device, &allocInfo, commandBuffer));
	return 0;
}
/*
 * Find a memory type that is allowed by memoryTypeBits and has all the
 * requested property flags set.
 *
 * Returns the memory type index, or SPA_ID_INVALID (0xffffffff, the
 * value the previous "-1" return converted to) when nothing matches.
 */
uint32_t vulkan_memoryType_find(struct vulkan_base *s,
		uint32_t memoryTypeBits, VkMemoryPropertyFlags properties)
{
	uint32_t i;
	VkPhysicalDeviceMemoryProperties memoryProperties;

	vkGetPhysicalDeviceMemoryProperties(s->physicalDevice, &memoryProperties);

	for (i = 0; i < memoryProperties.memoryTypeCount; i++) {
		/* use an unsigned shift: 1 << 31 on a signed int is UB */
		if ((memoryTypeBits & (1u << i)) &&
		    ((memoryProperties.memoryTypes[i].propertyFlags & properties) == properties))
			return i;
	}
	return SPA_ID_INVALID;
}
/*
 * Release everything owned by a vulkan_buffer: the exported fd (if any),
 * the device memory, the image and its view. Handles that were never
 * created are passed as VK_NULL_HANDLE, which the destroy calls ignore.
 */
void vulkan_buffer_clear(struct vulkan_base *s, struct vulkan_buffer *buffer)
{
	if (buffer->fd != -1) {
		close(buffer->fd);
		/* forget the fd so a second clear cannot close a recycled descriptor */
		buffer->fd = -1;
	}
	vkFreeMemory(s->device, buffer->memory, NULL);
	vkDestroyImage(s->device, buffer->image, NULL);
	vkDestroyImageView(s->device, buffer->view, NULL);
}
int vulkan_stream_init(struct vulkan_stream *stream, enum spa_direction direction,
struct spa_dict *props)
{
spa_zero(*stream);
stream->direction = direction;
stream->current_buffer_id = SPA_ID_INVALID;
stream->busy_buffer_id = SPA_ID_INVALID;
stream->ready_buffer_id = SPA_ID_INVALID;
return 0;
}
/* Public wrapper around the file-local vkresult_to_errno(): maps a
 * VkResult to a positive errno-style code (0 for success-like results). */
int vulkan_vkresult_to_errno(VkResult result)
{
	return vkresult_to_errno(result);
}
/*
 * Bring up the base Vulkan state (instance, physical device, logical
 * device). Idempotent: a second call on an initialized base is a no-op.
 *
 * Returns 0 on success or a negative errno-style code (via CHECK).
 */
int vulkan_base_init(struct vulkan_base *s, struct vulkan_base_info *info)
{
	if (s->initialized)
		return 0;

	CHECK(createInstance(s));
	CHECK(findPhysicalDevice(s));
	CHECK(createDevice(s, info));
	s->initialized = true;
	return 0;
}
/*
 * Tear down the base Vulkan state created by vulkan_base_init().
 * Safe to call on an uninitialized base; clears the initialized flag so
 * the base can be initialized again.
 */
void vulkan_base_deinit(struct vulkan_base *s)
{
	if (!s->initialized)
		return;

	vkDestroyDevice(s->device, NULL);
	vkDestroyInstance(s->instance, NULL);
	s->initialized = false;
}