Index: ps/trunk/source/renderer/backend/vulkan/Buffer.cpp
===================================================================
--- ps/trunk/source/renderer/backend/vulkan/Buffer.cpp (revision 27838)
+++ ps/trunk/source/renderer/backend/vulkan/Buffer.cpp (revision 27839)
@@ -1,117 +1,118 @@
/* Copyright (C) 2023 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* 0 A.D. is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
*/
#include "precompiled.h"
#include "Buffer.h"
#include "renderer/backend/vulkan/Device.h"
+#include "renderer/backend/vulkan/Utilities.h"
namespace Renderer
{
namespace Backend
{
namespace Vulkan
{
// static
std::unique_ptr CBuffer::Create(
CDevice* device, const char* name, const Type type, const uint32_t size,
const bool dynamic)
{
std::unique_ptr buffer(new CBuffer());
buffer->m_Device = device;
buffer->m_Type = type;
buffer->m_Size = size;
buffer->m_Dynamic = dynamic;
VkMemoryPropertyFlags properties = 0;
VkBufferUsageFlags usage = VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM;
VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_AUTO;
switch (type)
{
case Type::VERTEX:
usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
properties = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
memoryUsage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
break;
case Type::INDEX:
usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
properties = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
memoryUsage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
break;
case Type::UPLOAD:
usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
properties = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
break;
case Type::UNIFORM:
usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
properties = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
memoryUsage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
break;
}
VkBufferCreateInfo bufferCreateInfo{};
bufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferCreateInfo.size = size;
bufferCreateInfo.usage = usage;
bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VmaAllocationCreateInfo allocationCreateInfo{};
if (type == Type::UPLOAD)
allocationCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
#ifndef NDEBUG
allocationCreateInfo.flags |= VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
allocationCreateInfo.pUserData = const_cast(name);
#endif
allocationCreateInfo.requiredFlags = properties;
allocationCreateInfo.usage = memoryUsage;
const VkResult createBufferResult = vmaCreateBuffer(
device->GetVMAAllocator(), &bufferCreateInfo, &allocationCreateInfo,
&buffer->m_Buffer, &buffer->m_Allocation, &buffer->m_AllocationInfo);
if (createBufferResult != VK_SUCCESS)
{
- LOGERROR("Failed to create VkBuffer: %d", static_cast(createBufferResult));
+ LOGERROR("Failed to create VkBuffer: %d (%s)", static_cast(createBufferResult), Utilities::GetVkResultName(createBufferResult));
return nullptr;
}
device->SetObjectName(VK_OBJECT_TYPE_BUFFER, buffer->m_Buffer, name);
return buffer;
}
CBuffer::CBuffer() = default;

CBuffer::~CBuffer()
{
	// Destruction is deferred via the device's destroy queue: the buffer
	// might still be referenced by an in-flight command buffer, so the
	// device frees it only once it is safe to do so.
	if (m_Allocation != VK_NULL_HANDLE)
		m_Device->ScheduleObjectToDestroy(
			VK_OBJECT_TYPE_BUFFER, m_Buffer, m_Allocation);
}
// Returns the owning device through the backend-agnostic interface.
IDevice* CBuffer::GetDevice()
{
	return m_Device;
}
} // namespace Vulkan
} // namespace Backend
} // namespace Renderer
Index: ps/trunk/source/renderer/backend/vulkan/Device.cpp
===================================================================
--- ps/trunk/source/renderer/backend/vulkan/Device.cpp (revision 27838)
+++ ps/trunk/source/renderer/backend/vulkan/Device.cpp (revision 27839)
@@ -1,1040 +1,1041 @@
/* Copyright (C) 2023 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* 0 A.D. is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
*/
#include "precompiled.h"
#include "Device.h"
#include "lib/external_libraries/libsdl.h"
#include "lib/hash.h"
#include "lib/sysdep/os.h"
#include "maths/MathUtil.h"
#include "ps/CLogger.h"
#include "ps/ConfigDB.h"
#include "ps/Profile.h"
#include "renderer/backend/vulkan/Buffer.h"
#include "renderer/backend/vulkan/DescriptorManager.h"
#include "renderer/backend/vulkan/DeviceCommandContext.h"
#include "renderer/backend/vulkan/DeviceSelection.h"
#include "renderer/backend/vulkan/Framebuffer.h"
#include "renderer/backend/vulkan/Mapping.h"
#include "renderer/backend/vulkan/PipelineState.h"
#include "renderer/backend/vulkan/RenderPassManager.h"
#include "renderer/backend/vulkan/RingCommandContext.h"
#include "renderer/backend/vulkan/SamplerManager.h"
#include "renderer/backend/vulkan/ShaderProgram.h"
#include "renderer/backend/vulkan/SubmitScheduler.h"
#include "renderer/backend/vulkan/SwapChain.h"
#include "renderer/backend/vulkan/Texture.h"
#include "renderer/backend/vulkan/Utilities.h"
#include "scriptinterface/JSON.h"
#include "scriptinterface/Object.h"
#include "scriptinterface/ScriptInterface.h"
#include "scriptinterface/ScriptRequest.h"
#include <algorithm>
#include <iterator>
#include <limits>
#include <string>
#include <string_view>
#include <tuple>
#include <vector>

// According to https://wiki.libsdl.org/SDL_Vulkan_LoadLibrary the following
// functionality is supported since SDL 2.0.6.
#if SDL_VERSION_ATLEAST(2, 0, 6)
#include <SDL_vulkan.h>
#endif
namespace Renderer
{
namespace Backend
{
namespace Vulkan
{
namespace
{
// Returns the instance extensions SDL requires to create a Vulkan surface
// for the given window; empty when there is no window. The strings are
// owned by SDL and stay valid for the lifetime of the video subsystem.
std::vector<const char*> GetRequiredSDLExtensions(SDL_Window* window)
{
	if (!window)
		return {};
	const size_t MAX_EXTENSION_COUNT = 16;
	unsigned int SDLExtensionCount = MAX_EXTENSION_COUNT;
	const char* SDLExtensions[MAX_EXTENSION_COUNT];
	ENSURE(SDL_Vulkan_GetInstanceExtensions(window, &SDLExtensionCount, SDLExtensions));
	std::vector<const char*> requiredExtensions;
	requiredExtensions.reserve(SDLExtensionCount);
	std::copy_n(SDLExtensions, SDLExtensionCount, std::back_inserter(requiredExtensions));
	return requiredExtensions;
}
std::vector GetAvailableValidationLayers()
{
uint32_t layerCount = 0;
ENSURE_VK_SUCCESS(vkEnumerateInstanceLayerProperties(&layerCount, nullptr));
std::vector availableLayers(layerCount);
ENSURE_VK_SUCCESS(vkEnumerateInstanceLayerProperties(&layerCount, availableLayers.data()));
for (const VkLayerProperties& layer : availableLayers)
{
LOGMESSAGE("Vulkan validation layer: '%s' (%s) v%u.%u.%u.%u",
layer.layerName, layer.description,
VK_API_VERSION_VARIANT(layer.specVersion),
VK_API_VERSION_MAJOR(layer.specVersion),
VK_API_VERSION_MINOR(layer.specVersion),
VK_API_VERSION_PATCH(layer.specVersion));
}
std::vector availableValidationLayers;
availableValidationLayers.reserve(layerCount);
for (const VkLayerProperties& layer : availableLayers)
availableValidationLayers.emplace_back(layer.layerName);
return availableValidationLayers;
}
std::vector GetAvailableInstanceExtensions(const char* layerName = nullptr)
{
uint32_t extensionCount = 0;
ENSURE_VK_SUCCESS(vkEnumerateInstanceExtensionProperties(layerName, &extensionCount, nullptr));
std::vector extensions(extensionCount);
ENSURE_VK_SUCCESS(vkEnumerateInstanceExtensionProperties(layerName, &extensionCount, extensions.data()));
std::vector availableExtensions;
for (const VkExtensionProperties& extension : extensions)
availableExtensions.emplace_back(extension.extensionName);
return availableExtensions;
}
// Routes Vulkan debug-utils messages into the engine log. Warnings are
// filtered through a small rule table so that known noisy or known-harmless
// reports are demoted or dropped.
VKAPI_ATTR VkBool32 VKAPI_CALL DebugCallback(
	VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
	VkDebugUtilsMessageTypeFlagsEXT messageType,
	const VkDebugUtilsMessengerCallbackDataEXT* callbackData,
	void* UNUSED(userData))
{
	// Low-severity messages go straight to the ordinary log.
	if (messageSeverity & (VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT))
	{
		LOGMESSAGE("Vulkan: %s", callbackData->pMessage);
		return VK_FALSE;
	}

	if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT)
	{
		struct HideRule
		{
			VkDebugUtilsMessageTypeFlagsEXT flags;
			std::string_view pattern;
			bool skip;
		};
		constexpr HideRule hideRules[] =
		{
			// Not consumed shader output is a known problem which produces too
			// many warning.
			{VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT, "OutputNotConsumed", false},
			// TODO: check vkGetImageMemoryRequirements2 for prefersDedicatedAllocation.
			{VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT, "vkBindMemory-small-dedicated-allocation", false},
			{VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT, "vkAllocateMemory-small-allocation", false},
			// We have some unnecessary clears which were needed for GL.
			// Ignore message for now, because they're spawned each frame.
			{VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT, "ClearCmdBeforeDraw", true},
			{VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT, "vkCmdClearAttachments-clear-after-load", true},
			// TODO: investigate probably false-positive report.
			{VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT, "vkCmdBeginRenderPass-StoreOpDontCareThenLoadOpLoad", true},
		};
		const std::string_view message{callbackData->pMessage};
		const HideRule* matchedRule = nullptr;
		for (const HideRule& rule : hideRules)
		{
			if ((rule.flags & messageType) && message.find(rule.pattern) != std::string_view::npos)
			{
				matchedRule = &rule;
				break;
			}
		}
		// Unmatched warnings are real; matched-but-not-skipped ones are demoted.
		if (!matchedRule)
			LOGWARNING("Vulkan: %s", callbackData->pMessage);
		else if (!matchedRule->skip)
			LOGMESSAGE("Vulkan: %s", callbackData->pMessage);
		return VK_FALSE;
	}

	if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT)
		LOGERROR("Vulkan: %s", callbackData->pMessage);

	return VK_FALSE;
}
// A workaround function to meet calling conventions of Vulkan, SDL and GLAD.
// Resolves a Vulkan entry point by name via SDL's loader so GLAD can use it.
GLADapiproc GetInstanceProcAddr(VkInstance instance, const char* name)
{
#if SDL_VERSION_ATLEAST(2, 0, 6)
	PFN_vkGetInstanceProcAddr function = reinterpret_cast<PFN_vkGetInstanceProcAddr>(SDL_Vulkan_GetVkGetInstanceProcAddr());
	return reinterpret_cast<GLADapiproc>(function(instance, name));
#else
	return nullptr;
#endif
}
} // anonymous namespace
// static
std::unique_ptr CDevice::Create(SDL_Window* window)
{
if (!window)
{
LOGERROR("Can't create Vulkan device without window.");
return nullptr;
}
GLADuserptrloadfunc gladLoadFunction = reinterpret_cast(GetInstanceProcAddr);
std::unique_ptr device(new CDevice());
device->m_Window = window;
#ifdef NDEBUG
bool enableDebugMessages = false;
CFG_GET_VAL("renderer.backend.debugmessages", enableDebugMessages);
bool enableDebugLabels = false;
CFG_GET_VAL("renderer.backend.debuglabels", enableDebugLabels);
bool enableDebugScopedLabels = false;
CFG_GET_VAL("renderer.backend.debugscopedlabels", enableDebugScopedLabels);
#else
bool enableDebugMessages = true;
bool enableDebugLabels = true;
bool enableDebugScopedLabels = true;
#endif
int gladVulkanVersion = gladLoadVulkanUserPtr(nullptr, gladLoadFunction, nullptr);
if (!gladVulkanVersion)
{
LOGERROR("GLAD unable to load vulkan.");
return nullptr;
}
VkApplicationInfo applicationInfo{};
applicationInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
applicationInfo.pApplicationName = "0 A.D.";
applicationInfo.applicationVersion = VK_MAKE_VERSION(0, 0, 27);
applicationInfo.pEngineName = "Pyrogenesis";
applicationInfo.engineVersion = applicationInfo.applicationVersion;
applicationInfo.apiVersion = VK_API_VERSION_1_1;
std::vector requiredInstanceExtensions = GetRequiredSDLExtensions(window);
device->m_ValidationLayers = GetAvailableValidationLayers();
auto hasValidationLayer = [&layers = device->m_ValidationLayers](const char* name) -> bool
{
return std::find(layers.begin(), layers.end(), name) != layers.end();
};
device->m_InstanceExtensions = GetAvailableInstanceExtensions();
auto hasInstanceExtension = [&extensions = device->m_InstanceExtensions](const char* name) -> bool
{
return std::find(extensions.begin(), extensions.end(), name) != extensions.end();
};
#ifdef NDEBUG
bool enableDebugContext = false;
CFG_GET_VAL("renderer.backend.debugcontext", enableDebugContext);
#else
bool enableDebugContext = true;
#endif
if (!hasInstanceExtension(VK_EXT_DEBUG_UTILS_EXTENSION_NAME))
enableDebugMessages = enableDebugLabels = enableDebugScopedLabels = false;
const bool enableDebugLayers = enableDebugContext || enableDebugMessages || enableDebugLabels || enableDebugScopedLabels;
if (enableDebugLayers)
requiredInstanceExtensions.emplace_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
std::vector requestedValidationLayers;
const bool enableValidationFeatures = enableDebugMessages && hasValidationLayer("VK_LAYER_KHRONOS_validation");
if (enableValidationFeatures)
requestedValidationLayers.emplace_back("VK_LAYER_KHRONOS_validation");
// https://github.com/KhronosGroup/Vulkan-ValidationLayers/blob/master/docs/synchronization_usage.md
VkValidationFeatureEnableEXT validationFeatureEnables[] =
{
VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT,
VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT
};
VkValidationFeaturesEXT validationFeatures{};
validationFeatures.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT;
validationFeatures.enabledValidationFeatureCount = std::size(validationFeatureEnables);
validationFeatures.pEnabledValidationFeatures = validationFeatureEnables;
VkInstanceCreateInfo instanceCreateInfo{};
instanceCreateInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
instanceCreateInfo.pApplicationInfo = &applicationInfo;
instanceCreateInfo.enabledExtensionCount = requiredInstanceExtensions.size();
instanceCreateInfo.ppEnabledExtensionNames = requiredInstanceExtensions.data();
if (requestedValidationLayers.empty())
{
instanceCreateInfo.enabledLayerCount = 0;
instanceCreateInfo.ppEnabledLayerNames = nullptr;
}
else
{
instanceCreateInfo.enabledLayerCount = requestedValidationLayers.size();
instanceCreateInfo.ppEnabledLayerNames = requestedValidationLayers.data();
}
// Enabling validation features might significantly reduce performance,
// even more than the standard validation layer.
if (enableValidationFeatures && enableDebugContext)
{
instanceCreateInfo.pNext = &validationFeatures;
}
const VkResult createInstanceResult = vkCreateInstance(&instanceCreateInfo, nullptr, &device->m_Instance);
if (createInstanceResult != VK_SUCCESS)
{
if (createInstanceResult == VK_ERROR_INCOMPATIBLE_DRIVER)
LOGERROR("Can't create Vulkan instance: incompatible driver.");
else if (createInstanceResult == VK_ERROR_EXTENSION_NOT_PRESENT)
LOGERROR("Can't create Vulkan instance: extension not present.");
else if (createInstanceResult == VK_ERROR_LAYER_NOT_PRESENT)
LOGERROR("Can't create Vulkan instance: layer not present.");
else
- LOGERROR("Unknown error during Vulkan instance creation: %d", static_cast(createInstanceResult));
+ LOGERROR("Unknown error during Vulkan instance creation: %d (%s)",
+ static_cast(createInstanceResult), Utilities::GetVkResultName(createInstanceResult));
return nullptr;
}
gladVulkanVersion = gladLoadVulkanUserPtr(nullptr, gladLoadFunction, device->m_Instance);
if (!gladVulkanVersion)
{
LOGERROR("GLAD unable to re-load vulkan after its instance creation.");
return nullptr;
}
if (GLAD_VK_EXT_debug_utils && enableDebugMessages)
{
VkDebugUtilsMessengerCreateInfoEXT debugCreateInfo{};
debugCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
debugCreateInfo.messageSeverity =
VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
debugCreateInfo.messageType =
VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
debugCreateInfo.pfnUserCallback = DebugCallback;
debugCreateInfo.pUserData = nullptr;
ENSURE_VK_SUCCESS(vkCreateDebugUtilsMessengerEXT(
device->m_Instance, &debugCreateInfo, nullptr, &device->m_DebugMessenger));
}
if (window)
ENSURE(SDL_Vulkan_CreateSurface(window, device->m_Instance, &device->m_Surface));
const std::vector requiredDeviceExtensions =
{
VK_KHR_SWAPCHAIN_EXTENSION_NAME
};
std::vector availablePhyscialDevices =
GetAvailablePhysicalDevices(device->m_Instance, device->m_Surface, requiredDeviceExtensions);
for (const SAvailablePhysicalDevice& device : availablePhyscialDevices)
{
LOGMESSAGE("Vulkan available device: '%s' Type: %u Supported: %c",
device.properties.deviceName, static_cast(device.properties.deviceType),
IsPhysicalDeviceUnsupported(device) ? 'N' : 'Y');
LOGMESSAGE(" ID: %u VendorID: %u API Version: %u Driver Version: %u",
device.properties.deviceID, device.properties.vendorID,
device.properties.apiVersion, device.properties.driverVersion);
LOGMESSAGE(" hasRequiredExtensions: %c hasOutputToSurfaceSupport: %c",
device.hasRequiredExtensions ? 'Y' : 'N', device.hasOutputToSurfaceSupport ? 'Y' : 'N');
LOGMESSAGE(" graphicsQueueFamilyIndex: %u presentQueueFamilyIndex: %u families: %zu",
device.graphicsQueueFamilyIndex, device.presentQueueFamilyIndex,
device.queueFamilies.size());
LOGMESSAGE(" maxBoundDescriptorSets: %u", device.properties.limits.maxBoundDescriptorSets);
for (const VkSurfaceFormatKHR& surfaceFormat : device.surfaceFormats)
{
LOGMESSAGE(" Surface format: %u colorSpace: %u Supported: %c",
static_cast(surfaceFormat.format),
static_cast(surfaceFormat.colorSpace),
IsSurfaceFormatSupported(surfaceFormat) ? 'Y' : 'N');
}
for (uint32_t memoryTypeIndex = 0; memoryTypeIndex < device.memoryProperties.memoryTypeCount; ++memoryTypeIndex)
{
const VkMemoryType& type = device.memoryProperties.memoryTypes[memoryTypeIndex];
LOGMESSAGE(" Memory Type Index: %u Flags: %u Heap Index: %u",
memoryTypeIndex, static_cast(type.propertyFlags), type.heapIndex);
}
for (uint32_t memoryHeapIndex = 0; memoryHeapIndex < device.memoryProperties.memoryHeapCount; ++memoryHeapIndex)
{
const VkMemoryHeap& heap = device.memoryProperties.memoryHeaps[memoryHeapIndex];
LOGMESSAGE(" Memory Heap Index: %u Size: %zu Flags: %u",
memoryHeapIndex, static_cast(heap.size / 1024), static_cast(heap.flags));
}
}
device->m_AvailablePhysicalDevices = availablePhyscialDevices;
// We need to remove unsupported devices first.
availablePhyscialDevices.erase(
std::remove_if(
availablePhyscialDevices.begin(), availablePhyscialDevices.end(),
IsPhysicalDeviceUnsupported),
availablePhyscialDevices.end());
if (availablePhyscialDevices.empty())
{
LOGERROR("Vulkan can not find any supported and suitable device.");
return nullptr;
}
int deviceIndexOverride = -1;
CFG_GET_VAL("renderer.backend.vulkan.deviceindexoverride", deviceIndexOverride);
auto choosedDeviceIt = device->m_AvailablePhysicalDevices.end();
if (deviceIndexOverride >= 0)
{
choosedDeviceIt = std::find_if(
device->m_AvailablePhysicalDevices.begin(), device->m_AvailablePhysicalDevices.end(),
[deviceIndexOverride](const SAvailablePhysicalDevice& availableDevice)
{
return availableDevice.index == static_cast(deviceIndexOverride);
});
if (choosedDeviceIt == device->m_AvailablePhysicalDevices.end())
LOGWARNING("Device with override index %d not found.", deviceIndexOverride);
}
if (choosedDeviceIt == device->m_AvailablePhysicalDevices.end())
{
// We need to choose the best available device fits our needs.
choosedDeviceIt = min_element(
availablePhyscialDevices.begin(), availablePhyscialDevices.end(),
ComparePhysicalDevices);
}
device->m_ChoosenDevice = *choosedDeviceIt;
const SAvailablePhysicalDevice& choosenDevice = device->m_ChoosenDevice;
device->m_AvailablePhysicalDevices.erase(std::remove_if(
device->m_AvailablePhysicalDevices.begin(), device->m_AvailablePhysicalDevices.end(),
[physicalDevice = choosenDevice.device](const SAvailablePhysicalDevice& device)
{
return physicalDevice == device.device;
}), device->m_AvailablePhysicalDevices.end());
gladVulkanVersion = gladLoadVulkanUserPtr(choosenDevice.device, gladLoadFunction, device->m_Instance);
if (!gladVulkanVersion)
{
LOGERROR("GLAD unable to re-load vulkan after choosing its physical device.");
return nullptr;
}
#if !OS_MACOSX
auto hasDeviceExtension = [&extensions = choosenDevice.extensions](const char* name) -> bool
{
return std::find(extensions.begin(), extensions.end(), name) != extensions.end();
};
const bool hasDescriptorIndexing = hasDeviceExtension(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
#else
// Metal on macOS doesn't support combined samplers natively. Currently
// they break compiling SPIR-V shaders with descriptor indexing into MTL
// shaders when using MoltenVK.
const bool hasDescriptorIndexing = false;
#endif
const bool hasNeededDescriptorIndexingFeatures =
hasDescriptorIndexing &&
choosenDevice.descriptorIndexingProperties.maxUpdateAfterBindDescriptorsInAllPools >= 65536 &&
choosenDevice.descriptorIndexingFeatures.shaderSampledImageArrayNonUniformIndexing &&
choosenDevice.descriptorIndexingFeatures.runtimeDescriptorArray &&
choosenDevice.descriptorIndexingFeatures.descriptorBindingVariableDescriptorCount &&
choosenDevice.descriptorIndexingFeatures.descriptorBindingPartiallyBound &&
choosenDevice.descriptorIndexingFeatures.descriptorBindingUpdateUnusedWhilePending &&
choosenDevice.descriptorIndexingFeatures.descriptorBindingSampledImageUpdateAfterBind;
std::vector deviceExtensions = requiredDeviceExtensions;
if (hasDescriptorIndexing)
deviceExtensions.emplace_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
device->m_GraphicsQueueFamilyIndex = choosenDevice.graphicsQueueFamilyIndex;
const std::array queueFamilyIndices{{
choosenDevice.graphicsQueueFamilyIndex
}};
PS::StaticVector queueCreateInfos;
const float queuePriority = 1.0f;
std::transform(queueFamilyIndices.begin(), queueFamilyIndices.end(),
std::back_inserter(queueCreateInfos),
[&queuePriority](const size_t queueFamilyIndex)
{
VkDeviceQueueCreateInfo queueCreateInfo{};
queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queueCreateInfo.pQueuePriorities = &queuePriority;
queueCreateInfo.queueCount = 1;
queueCreateInfo.queueFamilyIndex = queueFamilyIndex;
return queueCreateInfo;
});
// https://github.com/KhronosGroup/Vulkan-Guide/blob/master/chapters/enabling_features.adoc
VkPhysicalDeviceFeatures deviceFeatures{};
VkPhysicalDeviceFeatures2 deviceFeatures2{};
VkPhysicalDeviceDescriptorIndexingFeaturesEXT descriptorIndexingFeatures{};
deviceFeatures.textureCompressionBC = choosenDevice.features.textureCompressionBC;
deviceFeatures.samplerAnisotropy = choosenDevice.features.samplerAnisotropy;
deviceFeatures.fillModeNonSolid = choosenDevice.features.fillModeNonSolid;
descriptorIndexingFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT;
descriptorIndexingFeatures.shaderSampledImageArrayNonUniformIndexing =
choosenDevice.descriptorIndexingFeatures.shaderSampledImageArrayNonUniformIndexing;
descriptorIndexingFeatures.runtimeDescriptorArray =
choosenDevice.descriptorIndexingFeatures.runtimeDescriptorArray;
descriptorIndexingFeatures.descriptorBindingVariableDescriptorCount =
choosenDevice.descriptorIndexingFeatures.descriptorBindingVariableDescriptorCount;
descriptorIndexingFeatures.descriptorBindingPartiallyBound =
choosenDevice.descriptorIndexingFeatures.descriptorBindingPartiallyBound;
descriptorIndexingFeatures.descriptorBindingUpdateUnusedWhilePending =
choosenDevice.descriptorIndexingFeatures.descriptorBindingUpdateUnusedWhilePending;
descriptorIndexingFeatures.descriptorBindingSampledImageUpdateAfterBind =
choosenDevice.descriptorIndexingFeatures.descriptorBindingSampledImageUpdateAfterBind;
deviceFeatures2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
deviceFeatures2.features = deviceFeatures;
if (hasNeededDescriptorIndexingFeatures)
deviceFeatures2.pNext = &descriptorIndexingFeatures;
VkDeviceCreateInfo deviceCreateInfo{};
deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
deviceCreateInfo.queueCreateInfoCount = queueCreateInfos.size();
deviceCreateInfo.pQueueCreateInfos = queueCreateInfos.data();
deviceCreateInfo.enabledExtensionCount = deviceExtensions.size();
deviceCreateInfo.ppEnabledExtensionNames = deviceExtensions.data();
deviceCreateInfo.pEnabledFeatures = nullptr;
deviceCreateInfo.pNext = &deviceFeatures2;
deviceCreateInfo.enabledLayerCount = 0;
deviceCreateInfo.ppEnabledLayerNames = nullptr;
const VkResult createDeviceResult = vkCreateDevice(
choosenDevice.device, &deviceCreateInfo, nullptr, &device->m_Device);
if (createDeviceResult != VK_SUCCESS)
{
if (createDeviceResult == VK_ERROR_FEATURE_NOT_PRESENT)
LOGERROR("Can't create Vulkan device: feature not present.");
else if (createDeviceResult == VK_ERROR_EXTENSION_NOT_PRESENT)
LOGERROR("Can't create Vulkan device: extension not present.");
else
- LOGERROR("Unknown error during Vulkan device creation: %d",
- static_cast(createDeviceResult));
+ LOGERROR("Unknown error during Vulkan device creation: %d (%s)",
+ static_cast(createDeviceResult), Utilities::GetVkResultName(createDeviceResult));
return nullptr;
}
VmaVulkanFunctions vulkanFunctions{};
vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
vulkanFunctions.vkFreeMemory = vkFreeMemory;
vulkanFunctions.vkMapMemory = vkMapMemory;
vulkanFunctions.vkUnmapMemory = vkUnmapMemory;
vulkanFunctions.vkFlushMappedMemoryRanges = vkFlushMappedMemoryRanges;
vulkanFunctions.vkInvalidateMappedMemoryRanges = vkInvalidateMappedMemoryRanges;
vulkanFunctions.vkBindBufferMemory = vkBindBufferMemory;
vulkanFunctions.vkBindImageMemory = vkBindImageMemory;
vulkanFunctions.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements;
vulkanFunctions.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements;
vulkanFunctions.vkCreateBuffer = vkCreateBuffer;
vulkanFunctions.vkDestroyBuffer = vkDestroyBuffer;
vulkanFunctions.vkCreateImage = vkCreateImage;
vulkanFunctions.vkDestroyImage = vkDestroyImage;
vulkanFunctions.vkCmdCopyBuffer = vkCmdCopyBuffer;
// Functions promoted to Vulkan 1.1.
vulkanFunctions.vkGetBufferMemoryRequirements2KHR = vkGetBufferMemoryRequirements2;
vulkanFunctions.vkGetImageMemoryRequirements2KHR = vkGetImageMemoryRequirements2;
vulkanFunctions.vkBindBufferMemory2KHR = vkBindBufferMemory2;
vulkanFunctions.vkBindImageMemory2KHR = vkBindImageMemory2;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = vkGetPhysicalDeviceMemoryProperties2;
VmaAllocatorCreateInfo allocatorCreateInfo{};
allocatorCreateInfo.instance = device->m_Instance;
allocatorCreateInfo.physicalDevice = choosenDevice.device;
allocatorCreateInfo.device = device->m_Device;
allocatorCreateInfo.vulkanApiVersion = applicationInfo.apiVersion;
allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
const VkResult createVMAAllocatorResult =
vmaCreateAllocator(&allocatorCreateInfo, &device->m_VMAAllocator);
if (createVMAAllocatorResult != VK_SUCCESS)
{
- LOGERROR("Failed to create VMA allocator: %d",
- static_cast(createDeviceResult));
+ LOGERROR("Failed to create VMA allocator: %d (%s)",
+ static_cast(createVMAAllocatorResult), Utilities::GetVkResultName(createVMAAllocatorResult));
return nullptr;
}
// We need to use VK_SHARING_MODE_CONCURRENT if we have graphics and present
// in different queues.
vkGetDeviceQueue(device->m_Device, choosenDevice.graphicsQueueFamilyIndex,
0, &device->m_GraphicsQueue);
ENSURE(device->m_GraphicsQueue != VK_NULL_HANDLE);
Capabilities& capabilities = device->m_Capabilities;
capabilities.debugLabels = enableDebugLabels;
capabilities.debugScopedLabels = enableDebugScopedLabels;
capabilities.S3TC = choosenDevice.features.textureCompressionBC;
capabilities.ARBShaders = false;
capabilities.ARBShadersShadow = false;
capabilities.computeShaders = true;
capabilities.instancing = true;
capabilities.maxSampleCount = 1;
const VkSampleCountFlags sampleCountFlags =
choosenDevice.properties.limits.framebufferColorSampleCounts
& choosenDevice.properties.limits.framebufferDepthSampleCounts
& choosenDevice.properties.limits.framebufferStencilSampleCounts;
const std::array allowedSampleCountBits =
{
VK_SAMPLE_COUNT_1_BIT,
VK_SAMPLE_COUNT_2_BIT,
VK_SAMPLE_COUNT_4_BIT,
VK_SAMPLE_COUNT_8_BIT,
VK_SAMPLE_COUNT_16_BIT,
};
for (size_t index = 0; index < allowedSampleCountBits.size(); ++index)
if (sampleCountFlags & allowedSampleCountBits[index])
device->m_Capabilities.maxSampleCount = 1u << index;
capabilities.multisampling = device->m_Capabilities.maxSampleCount > 1;
capabilities.anisotropicFiltering = choosenDevice.features.samplerAnisotropy;
capabilities.maxAnisotropy = choosenDevice.properties.limits.maxSamplerAnisotropy;
capabilities.maxTextureSize =
choosenDevice.properties.limits.maxImageDimension2D;
device->m_RenderPassManager =
std::make_unique(device.get());
device->m_SamplerManager = std::make_unique(device.get());
device->m_SubmitScheduler =
std::make_unique(
device.get(), device->m_GraphicsQueueFamilyIndex, device->m_GraphicsQueue);
bool disableDescriptorIndexing = false;
CFG_GET_VAL("renderer.backend.vulkan.disabledescriptorindexing", disableDescriptorIndexing);
const bool useDescriptorIndexing = hasNeededDescriptorIndexingFeatures && !disableDescriptorIndexing;
device->m_DescriptorManager =
std::make_unique(device.get(), useDescriptorIndexing);
device->RecreateSwapChain();
device->m_Name = choosenDevice.properties.deviceName;
device->m_Version =
std::to_string(VK_API_VERSION_VARIANT(choosenDevice.properties.apiVersion)) +
"." + std::to_string(VK_API_VERSION_MAJOR(choosenDevice.properties.apiVersion)) +
"." + std::to_string(VK_API_VERSION_MINOR(choosenDevice.properties.apiVersion)) +
"." + std::to_string(VK_API_VERSION_PATCH(choosenDevice.properties.apiVersion));
device->m_DriverInformation = std::to_string(choosenDevice.properties.driverVersion);
// Refs:
// * https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/VkPhysicalDeviceProperties.html
// * https://pcisig.com/membership/member-companies
device->m_VendorID = std::to_string(choosenDevice.properties.vendorID);
device->m_Extensions = choosenDevice.extensions;
return device;
}
CDevice::CDevice() = default;

// Tears down all backend-owned objects. The device is idled first so no
// in-flight command buffer still references the resources being destroyed.
CDevice::~CDevice()
{
	if (m_Device)
		vkDeviceWaitIdle(m_Device);

	// The order of destroying does matter to avoid use-after-free and validation
	// layers complaints.
	m_BackbufferReadbackTexture.reset();
	m_SubmitScheduler.reset();
	ProcessTextureToDestroyQueue(true);
	m_RenderPassManager.reset();
	m_SamplerManager.reset();
	m_DescriptorManager.reset();
	m_SwapChain.reset();
	ProcessObjectToDestroyQueue(true);
	// Allocator, device, surface, debug messenger and instance go last, in
	// reverse order of creation.
	if (m_VMAAllocator != VK_NULL_HANDLE)
		vmaDestroyAllocator(m_VMAAllocator);
	if (m_Device != VK_NULL_HANDLE)
		vkDestroyDevice(m_Device, nullptr);
	if (m_Surface != VK_NULL_HANDLE)
		vkDestroySurfaceKHR(m_Instance, m_Surface, nullptr);
	if (GLAD_VK_EXT_debug_utils && m_DebugMessenger)
		vkDestroyDebugUtilsMessengerEXT(m_Instance, m_DebugMessenger, nullptr);
	if (m_Instance != VK_NULL_HANDLE)
		vkDestroyInstance(m_Instance, nullptr);
}
// Fills the script object `settings` with backend information for hardware
// reports: the chosen device, all available physical devices, enabled
// instance extensions and validation layers.
void CDevice::Report(const ScriptRequest& rq, JS::HandleValue settings)
{
	Script::SetProperty(rq, settings, "name", "vulkan");
	Script::SetProperty(rq, settings, "extensions", m_Extensions);

	JS::RootedValue device(rq.cx);
	Script::CreateObject(rq, &device);
	ReportAvailablePhysicalDevice(m_ChoosenDevice, rq, device);
	Script::SetProperty(rq, settings, "choosen_device", device);

	JS::RootedValue availableDevices(rq.cx);
	Script::CreateArray(rq, &availableDevices, m_AvailablePhysicalDevices.size());
	for (size_t index = 0; index < m_AvailablePhysicalDevices.size(); ++index)
	{
		// Intentionally shadows the outer root: one fresh object per device.
		JS::RootedValue device(rq.cx);
		Script::CreateObject(rq, &device);
		ReportAvailablePhysicalDevice(m_AvailablePhysicalDevices[index], rq, device);
		Script::SetPropertyInt(rq, availableDevices, index, device);
	}
	Script::SetProperty(rq, settings, "available_devices", availableDevices);

	Script::SetProperty(rq, settings, "instance_extensions", m_InstanceExtensions);
	Script::SetProperty(rq, settings, "validation_layers", m_ValidationLayers);
}
std::unique_ptr CDevice::CreateGraphicsPipelineState(
const SGraphicsPipelineStateDesc& pipelineStateDesc)
{
return CGraphicsPipelineState::Create(this, pipelineStateDesc);
}
std::unique_ptr CDevice::CreateVertexInputLayout(
const PS::span attributes)
{
return std::make_unique(this, attributes);
}
std::unique_ptr CDevice::CreateTexture(
const char* name, const ITexture::Type type, const uint32_t usage,
const Format format, const uint32_t width, const uint32_t height,
const Sampler::Desc& defaultSamplerDesc, const uint32_t MIPLevelCount, const uint32_t sampleCount)
{
return CTexture::Create(
this, name, type, usage, format, width, height,
defaultSamplerDesc, MIPLevelCount, sampleCount);
}
std::unique_ptr CDevice::CreateTexture2D(
const char* name, const uint32_t usage,
const Format format, const uint32_t width, const uint32_t height,
const Sampler::Desc& defaultSamplerDesc, const uint32_t MIPLevelCount, const uint32_t sampleCount)
{
return CreateTexture(
name, ITexture::Type::TEXTURE_2D, usage, format,
width, height, defaultSamplerDesc, MIPLevelCount, sampleCount);
}
std::unique_ptr CDevice::CreateFramebuffer(
const char* name, SColorAttachment* colorAttachment,
SDepthStencilAttachment* depthStencilAttachment)
{
return CFramebuffer::Create(
this, name, colorAttachment, depthStencilAttachment);
}
std::unique_ptr CDevice::CreateBuffer(
const char* name, const IBuffer::Type type, const uint32_t size, const bool dynamic)
{
return CreateCBuffer(name, type, size, dynamic);
}
std::unique_ptr CDevice::CreateCBuffer(
const char* name, const IBuffer::Type type, const uint32_t size, const bool dynamic)
{
return CBuffer::Create(this, name, type, size, dynamic);
}
std::unique_ptr CDevice::CreateShaderProgram(
const CStr& name, const CShaderDefines& defines)
{
return CShaderProgram::Create(this, name, defines);
}
std::unique_ptr CDevice::CreateCommandContext()
{
return CDeviceCommandContext::Create(this);
}
// Acquires the next swap chain image for rendering. Returns false if no
// usable swap chain could be (re)created or the image couldn't be acquired.
bool CDevice::AcquireNextBackbuffer()
{
	// The swap chain might have been invalidated (e.g. after a resize or an
	// out-of-date present); try to recreate it once before giving up.
	if (!IsSwapChainValid())
	{
		vkDeviceWaitIdle(m_Device);
		RecreateSwapChain();
		if (!IsSwapChainValid())
			return false;
	}
	PROFILE3("AcquireNextBackbuffer");
	return m_SubmitScheduler->AcquireNextImage(*m_SwapChain);
}
// Returns the framebuffer wrapping the currently acquired swap chain image
// with the requested load/store operations, or nullptr without a valid
// swap chain.
IFramebuffer* CDevice::GetCurrentBackbuffer(
	const AttachmentLoadOp colorAttachmentLoadOp,
	const AttachmentStoreOp colorAttachmentStoreOp,
	const AttachmentLoadOp depthStencilAttachmentLoadOp,
	const AttachmentStoreOp depthStencilAttachmentStoreOp)
{
	if (!IsSwapChainValid())
		return nullptr;
	return m_SwapChain->GetCurrentBackbuffer(
		colorAttachmentLoadOp, colorAttachmentStoreOp,
		depthStencilAttachmentLoadOp, depthStencilAttachmentStoreOp);
}
// Presents the current backbuffer and advances the frame counter.
void CDevice::Present()
{
	// Nothing to present without a valid swap chain.
	if (!IsSwapChainValid())
		return;
	PROFILE3("Present");
	m_SubmitScheduler->Present(*m_SwapChain);

	// A present ends the frame: destroy deferred objects whose frames have
	// left the in-flight window.
	ProcessObjectToDestroyQueue();
	ProcessTextureToDestroyQueue();

	++m_FrameID;
}
// Recreates the swap chain when the window size changed or the swap chain
// is invalid. The depth texture size mirrors the swap chain extent, so it's
// used as the reference for the comparison.
void CDevice::OnWindowResize(const uint32_t width, const uint32_t height)
{
	if (!IsSwapChainValid() ||
		width != m_SwapChain->GetDepthTexture()->GetWidth() ||
		height != m_SwapChain->GetDepthTexture()->GetHeight())
	{
		RecreateSwapChain();
	}
}
// Returns whether `format` can be used for sampled textures on this device.
bool CDevice::IsTextureFormatSupported(const Format format) const
{
	switch (format)
	{
	case Format::UNDEFINED:
		return false;
	// 24-bit RGB is rejected unconditionally here.
	case Format::R8G8B8_UNORM:
		return false;
	// S3TC/DXT compressed formats depend on the capability detected at
	// device creation.
	case Format::BC1_RGB_UNORM: FALLTHROUGH;
	case Format::BC1_RGBA_UNORM: FALLTHROUGH;
	case Format::BC2_UNORM: FALLTHROUGH;
	case Format::BC3_UNORM:
		return m_Capabilities.S3TC;
	default:
		break;
	}
	// For everything else ask the driver: the format must be sampleable with
	// optimal tiling.
	VkFormatProperties formatProperties{};
	vkGetPhysicalDeviceFormatProperties(
		m_ChoosenDevice.device, Mapping::FromFormat(format), &formatProperties);
	return formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
}
bool CDevice::IsFramebufferFormatSupported(const Format format) const
{
VkFormatProperties formatProperties{};
vkGetPhysicalDeviceFormatProperties(
m_ChoosenDevice.device, Mapping::FromFormat(format), &formatProperties);
if (IsDepthFormat(format))
return formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
return formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
}
// Picks the preferred depth(-stencil) format supported for `usage`.
// At least one of depth/stencil must be requested. Returns
// Format::UNDEFINED when no candidate is supported.
// Note: restores the extraction-stripped std::array template arguments.
Format CDevice::GetPreferredDepthStencilFormat(
	const uint32_t usage, const bool depth, const bool stencil) const
{
	ENSURE(depth || stencil);
	Format format = Format::UNDEFINED;
	if (stencil)
	{
		// https://github.com/KhronosGroup/Vulkan-Guide/blob/main/chapters/depth.adoc#depth-formats
		// At least one of VK_FORMAT_D24_UNORM_S8_UINT or VK_FORMAT_D32_SFLOAT_S8_UINT
		// must also be supported.
		if (IsFormatSupportedForUsage(Format::D24_UNORM_S8_UINT, usage))
			format = Format::D24_UNORM_S8_UINT;
		else
			format = Format::D32_SFLOAT_S8_UINT;
	}
	else
	{
		std::array<Format, 3> formatRequestOrder;
		// TODO: add most known vendors to enum.
		// https://developer.nvidia.com/blog/vulkan-dos-donts/
		// 0x10DE is the NVIDIA PCI vendor ID; D24 is preferred there.
		if (m_ChoosenDevice.properties.vendorID == 0x10DE)
			formatRequestOrder = {Format::D24_UNORM, Format::D32_SFLOAT, Format::D16_UNORM};
		else
			formatRequestOrder = {Format::D32_SFLOAT, Format::D24_UNORM, Format::D16_UNORM};
		for (const Format formatRequest : formatRequestOrder)
			if (IsFormatSupportedForUsage(formatRequest, usage))
			{
				format = formatRequest;
				break;
			}
	}
	return format;
}
// Translates the backend usage flags into the Vulkan format features the
// format must support with optimal tiling; all requested features must be
// present for the format to qualify.
bool CDevice::IsFormatSupportedForUsage(const Format format, const uint32_t usage) const
{
	VkFormatProperties formatProperties{};
	vkGetPhysicalDeviceFormatProperties(
		m_ChoosenDevice.device, Mapping::FromFormat(format), &formatProperties);
	VkFormatFeatureFlags expectedFeatures = 0;
	if (usage & ITexture::Usage::COLOR_ATTACHMENT)
		expectedFeatures |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
	if (usage & ITexture::Usage::DEPTH_STENCIL_ATTACHMENT)
		expectedFeatures |= VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
	if (usage & ITexture::Usage::SAMPLED)
		expectedFeatures |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
	if (usage & ITexture::Usage::TRANSFER_SRC)
		expectedFeatures |= VK_FORMAT_FEATURE_TRANSFER_SRC_BIT;
	if (usage & ITexture::Usage::TRANSFER_DST)
		expectedFeatures |= VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
	return (formatProperties.optimalTilingFeatures & expectedFeatures) == expectedFeatures;
}
// Defers destruction of a Vulkan object (and its optional VMA allocation)
// until its frame has left the in-flight window; see
// ProcessObjectToDestroyQueue.
void CDevice::ScheduleObjectToDestroy(
	VkObjectType type, const uint64_t handle, const VmaAllocation allocation)
{
	m_ObjectToDestroyQueue.push({m_FrameID, type, handle, allocation});
}
// Defers releasing a texture's descriptor references until its frame has
// left the in-flight window; see ProcessTextureToDestroyQueue.
void CDevice::ScheduleTextureToDestroy(const CTexture::UID uid)
{
	m_TextureToDestroyQueue.push({m_FrameID, uid});
}
// Attaches a debug name to a Vulkan object for validation layers and
// graphics debuggers. Silently no-ops when debug labels are unavailable.
void CDevice::SetObjectName(VkObjectType type, const uint64_t handle, const char* name)
{
	if (!m_Capabilities.debugLabels)
		return;
	VkDebugUtilsObjectNameInfoEXT nameInfo{};
	nameInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
	nameInfo.objectType = type;
	nameInfo.objectHandle = handle;
	nameInfo.pObjectName = name;
	vkSetDebugUtilsObjectNameEXT(m_Device, &nameInfo);
}
std::unique_ptr CDevice::CreateRingCommandContext(const size_t size)
{
return std::make_unique(
this, size, m_GraphicsQueueFamilyIndex, *m_SubmitScheduler);
}
// (Re)creates the swap chain at the current drawable size of the window.
void CDevice::RecreateSwapChain()
{
	// The readback texture depends on the backbuffer size/format; drop it.
	m_BackbufferReadbackTexture.reset();
	int surfaceDrawableWidth = 0, surfaceDrawableHeight = 0;
	SDL_Vulkan_GetDrawableSize(m_Window, &surfaceDrawableWidth, &surfaceDrawableHeight);
	// The old swap chain is handed over so resources can be reused.
	m_SwapChain = CSwapChain::Create(
		this, m_Surface, surfaceDrawableWidth, surfaceDrawableHeight, std::move(m_SwapChain));
}
// A swap chain is usable only if it exists and hasn't been invalidated.
bool CDevice::IsSwapChainValid()
{
	if (!m_SwapChain)
		return false;
	return m_SwapChain->IsValid();
}
void CDevice::ProcessObjectToDestroyQueue(const bool ignoreFrameID)
{
while (!m_ObjectToDestroyQueue.empty() &&
(ignoreFrameID || m_ObjectToDestroyQueue.front().frameID + NUMBER_OF_FRAMES_IN_FLIGHT < m_FrameID))
{
ObjectToDestroy& object = m_ObjectToDestroyQueue.front();
#if VK_USE_64_BIT_PTR_DEFINES
void* handle = reinterpret_cast(object.handle);
#else
const uint64_t handle = object.handle;
#endif
switch (object.type)
{
case VK_OBJECT_TYPE_IMAGE:
vmaDestroyImage(GetVMAAllocator(), static_cast(handle), object.allocation);
break;
case VK_OBJECT_TYPE_BUFFER:
vmaDestroyBuffer(GetVMAAllocator(), static_cast(handle), object.allocation);
break;
case VK_OBJECT_TYPE_IMAGE_VIEW:
vkDestroyImageView(m_Device, static_cast(handle), nullptr);
break;
case VK_OBJECT_TYPE_BUFFER_VIEW:
vkDestroyBufferView(m_Device, static_cast(handle), nullptr);
break;
case VK_OBJECT_TYPE_FRAMEBUFFER:
vkDestroyFramebuffer(m_Device, static_cast(handle), nullptr);
break;
case VK_OBJECT_TYPE_RENDER_PASS:
vkDestroyRenderPass(m_Device, static_cast(handle), nullptr);
break;
case VK_OBJECT_TYPE_SAMPLER:
vkDestroySampler(m_Device, static_cast(handle), nullptr);
break;
case VK_OBJECT_TYPE_SHADER_MODULE:
vkDestroyShaderModule(m_Device, static_cast(handle), nullptr);
break;
case VK_OBJECT_TYPE_PIPELINE_LAYOUT:
vkDestroyPipelineLayout(m_Device, static_cast(handle), nullptr);
break;
case VK_OBJECT_TYPE_PIPELINE:
vkDestroyPipeline(m_Device, static_cast(handle), nullptr);
break;
default:
debug_warn("Unsupported object to destroy type.");
}
m_ObjectToDestroyQueue.pop();
}
}
// Notifies the descriptor manager about destroyed textures once their frame
// has left the in-flight window (or unconditionally during shutdown).
void CDevice::ProcessTextureToDestroyQueue(const bool ignoreFrameID)
{
	while (!m_TextureToDestroyQueue.empty() &&
		(ignoreFrameID || m_TextureToDestroyQueue.front().first + NUMBER_OF_FRAMES_IN_FLIGHT < m_FrameID))
	{
		GetDescriptorManager().OnTextureDestroy(m_TextureToDestroyQueue.front().second);
		m_TextureToDestroyQueue.pop();
	}
}
// Returns the currently acquired swap chain image's texture, or nullptr
// without a valid swap chain.
CTexture* CDevice::GetCurrentBackbufferTexture()
{
	if (!IsSwapChainValid())
		return nullptr;
	return m_SwapChain->GetCurrentBackbufferTexture();
}
// Lazily creates (and caches) a readback texture matching the current
// backbuffer's format and size. The cache is reset whenever the swap chain
// is recreated. Returns nullptr without a valid swap chain.
CTexture* CDevice::GetOrCreateBackbufferReadbackTexture()
{
	if (!IsSwapChainValid())
		return nullptr;
	if (!m_BackbufferReadbackTexture)
	{
		CTexture* currentBackbufferTexture = m_SwapChain->GetCurrentBackbufferTexture();
		m_BackbufferReadbackTexture = CTexture::CreateReadback(
			this, "BackbufferReadback",
			currentBackbufferTexture->GetFormat(),
			currentBackbufferTexture->GetWidth(),
			currentBackbufferTexture->GetHeight());
	}
	return m_BackbufferReadbackTexture.get();
}
std::unique_ptr CreateDevice(SDL_Window* window)
{
return Vulkan::CDevice::Create(window);
}
} // namespace Vulkan
} // namespace Backend
} // namespace Renderer
Index: ps/trunk/source/renderer/backend/vulkan/ShaderProgram.cpp
===================================================================
--- ps/trunk/source/renderer/backend/vulkan/ShaderProgram.cpp (revision 27838)
+++ ps/trunk/source/renderer/backend/vulkan/ShaderProgram.cpp (revision 27839)
@@ -1,699 +1,703 @@
/* Copyright (C) 2023 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* 0 A.D. is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
*/
#include "precompiled.h"
#include "ShaderProgram.h"
#include "graphics/ShaderDefines.h"
#include "ps/CLogger.h"
#include "ps/CStr.h"
#include "ps/CStrInternStatic.h"
#include "ps/Filesystem.h"
#include "ps/Profile.h"
#include "ps/XML/Xeromyces.h"
#include "renderer/backend/vulkan/DescriptorManager.h"
#include "renderer/backend/vulkan/Device.h"
#include "renderer/backend/vulkan/Texture.h"
+#include "renderer/backend/vulkan/Utilities.h"
#include <algorithm>
#include <limits>
namespace Renderer
{
namespace Backend
{
namespace Vulkan
{
namespace
{
// Loads a SPIR-V binary from the VFS and creates a VkShaderModule from it.
// Returns VK_NULL_HANDLE (with logging) on load or creation failure.
// Note: resolves the unmerged diff residue to the post-revision code and
// restores the extraction-stripped cast template arguments.
VkShaderModule CreateShaderModule(CDevice* device, const VfsPath& path)
{
	CVFSFile file;
	if (file.Load(g_VFS, path) != PSRETURN_OK)
	{
		LOGERROR("Failed to load shader file: '%s'", path.string8());
		return VK_NULL_HANDLE;
	}
	VkShaderModuleCreateInfo createInfo{};
	createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
	// Casting to uint32_t requires to fit alignment and size.
	ENSURE(file.GetBufferSize() % 4 == 0);
	ENSURE(reinterpret_cast<uintptr_t>(file.GetBuffer()) % alignof(uint32_t) == 0u);
	createInfo.codeSize = file.GetBufferSize();
	createInfo.pCode = reinterpret_cast<const uint32_t*>(file.GetBuffer());
	VkShaderModule shaderModule;
	const VkResult result = vkCreateShaderModule(device->GetVkDevice(), &createInfo, nullptr, &shaderModule);
	if (result != VK_SUCCESS)
	{
		LOGERROR("Failed to create shader module from file: '%s' %d (%s)",
			path.string8(), static_cast<int>(result), Utilities::GetVkResultName(result));
		return VK_NULL_HANDLE;
	}
	device->SetObjectName(VK_OBJECT_TYPE_SHADER_MODULE, shaderModule, path.string8().c_str());
	return shaderModule;
}
// Scans the program XML description for a <program> element whose <defines>
// exactly match `defines` and returns its shader file path. Returns an
// empty path when no program matches or the XML fails to load.
VfsPath FindProgramMatchingDefines(const VfsPath& xmlFilename, const CShaderDefines& defines)
{
	CXeromyces xeroFile;
	PSRETURN ret = xeroFile.Load(g_VFS, xmlFilename);
	if (ret != PSRETURN_OK)
		return {};
	// TODO: add XML validation.
#define EL(x) const int el_##x = xeroFile.GetElementID(#x)
#define AT(x) const int at_##x = xeroFile.GetAttributeID(#x)
	EL(define);
	EL(defines);
	EL(program);
	AT(file);
	AT(name);
	AT(value);
#undef AT
#undef EL
	const CStrIntern strUndefined("UNDEFINED");
	VfsPath programFilename;
	XMBElement root = xeroFile.GetRoot();
	XERO_ITER_EL(root, rootChild)
	{
		if (rootChild.GetNodeName() == el_program)
		{
			// Collect this program's defines, then compare the full set.
			CShaderDefines programDefines;
			XERO_ITER_EL(rootChild, programChild)
			{
				if (programChild.GetNodeName() == el_defines)
				{
					XERO_ITER_EL(programChild, definesChild)
					{
						XMBAttributeList attributes = definesChild.GetAttributes();
						if (definesChild.GetNodeName() == el_define)
						{
							// "UNDEFINED" marks a define that must be absent.
							const CStrIntern value(attributes.GetNamedItem(at_value));
							if (value == strUndefined)
								continue;
							programDefines.Add(
								CStrIntern(attributes.GetNamedItem(at_name)), value);
						}
					}
				}
			}
			if (programDefines == defines)
				return L"shaders/" + rootChild.GetAttributes().GetNamedItem(at_file).FromUTF8();
		}
	}
	return {};
}
} // anonymous namespace
// Returns the device this vertex input layout was created on.
IDevice* CVertexInputLayout::GetDevice()
{
	return m_Device;
}
// static
std::unique_ptr CShaderProgram::Create(
CDevice* device, const CStr& name, const CShaderDefines& baseDefines)
{
const VfsPath xmlFilename = L"shaders/" + wstring_from_utf8(name) + L".xml";
std::unique_ptr shaderProgram(new CShaderProgram());
shaderProgram->m_Device = device;
shaderProgram->m_FileDependencies = {xmlFilename};
CShaderDefines defines = baseDefines;
if (device->GetDescriptorManager().UseDescriptorIndexing())
defines.Add(str_USE_DESCRIPTOR_INDEXING, str_1);
const VfsPath programFilename = FindProgramMatchingDefines(xmlFilename, defines);
if (programFilename.empty())
{
LOGERROR("Program '%s' with required defines not found.", name);
for (const auto& pair : defines.GetMap())
LOGERROR(" \"%s\": \"%s\"", pair.first.c_str(), pair.second.c_str());
return nullptr;
}
shaderProgram->m_FileDependencies.emplace_back(programFilename);
CXeromyces programXeroFile;
if (programXeroFile.Load(g_VFS, programFilename) != PSRETURN_OK)
return nullptr;
XMBElement programRoot = programXeroFile.GetRoot();
#define EL(x) const int el_##x = programXeroFile.GetElementID(#x)
#define AT(x) const int at_##x = programXeroFile.GetAttributeID(#x)
EL(binding);
EL(descriptor_set);
EL(descriptor_sets);
EL(fragment);
EL(member);
EL(push_constant);
EL(stream);
EL(vertex);
AT(binding);
AT(file);
AT(location);
AT(name);
AT(offset);
AT(set);
AT(size);
AT(type);
#undef AT
#undef EL
auto addPushConstant =
[&pushConstants=shaderProgram->m_PushConstants, &pushConstantDataFlags=shaderProgram->m_PushConstantDataFlags, &at_name, &at_offset, &at_size](
const XMBElement& element, VkShaderStageFlags stageFlags) -> bool
{
const XMBAttributeList attributes = element.GetAttributes();
const CStrIntern name = CStrIntern(attributes.GetNamedItem(at_name));
const uint32_t size = attributes.GetNamedItem(at_size).ToUInt();
const uint32_t offset = attributes.GetNamedItem(at_offset).ToUInt();
if (offset % 4 != 0 || size % 4 != 0)
{
LOGERROR("Push constant should have offset and size be multiple of 4.");
return false;
}
for (PushConstant& pushConstant : pushConstants)
{
if (pushConstant.name == name)
{
if (size != pushConstant.size || offset != pushConstant.offset)
{
LOGERROR("All shared push constants must have the same size and offset.");
return false;
}
// We found the same constant so we don't need to add it again.
pushConstant.stageFlags |= stageFlags;
for (uint32_t index = 0; index < (size >> 2); ++index)
pushConstantDataFlags[(offset >> 2) + index] |= stageFlags;
return true;
}
if (offset + size < pushConstant.offset || offset >= pushConstant.offset + pushConstant.size)
continue;
LOGERROR("All push constant must not intersect each other in memory.");
return false;
}
pushConstants.push_back({name, offset, size, stageFlags});
for (uint32_t index = 0; index < (size >> 2); ++index)
pushConstantDataFlags[(offset >> 2) + index] = stageFlags;
return true;
};
auto addDescriptorSets = [&](const XMBElement& element) -> bool
{
const bool useDescriptorIndexing =
device->GetDescriptorManager().UseDescriptorIndexing();
// TODO: reduce the indentation.
XERO_ITER_EL(element, descriporSetsChild)
{
if (descriporSetsChild.GetNodeName() == el_descriptor_set)
{
const uint32_t set = descriporSetsChild.GetAttributes().GetNamedItem(at_set).ToUInt();
if (useDescriptorIndexing && set == 0 && !descriporSetsChild.GetChildNodes().empty())
{
LOGERROR("Descritor set for descriptor indexing shouldn't contain bindings.");
return false;
}
XERO_ITER_EL(descriporSetsChild, descriporSetChild)
{
if (descriporSetChild.GetNodeName() == el_binding)
{
const XMBAttributeList attributes = descriporSetChild.GetAttributes();
const uint32_t binding = attributes.GetNamedItem(at_binding).ToUInt();
const uint32_t size = attributes.GetNamedItem(at_size).ToUInt();
const CStr type = attributes.GetNamedItem(at_type);
if (type == "uniform")
{
const uint32_t expectedSet =
device->GetDescriptorManager().GetUniformSet();
if (set != expectedSet || binding != 0)
{
LOGERROR("We support only a single uniform block per shader program.");
return false;
}
shaderProgram->m_MaterialConstantsDataSize = size;
XERO_ITER_EL(descriporSetChild, bindingChild)
{
if (bindingChild.GetNodeName() == el_member)
{
const XMBAttributeList memberAttributes = bindingChild.GetAttributes();
const uint32_t offset = memberAttributes.GetNamedItem(at_offset).ToUInt();
const uint32_t size = memberAttributes.GetNamedItem(at_size).ToUInt();
const CStrIntern name{memberAttributes.GetNamedItem(at_name)};
bool found = false;
for (const Uniform& uniform : shaderProgram->m_Uniforms)
{
if (uniform.name == name)
{
if (offset != uniform.offset || size != uniform.size)
{
LOGERROR("All uniforms across all stage should match.");
return false;
}
found = true;
}
else
{
if (offset + size <= uniform.offset || uniform.offset + uniform.size <= offset)
continue;
LOGERROR("Uniforms must not overlap each other.");
return false;
}
}
if (!found)
shaderProgram->m_Uniforms.push_back({name, offset, size});
}
}
}
else if (type == "sampler1D" || type == "sampler2D" || type == "sampler2DShadow" || type == "sampler3D" || type == "samplerCube")
{
if (useDescriptorIndexing)
{
LOGERROR("We support only uniform descriptor sets with enabled descriptor indexing.");
return false;
}
const CStrIntern name{attributes.GetNamedItem(at_name)};
shaderProgram->m_TextureMapping[name] = binding;
shaderProgram->m_TexturesDescriptorSetSize =
std::max(shaderProgram->m_TexturesDescriptorSetSize, binding + 1);
}
else
{
LOGERROR("Unsupported binding: '%s'", type.c_str());
return false;
}
}
}
}
}
return true;
};
XERO_ITER_EL(programRoot, programChild)
{
if (programChild.GetNodeName() == el_vertex)
{
const VfsPath shaderModulePath =
L"shaders/" + programChild.GetAttributes().GetNamedItem(at_file).FromUTF8();
shaderProgram->m_FileDependencies.emplace_back(shaderModulePath);
shaderProgram->m_ShaderModules.emplace_back(
CreateShaderModule(device, shaderModulePath));
if (shaderProgram->m_ShaderModules.back() == VK_NULL_HANDLE)
return nullptr;
VkPipelineShaderStageCreateInfo vertexShaderStageInfo{};
vertexShaderStageInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
vertexShaderStageInfo.stage = VK_SHADER_STAGE_VERTEX_BIT;
vertexShaderStageInfo.module = shaderProgram->m_ShaderModules.back();
vertexShaderStageInfo.pName = "main";
shaderProgram->m_Stages.emplace_back(std::move(vertexShaderStageInfo));
XERO_ITER_EL(programChild, stageChild)
{
if (stageChild.GetNodeName() == el_stream)
{
XMBAttributeList attributes = stageChild.GetAttributes();
const uint32_t location = attributes.GetNamedItem(at_location).ToUInt();
const CStr streamName = attributes.GetNamedItem(at_name);
VertexAttributeStream stream = VertexAttributeStream::UV7;
if (streamName == "pos")
stream = VertexAttributeStream::POSITION;
else if (streamName == "normal")
stream = VertexAttributeStream::NORMAL;
else if (streamName == "color")
stream = VertexAttributeStream::COLOR;
else if (streamName == "uv0")
stream = VertexAttributeStream::UV0;
else if (streamName == "uv1")
stream = VertexAttributeStream::UV1;
else if (streamName == "uv2")
stream = VertexAttributeStream::UV2;
else if (streamName == "uv3")
stream = VertexAttributeStream::UV3;
else if (streamName == "uv4")
stream = VertexAttributeStream::UV4;
else if (streamName == "uv5")
stream = VertexAttributeStream::UV5;
else if (streamName == "uv6")
stream = VertexAttributeStream::UV6;
else if (streamName == "uv7")
stream = VertexAttributeStream::UV7;
else
debug_warn("Unknown stream");
shaderProgram->m_StreamLocations[stream] = location;
}
else if (stageChild.GetNodeName() == el_push_constant)
{
if (!addPushConstant(stageChild, VK_SHADER_STAGE_VERTEX_BIT))
return nullptr;
}
else if (stageChild.GetNodeName() == el_descriptor_sets)
{
if (!addDescriptorSets(stageChild))
return nullptr;
}
}
}
else if (programChild.GetNodeName() == el_fragment)
{
const VfsPath shaderModulePath =
L"shaders/" + programChild.GetAttributes().GetNamedItem(at_file).FromUTF8();
shaderProgram->m_FileDependencies.emplace_back(shaderModulePath);
shaderProgram->m_ShaderModules.emplace_back(
CreateShaderModule(device, shaderModulePath));
if (shaderProgram->m_ShaderModules.back() == VK_NULL_HANDLE)
return nullptr;
VkPipelineShaderStageCreateInfo fragmentShaderStageInfo{};
fragmentShaderStageInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
fragmentShaderStageInfo.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
fragmentShaderStageInfo.module = shaderProgram->m_ShaderModules.back();
fragmentShaderStageInfo.pName = "main";
shaderProgram->m_Stages.emplace_back(std::move(fragmentShaderStageInfo));
XERO_ITER_EL(programChild, stageChild)
{
if (stageChild.GetNodeName() == el_push_constant)
{
if (!addPushConstant(stageChild, VK_SHADER_STAGE_FRAGMENT_BIT))
return nullptr;
}
else if (stageChild.GetNodeName() == el_descriptor_sets)
{
if (!addDescriptorSets(stageChild))
return nullptr;
}
}
}
}
if (shaderProgram->m_Stages.empty())
{
LOGERROR("Program should contain at least one stage.");
return nullptr;
}
for (size_t index = 0; index < shaderProgram->m_PushConstants.size(); ++index)
shaderProgram->m_PushConstantMapping[shaderProgram->m_PushConstants[index].name] = index;
std::vector pushConstantRanges;
pushConstantRanges.reserve(shaderProgram->m_PushConstants.size());
std::transform(
shaderProgram->m_PushConstants.begin(), shaderProgram->m_PushConstants.end(),
std::back_insert_iterator(pushConstantRanges), [](const PushConstant& pushConstant)
{
return VkPushConstantRange{pushConstant.stageFlags, pushConstant.offset, pushConstant.size};
});
if (!pushConstantRanges.empty())
{
std::sort(pushConstantRanges.begin(), pushConstantRanges.end(),
[](const VkPushConstantRange& lhs, const VkPushConstantRange& rhs)
{
return lhs.offset < rhs.offset;
});
// Merge subsequent constants.
auto it = pushConstantRanges.begin();
while (std::next(it) != pushConstantRanges.end())
{
auto next = std::next(it);
if (it->stageFlags == next->stageFlags)
{
it->size = next->offset - it->offset + next->size;
pushConstantRanges.erase(next);
}
else
it = next;
}
for (const VkPushConstantRange& range : pushConstantRanges)
if (std::count_if(pushConstantRanges.begin(), pushConstantRanges.end(),
[stageFlags=range.stageFlags](const VkPushConstantRange& range) { return range.stageFlags & stageFlags; }) != 1)
{
LOGERROR("Any two range must not include the same stage in stageFlags.");
return nullptr;
}
}
for (size_t index = 0; index < shaderProgram->m_Uniforms.size(); ++index)
shaderProgram->m_UniformMapping[shaderProgram->m_Uniforms[index].name] = index;
if (!shaderProgram->m_Uniforms.empty())
{
if (shaderProgram->m_MaterialConstantsDataSize > device->GetChoosenPhysicalDevice().properties.limits.maxUniformBufferRange)
{
LOGERROR("Uniform buffer size is too big for the device.");
return nullptr;
}
shaderProgram->m_MaterialConstantsData =
std::make_unique(shaderProgram->m_MaterialConstantsDataSize);
}
std::vector layouts =
device->GetDescriptorManager().GetDescriptorSetLayouts();
if (shaderProgram->m_TexturesDescriptorSetSize > 0)
{
ENSURE(!device->GetDescriptorManager().UseDescriptorIndexing());
shaderProgram->m_BoundTextures.resize(shaderProgram->m_TexturesDescriptorSetSize);
shaderProgram->m_BoundTexturesUID.resize(shaderProgram->m_TexturesDescriptorSetSize);
shaderProgram->m_BoundTexturesOutdated = true;
shaderProgram->m_TexturesDescriptorSetLayout =
device->GetDescriptorManager().GetSingleTypeDescritorSetLayout(
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, shaderProgram->m_TexturesDescriptorSetSize);
layouts.emplace_back(shaderProgram->m_TexturesDescriptorSetLayout);
}
VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo{};
pipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipelineLayoutCreateInfo.setLayoutCount = layouts.size();
pipelineLayoutCreateInfo.pSetLayouts = layouts.data();
pipelineLayoutCreateInfo.pushConstantRangeCount = pushConstantRanges.size();
pipelineLayoutCreateInfo.pPushConstantRanges = pushConstantRanges.data();
const VkResult result = vkCreatePipelineLayout(
device->GetVkDevice(), &pipelineLayoutCreateInfo, nullptr,
&shaderProgram->m_PipelineLayout);
if (result != VK_SUCCESS)
{
- LOGERROR("Failed to create a pipeline layout: %d", static_cast(result));
+ LOGERROR("Failed to create a pipeline layout: %d (%s)",
+ static_cast(result), Utilities::GetVkResultName(result));
return nullptr;
}
return shaderProgram;
}
// Trivial constructor: all initialization happens in CShaderProgram::Create.
CShaderProgram::CShaderProgram() = default;
CShaderProgram::~CShaderProgram()
{
	// Destruction is deferred via the device queue: the GPU may still be
	// using these objects for in-flight frames.
	if (m_PipelineLayout != VK_NULL_HANDLE)
		m_Device->ScheduleObjectToDestroy(VK_OBJECT_TYPE_PIPELINE_LAYOUT, m_PipelineLayout, VK_NULL_HANDLE);
	for (VkShaderModule shaderModule : m_ShaderModules)
		if (shaderModule != VK_NULL_HANDLE)
			m_Device->ScheduleObjectToDestroy(VK_OBJECT_TYPE_SHADER_MODULE, shaderModule, VK_NULL_HANDLE);
}
// Returns the device this shader program was created on.
IDevice* CShaderProgram::GetDevice()
{
	return m_Device;
}
// Returns the binding slot for `name`, or -1 if unknown. Slots form a
// single index space: [push constants][uniforms][textures].
int32_t CShaderProgram::GetBindingSlot(const CStrIntern name) const
{
	if (auto it = m_PushConstantMapping.find(name); it != m_PushConstantMapping.end())
		return it->second;
	if (auto it = m_UniformMapping.find(name); it != m_UniformMapping.end())
		return it->second + m_PushConstants.size();
	if (auto it = m_TextureMapping.find(name); it != m_TextureMapping.end())
		return it->second + m_PushConstants.size() + m_UniformMapping.size();
	return -1;
}
std::vector CShaderProgram::GetFileDependencies() const
{
return m_FileDependencies;
}
// Returns the shader input location of the given vertex stream, or
// uint32_t max as the "not present" sentinel.
// Restores the extraction-stripped std::numeric_limits template argument.
uint32_t CShaderProgram::GetStreamLocation(const VertexAttributeStream stream) const
{
	const auto it = m_StreamLocations.find(stream);
	return it != m_StreamLocations.end() ? it->second : std::numeric_limits<uint32_t>::max();
}
// Called when the program becomes active: material uniforms must be
// re-uploaded for the new binding.
void CShaderProgram::Bind()
{
	if (m_MaterialConstantsData)
		m_MaterialConstantsDataOutdated = true;
}
// Called when the program is deactivated. Only relevant without descriptor
// indexing: clears the per-program texture bindings so the descriptor set
// is rebuilt on the next use.
void CShaderProgram::Unbind()
{
	if (m_TexturesDescriptorSetSize > 0)
	{
		for (CTexture*& texture : m_BoundTextures)
			texture = nullptr;
		for (CTexture::UID& uid : m_BoundTexturesUID)
			uid = 0;
		m_BoundTexturesOutdated = true;
	}
}
// Flushes pending state before a draw: binds the texture descriptor set if
// needed and uploads dirty push constant data.
void CShaderProgram::PreDraw(VkCommandBuffer commandBuffer)
{
	UpdateActiveDescriptorSet(commandBuffer);
	// m_PushConstantDataMask has one bit per 4-byte word of push constant
	// storage that was written since the last flush.
	if (m_PushConstantDataMask)
	{
		for (uint32_t index = 0; index < 32;)
		{
			if (!(m_PushConstantDataMask & (1 << index)))
			{
				++index;
				continue;
			}
			// Coalesce adjacent dirty words sharing the same stage flags into
			// a single vkCmdPushConstants call.
			uint32_t indexEnd = index + 1;
			while (indexEnd < 32 && (m_PushConstantDataMask & (1 << indexEnd)) && m_PushConstantDataFlags[index] == m_PushConstantDataFlags[indexEnd])
				++indexEnd;
			vkCmdPushConstants(
				commandBuffer, GetPipelineLayout(),
				m_PushConstantDataFlags[index],
				index * 4, (indexEnd - index) * 4, m_PushConstantData.data() + index * 4);
			index = indexEnd;
		}
		m_PushConstantDataMask = 0;
	}
}
// Rebuilds and binds the combined-image-sampler descriptor set when the
// bound textures changed since the last draw.
void CShaderProgram::UpdateActiveDescriptorSet(
	VkCommandBuffer commandBuffer)
{
	if (m_BoundTexturesOutdated)
	{
		m_BoundTexturesOutdated = false;
		// Fetch (or create) a descriptor set matching the current textures.
		m_ActiveTexturesDescriptorSet =
			m_Device->GetDescriptorManager().GetSingleTypeDescritorSet(
				VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, m_TexturesDescriptorSetLayout,
				m_BoundTexturesUID, m_BoundTextures);
		ENSURE(m_ActiveTexturesDescriptorSet != VK_NULL_HANDLE);
		// Bound as set index 1 (firstSet = 1).
		vkCmdBindDescriptorSets(
			commandBuffer, GetPipelineBindPoint(), GetPipelineLayout(),
			1, 1, &m_ActiveTexturesDescriptorSet, 0, nullptr);
	}
}
void CShaderProgram::SetUniform(
const int32_t bindingSlot,
const float value)
{
const float values[1] = {value};
SetUniform(bindingSlot, PS::span(values, values + 1));
}
void CShaderProgram::SetUniform(
const int32_t bindingSlot,
const float valueX, const float valueY)
{
const float values[2] = {valueX, valueY};
SetUniform(bindingSlot, PS::span(values, values + 2));
}
void CShaderProgram::SetUniform(
const int32_t bindingSlot,
const float valueX, const float valueY,
const float valueZ)
{
const float values[3] = {valueX, valueY, valueZ};
SetUniform(bindingSlot, PS::span(values, values + 3));
}
void CShaderProgram::SetUniform(
const int32_t bindingSlot,
const float valueX, const float valueY,
const float valueZ, const float valueW)
{
const float values[4] = {valueX, valueY, valueZ, valueW};
SetUniform(bindingSlot, PS::span(values, values + 4));
}
void CShaderProgram::SetUniform(const int32_t bindingSlot, PS::span values)
{
if (bindingSlot < 0)
return;
const auto data = GetUniformData(bindingSlot, values.size() * sizeof(float));
std::memcpy(data.first, values.data(), data.second);
}
// Returns {pointer into the uniform's backing storage, size in bytes} for
// the given slot. Slots below m_PushConstants.size() address push constant
// storage (and mark the touched words dirty); the rest address the material
// constants buffer. dataSize is the caller's buffer size and must be at
// least the uniform's declared size.
std::pair CShaderProgram::GetUniformData(
	const int32_t bindingSlot, const uint32_t dataSize)
{
	if (bindingSlot < static_cast(m_PushConstants.size()))
	{
		const uint32_t size = m_PushConstants[bindingSlot].size;
		const uint32_t offset = m_PushConstants[bindingSlot].offset;
		ENSURE(size <= dataSize);
		// Mark every 4-byte word of the range dirty for the next PreDraw.
		m_PushConstantDataMask |= ((1 << (size >> 2)) - 1) << (offset >> 2);
		return {m_PushConstantData.data() + offset, size};
	}
	else
	{
		ENSURE(bindingSlot - m_PushConstants.size() < m_Uniforms.size());
		const Uniform& uniform = m_Uniforms[bindingSlot - m_PushConstants.size()];
		// Material constants are re-uploaded lazily before the next draw.
		m_MaterialConstantsDataOutdated = true;
		const uint32_t size = uniform.size;
		const uint32_t offset = uniform.offset;
		ENSURE(size <= dataSize);
		return {m_MaterialConstantsData.get() + offset, size};
	}
}
void CShaderProgram::SetTexture(const int32_t bindingSlot, CTexture* texture)
{
if (bindingSlot < 0)
return;
CDescriptorManager& descriptorManager = m_Device->GetDescriptorManager();
if (descriptorManager.UseDescriptorIndexing())
{
const uint32_t descriptorIndex = descriptorManager.GetTextureDescriptor(texture->As<CTexture>());
ENSURE(bindingSlot < static_cast<int32_t>(m_PushConstants.size()));
const uint32_t size = m_PushConstants[bindingSlot].size;
const uint32_t offset = m_PushConstants[bindingSlot].offset;
ENSURE(size == sizeof(descriptorIndex));
std::memcpy(m_PushConstantData.data() + offset, &descriptorIndex, size);
m_PushConstantDataMask |= ((1 << (size >> 2)) - 1) << (offset >> 2);
}
else
{
ENSURE(bindingSlot >= static_cast<int32_t>(m_PushConstants.size() + m_UniformMapping.size()));
const uint32_t index = bindingSlot - (m_PushConstants.size() + m_UniformMapping.size());
if (m_BoundTexturesUID[index] != texture->GetUID())
{
m_BoundTextures[index] = texture;
m_BoundTexturesUID[index] = texture->GetUID();
m_BoundTexturesOutdated = true;
}
}
}
} // namespace Vulkan
} // namespace Backend
} // namespace Renderer
Index: ps/trunk/source/renderer/backend/vulkan/SwapChain.cpp
===================================================================
--- ps/trunk/source/renderer/backend/vulkan/SwapChain.cpp (revision 27838)
+++ ps/trunk/source/renderer/backend/vulkan/SwapChain.cpp (revision 27839)
@@ -1,394 +1,396 @@
/* Copyright (C) 2023 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* 0 A.D. is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
*/
#include "precompiled.h"
#include "SwapChain.h"
#include "lib/hash.h"
#include "maths/MathUtil.h"
#include "ps/ConfigDB.h"
#include "ps/Profile.h"
#include "renderer/backend/vulkan/Device.h"
#include "renderer/backend/vulkan/Framebuffer.h"
#include "renderer/backend/vulkan/RingCommandContext.h"
#include "renderer/backend/vulkan/Texture.h"
#include "renderer/backend/vulkan/Utilities.h"
#include <algorithm>
#include <limits>
namespace Renderer
{
namespace Backend
{
namespace Vulkan
{
// static
std::unique_ptr<CSwapChain> CSwapChain::Create(
CDevice* device, VkSurfaceKHR surface, int surfaceDrawableWidth, int surfaceDrawableHeight,
std::unique_ptr<CSwapChain> oldSwapChain)
{
VkPhysicalDevice physicalDevice = device->GetChoosenPhysicalDevice().device;
VkSurfaceCapabilitiesKHR surfaceCapabilities{};
ENSURE_VK_SUCCESS(vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
physicalDevice, surface, &surfaceCapabilities));
const uint32_t swapChainWidth = Clamp<uint32_t>(surfaceDrawableWidth,
surfaceCapabilities.minImageExtent.width,
surfaceCapabilities.maxImageExtent.width);
const uint32_t swapChainHeight = Clamp<uint32_t>(surfaceDrawableHeight,
surfaceCapabilities.minImageExtent.height,
surfaceCapabilities.maxImageExtent.height);
// Some drivers (for example NVIDIA on Windows during minimize) might
// return zeroes for both minImageExtent and maxImageExtent. It means we're
// not able to create any swapchain. Because we can't choose zeros (they're
// not allowed) and we can't choose values bigger than maxImageExtent
// (which are also zeroes in that case).
if (swapChainWidth == 0 || swapChainHeight == 0)
return nullptr;
std::vector<VkSurfaceFormatKHR> surfaceFormats;
uint32_t surfaceFormatCount = 0;
ENSURE_VK_SUCCESS(vkGetPhysicalDeviceSurfaceFormatsKHR(
physicalDevice, surface, &surfaceFormatCount, nullptr));
if (surfaceFormatCount > 0)
{
surfaceFormats.resize(surfaceFormatCount);
ENSURE_VK_SUCCESS(vkGetPhysicalDeviceSurfaceFormatsKHR(
physicalDevice, surface, &surfaceFormatCount, surfaceFormats.data()));
}
std::vector<VkPresentModeKHR> presentModes;
uint32_t presentModeCount = 0;
ENSURE_VK_SUCCESS(vkGetPhysicalDeviceSurfacePresentModesKHR(
physicalDevice, surface, &presentModeCount, nullptr));
if (presentModeCount > 0)
{
presentModes.resize(presentModeCount);
ENSURE_VK_SUCCESS(vkGetPhysicalDeviceSurfacePresentModesKHR(
physicalDevice, surface, &presentModeCount, presentModes.data()));
}
// VK_PRESENT_MODE_FIFO_KHR is guaranteed to be supported.
VkPresentModeKHR presentMode = VK_PRESENT_MODE_FIFO_KHR;
auto isPresentModeAvailable = [&presentModes](const VkPresentModeKHR presentMode)
{
return std::find(presentModes.begin(), presentModes.end(), presentMode) != presentModes.end();
};
bool vsyncEnabled = true;
CFG_GET_VAL("vsync", vsyncEnabled);
if (vsyncEnabled)
{
// TODO: use the adaptive one when possible.
// https://gitlab.freedesktop.org/mesa/mesa/-/issues/5516
//if (isPresentModeAvailable(VK_PRESENT_MODE_MAILBOX_KHR))
// presentMode = VK_PRESENT_MODE_MAILBOX_KHR;
}
else
{
if (isPresentModeAvailable(VK_PRESENT_MODE_IMMEDIATE_KHR))
presentMode = VK_PRESENT_MODE_IMMEDIATE_KHR;
}
// Spec says:
// The number of format pairs supported must be greater than or equal to 1.
// pSurfaceFormats must not contain an entry whose value for format is
// VK_FORMAT_UNDEFINED.
const auto surfaceFormatIt =
std::find_if(surfaceFormats.begin(), surfaceFormats.end(), IsSurfaceFormatSupported);
if (surfaceFormatIt == surfaceFormats.end())
{
LOGERROR("Can't find a suitable surface format to render to.");
return nullptr;
}
const VkSurfaceFormatKHR& surfaceFormat = *surfaceFormatIt;
VkSwapchainCreateInfoKHR swapChainCreateInfo{};
swapChainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
swapChainCreateInfo.surface = surface;
// minImageCount + 1 is to have a less chance for a presenter to wait.
// maxImageCount might be zero, it means it's unlimited.
const uint32_t maxImageCount = surfaceCapabilities.maxImageCount > 0
? surfaceCapabilities.maxImageCount
: std::numeric_limits<uint32_t>::max();
const uint32_t minImageCount = surfaceCapabilities.minImageCount < maxImageCount
? surfaceCapabilities.minImageCount + 1
: surfaceCapabilities.minImageCount;
swapChainCreateInfo.minImageCount =
Clamp<uint32_t>(NUMBER_OF_FRAMES_IN_FLIGHT,
minImageCount, maxImageCount);
swapChainCreateInfo.imageFormat = surfaceFormat.format;
swapChainCreateInfo.imageColorSpace = surfaceFormat.colorSpace;
swapChainCreateInfo.imageExtent.width = swapChainWidth;
swapChainCreateInfo.imageExtent.height = swapChainHeight;
swapChainCreateInfo.imageArrayLayers = 1;
// VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT is guaranteed to present.
// VK_IMAGE_USAGE_TRANSFER_SRC_BIT allows a simpler backbuffer readback.
// VK_IMAGE_USAGE_TRANSFER_DST_BIT allows a blit to the backbuffer.
swapChainCreateInfo.imageUsage =
(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT) &
surfaceCapabilities.supportedUsageFlags;
swapChainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
// We need to set these only if imageSharingMode is VK_SHARING_MODE_CONCURRENT.
swapChainCreateInfo.queueFamilyIndexCount = 0;
swapChainCreateInfo.pQueueFamilyIndices = nullptr;
// By default VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR is preferable.
if (surfaceCapabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR)
swapChainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
else
swapChainCreateInfo.preTransform = surfaceCapabilities.currentTransform;
// By default VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR is preferable, other bits
// might require some format or rendering adjustemnts to avoid
// semi-transparent areas.
const VkCompositeAlphaFlagBitsKHR compositeAlphaOrder[] =
{
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR,
VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR,
VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR
};
for (const VkCompositeAlphaFlagBitsKHR compositeAlpha : compositeAlphaOrder)
if (compositeAlpha & surfaceCapabilities.supportedCompositeAlpha)
{
swapChainCreateInfo.compositeAlpha = compositeAlpha;
break;
}
swapChainCreateInfo.presentMode = presentMode;
swapChainCreateInfo.clipped = VK_TRUE;
if (oldSwapChain)
swapChainCreateInfo.oldSwapchain = oldSwapChain->GetVkSwapchain();
std::unique_ptr<CSwapChain> swapChain(new CSwapChain());
swapChain->m_Device = device;
ENSURE_VK_SUCCESS(vkCreateSwapchainKHR(
device->GetVkDevice(), &swapChainCreateInfo, nullptr, &swapChain->m_SwapChain));
char nameBuffer[64];
snprintf(nameBuffer, std::size(nameBuffer), "SwapChain: %dx%d", surfaceDrawableWidth, surfaceDrawableHeight);
device->SetObjectName(VK_OBJECT_TYPE_SWAPCHAIN_KHR, swapChain->m_SwapChain, nameBuffer);
uint32_t imageCount = 0;
VkResult getSwapchainImagesResult = VK_INCOMPLETE;
do
{
getSwapchainImagesResult = vkGetSwapchainImagesKHR(
device->GetVkDevice(), swapChain->m_SwapChain, &imageCount, nullptr);
if (getSwapchainImagesResult == VK_SUCCESS && imageCount > 0)
{
swapChain->m_Images.resize(imageCount);
getSwapchainImagesResult = vkGetSwapchainImagesKHR(
device->GetVkDevice(), swapChain->m_SwapChain, &imageCount, swapChain->m_Images.data());
}
} while (getSwapchainImagesResult == VK_INCOMPLETE);
LOGMESSAGE("SwapChain image count: %u (min: %u)", imageCount, swapChainCreateInfo.minImageCount);
ENSURE_VK_SUCCESS(getSwapchainImagesResult);
ENSURE(imageCount > 0);
swapChain->m_DepthTexture = CTexture::Create(
device, "SwapChainDepthTexture", ITexture::Type::TEXTURE_2D,
ITexture::Usage::DEPTH_STENCIL_ATTACHMENT,
device->GetPreferredDepthStencilFormat(
Renderer::Backend::ITexture::Usage::DEPTH_STENCIL_ATTACHMENT,
true, true),
swapChainWidth, swapChainHeight, Sampler::MakeDefaultSampler(
Sampler::Filter::NEAREST, Sampler::AddressMode::CLAMP_TO_EDGE),
1, 1);
swapChain->m_ImageFormat = swapChainCreateInfo.imageFormat;
swapChain->m_Textures.resize(imageCount);
swapChain->m_Backbuffers.resize(imageCount);
for (size_t index = 0; index < imageCount; ++index)
{
snprintf(nameBuffer, std::size(nameBuffer), "SwapChainImage #%zu", index);
device->SetObjectName(VK_OBJECT_TYPE_IMAGE, swapChain->m_Images[index], nameBuffer);
snprintf(nameBuffer, std::size(nameBuffer), "SwapChainImageView #%zu", index);
swapChain->m_Textures[index] = CTexture::WrapBackbufferImage(
device, nameBuffer, swapChain->m_Images[index], swapChainCreateInfo.imageFormat,
swapChainCreateInfo.imageUsage, swapChainWidth, swapChainHeight);
}
swapChain->m_IsValid = true;
return swapChain;
}
CSwapChain::CSwapChain() = default;
CSwapChain::~CSwapChain()
{
m_Backbuffers.clear();
m_Textures.clear();
m_DepthTexture.reset();
if (m_SwapChain != VK_NULL_HANDLE)
vkDestroySwapchainKHR(m_Device->GetVkDevice(), m_SwapChain, nullptr);
}
size_t CSwapChain::SwapChainBackbuffer::BackbufferKeyHash::operator()(const BackbufferKey& key) const
{
size_t seed = 0;
hash_combine(seed, std::get<0>(key));
hash_combine(seed, std::get<1>(key));
hash_combine(seed, std::get<2>(key));
hash_combine(seed, std::get<3>(key));
return seed;
}
CSwapChain::SwapChainBackbuffer::SwapChainBackbuffer() = default;
CSwapChain::SwapChainBackbuffer::SwapChainBackbuffer(SwapChainBackbuffer&& other) = default;
CSwapChain::SwapChainBackbuffer& CSwapChain::SwapChainBackbuffer::operator=(SwapChainBackbuffer&& other) = default;
bool CSwapChain::AcquireNextImage(VkSemaphore acquireImageSemaphore)
{
ENSURE(m_CurrentImageIndex == std::numeric_limits<uint32_t>::max());
const VkResult acquireResult = vkAcquireNextImageKHR(
m_Device->GetVkDevice(), m_SwapChain, std::numeric_limits<uint64_t>::max(),
acquireImageSemaphore,
VK_NULL_HANDLE, &m_CurrentImageIndex);
if (acquireResult != VK_SUCCESS)
{
if (acquireResult == VK_ERROR_OUT_OF_DATE_KHR)
m_IsValid = false;
else if (acquireResult != VK_SUBOPTIMAL_KHR)
{
- LOGERROR("Acquire result: %d", static_cast<int>(acquireResult));
+ LOGERROR("Acquire result: %d (%s)",
+ static_cast<int>(acquireResult), Utilities::GetVkResultName(acquireResult));
debug_warn("Unknown acquire error.");
}
}
return m_IsValid;
}
void CSwapChain::SubmitCommandsAfterAcquireNextImage(
CRingCommandContext& commandContext)
{
const bool firstAcquirement = !m_Textures[m_CurrentImageIndex]->IsInitialized();
Utilities::SubmitImageMemoryBarrier(
commandContext.GetCommandBuffer(),
m_Images[m_CurrentImageIndex], 0, 0,
0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
firstAcquirement ? VK_IMAGE_LAYOUT_UNDEFINED : VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
if (!m_DepthTexture->IsInitialized())
{
Utilities::SubmitImageMemoryBarrier(
commandContext.GetCommandBuffer(),
m_DepthTexture->GetImage(), 0, 0,
0, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
}
}
void CSwapChain::SubmitCommandsBeforePresent(
CRingCommandContext& commandContext)
{
ENSURE(m_CurrentImageIndex != std::numeric_limits<uint32_t>::max());
Utilities::SubmitImageMemoryBarrier(
commandContext.GetCommandBuffer(), m_Images[m_CurrentImageIndex], 0, 0,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, 0,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
}
void CSwapChain::Present(VkSemaphore submitDone, VkQueue queue)
{
ENSURE(m_CurrentImageIndex != std::numeric_limits<uint32_t>::max());
VkSwapchainKHR swapChains[] = {m_SwapChain};
VkPresentInfoKHR presentInfo{};
presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = swapChains;
presentInfo.pImageIndices = &m_CurrentImageIndex;
presentInfo.waitSemaphoreCount = 1;
presentInfo.pWaitSemaphores = &submitDone;
const VkResult presentResult = vkQueuePresentKHR(queue, &presentInfo);
if (presentResult != VK_SUCCESS)
{
if (presentResult == VK_ERROR_OUT_OF_DATE_KHR)
m_IsValid = false;
else if (presentResult != VK_SUBOPTIMAL_KHR)
{
- LOGERROR("Present result: %d", static_cast<int>(presentResult));
+ LOGERROR("Present result: %d (%s)",
+ static_cast<int>(presentResult), Utilities::GetVkResultName(presentResult));
debug_warn("Unknown present error.");
}
}
m_CurrentImageIndex = std::numeric_limits<uint32_t>::max();
}
CFramebuffer* CSwapChain::GetCurrentBackbuffer(
const AttachmentLoadOp colorAttachmentLoadOp,
const AttachmentStoreOp colorAttachmentStoreOp,
const AttachmentLoadOp depthStencilAttachmentLoadOp,
const AttachmentStoreOp depthStencilAttachmentStoreOp)
{
ENSURE(m_CurrentImageIndex != std::numeric_limits<uint32_t>::max());
SwapChainBackbuffer& swapChainBackbuffer =
m_Backbuffers[m_CurrentImageIndex];
const SwapChainBackbuffer::BackbufferKey key{
colorAttachmentLoadOp, colorAttachmentStoreOp,
depthStencilAttachmentLoadOp, depthStencilAttachmentStoreOp};
auto it = swapChainBackbuffer.backbuffers.find(key);
if (it == swapChainBackbuffer.backbuffers.end())
{
char nameBuffer[64];
snprintf(nameBuffer, std::size(nameBuffer), "Backbuffer #%u", m_CurrentImageIndex);
SColorAttachment colorAttachment{};
colorAttachment.texture = m_Textures[m_CurrentImageIndex].get();
colorAttachment.loadOp = colorAttachmentLoadOp;
colorAttachment.storeOp = colorAttachmentStoreOp;
SDepthStencilAttachment depthStencilAttachment{};
depthStencilAttachment.texture = m_DepthTexture.get();
depthStencilAttachment.loadOp = depthStencilAttachmentLoadOp;
depthStencilAttachment.storeOp = depthStencilAttachmentStoreOp;
it = swapChainBackbuffer.backbuffers.emplace(key, CFramebuffer::Create(
m_Device, nameBuffer, &colorAttachment, &depthStencilAttachment)).first;
}
return it->second.get();
}
CTexture* CSwapChain::GetCurrentBackbufferTexture()
{
ENSURE(m_CurrentImageIndex != std::numeric_limits<uint32_t>::max());
return m_Textures[m_CurrentImageIndex].get();
}
} // namespace Vulkan
} // namespace Backend
} // namespace Renderer
Index: ps/trunk/source/renderer/backend/vulkan/Texture.cpp
===================================================================
--- ps/trunk/source/renderer/backend/vulkan/Texture.cpp (revision 27838)
+++ ps/trunk/source/renderer/backend/vulkan/Texture.cpp (revision 27839)
@@ -1,371 +1,373 @@
/* Copyright (C) 2023 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* 0 A.D. is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
*/
#include "precompiled.h"
#include "Texture.h"
#include "renderer/backend/vulkan/Device.h"
#include "renderer/backend/vulkan/Mapping.h"
#include "renderer/backend/vulkan/SamplerManager.h"
#include "renderer/backend/vulkan/Utilities.h"
namespace Renderer
{
namespace Backend
{
namespace Vulkan
{
// static
std::unique_ptr<CTexture> CTexture::Create(
CDevice* device, const char* name, const Type type, const uint32_t usage,
const Format format, const uint32_t width, const uint32_t height,
const Sampler::Desc& defaultSamplerDesc,
const uint32_t MIPLevelCount, const uint32_t sampleCount)
{
std::unique_ptr<CTexture> texture(new CTexture());
texture->m_Device = device;
texture->m_Format = format;
texture->m_Type = type;
texture->m_Usage = usage;
texture->m_Width = width;
texture->m_Height = height;
texture->m_MIPLevelCount = MIPLevelCount;
texture->m_SampleCount = sampleCount;
texture->m_LayerCount = type == ITexture::Type::TEXTURE_CUBE ? 6 : 1;
if (type == Type::TEXTURE_2D_MULTISAMPLE)
ENSURE(sampleCount > 1);
VkFormat imageFormat = VK_FORMAT_UNDEFINED;
// A8 and L8 are special cases for GL2.1, because it doesn't have a proper
// channel swizzling.
if (format == Format::A8_UNORM || format == Format::L8_UNORM)
imageFormat = VK_FORMAT_R8_UNORM;
else
imageFormat = Mapping::FromFormat(format);
texture->m_VkFormat = imageFormat;
VkImageType imageType = VK_IMAGE_TYPE_2D;
VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL;
const VkPhysicalDevice physicalDevice =
device->GetChoosenPhysicalDevice().device;
VkFormatProperties formatProperties{};
vkGetPhysicalDeviceFormatProperties(
physicalDevice, imageFormat, &formatProperties);
VkImageUsageFlags usageFlags = 0;
// Vulkan 1.0 implies that TRANSFER_SRC and TRANSFER_DST are supported.
// TODO: account Vulkan 1.1.
if (usage & Usage::TRANSFER_SRC)
usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
if (usage & Usage::TRANSFER_DST)
usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
if (usage & Usage::SAMPLED)
{
ENSURE(type != Type::TEXTURE_2D_MULTISAMPLE);
if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT))
{
LOGERROR("Format %d doesn't support sampling for optimal tiling.", static_cast<int>(imageFormat));
return nullptr;
}
usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
}
if (usage & Usage::COLOR_ATTACHMENT)
{
ENSURE(device->IsFramebufferFormatSupported(format));
if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
{
LOGERROR("Format %d doesn't support color attachment for optimal tiling.", static_cast<int>(imageFormat));
return nullptr;
}
usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
}
if (usage & Usage::DEPTH_STENCIL_ATTACHMENT)
{
ENSURE(IsDepthFormat(format));
if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
{
LOGERROR("Format %d doesn't support depth stencil attachment for optimal tiling.", static_cast<int>(imageFormat));
return nullptr;
}
usageFlags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
}
if (IsDepthFormat(format))
{
texture->m_AttachmentImageAspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
texture->m_SamplerImageAspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
if (format == Format::D24_UNORM_S8_UINT || format == Format::D32_SFLOAT_S8_UINT)
texture->m_AttachmentImageAspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT;
}
else
{
texture->m_AttachmentImageAspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
texture->m_SamplerImageAspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
}
VkImageCreateInfo imageCreateInfo{};
imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageCreateInfo.imageType = imageType;
imageCreateInfo.extent.width = width;
imageCreateInfo.extent.height = height;
imageCreateInfo.extent.depth = 1;
imageCreateInfo.mipLevels = MIPLevelCount;
imageCreateInfo.arrayLayers = type == Type::TEXTURE_CUBE ? 6 : 1;
imageCreateInfo.format = imageFormat;
imageCreateInfo.samples = Mapping::FromSampleCount(sampleCount);
imageCreateInfo.tiling = tiling;
imageCreateInfo.usage = usageFlags;
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
if (type == Type::TEXTURE_CUBE)
imageCreateInfo.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
VmaAllocationCreateInfo allocationCreateInfo{};
if ((usage & Usage::COLOR_ATTACHMENT) || (usage & Usage::DEPTH_STENCIL_ATTACHMENT))
allocationCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
#ifndef NDEBUG
allocationCreateInfo.flags |= VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
allocationCreateInfo.pUserData = const_cast<char*>(name);
#endif
allocationCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
allocationCreateInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
const VkResult createImageResult = vmaCreateImage(
device->GetVMAAllocator(), &imageCreateInfo, &allocationCreateInfo,
&texture->m_Image, &texture->m_Allocation, nullptr);
if (createImageResult != VK_SUCCESS)
{
- LOGERROR("Failed to create VkImage: %d", static_cast<int>(createImageResult));
+ LOGERROR("Failed to create VkImage: %d (%s)",
+ static_cast<int>(createImageResult), Utilities::GetVkResultName(createImageResult));
return nullptr;
}
VkImageViewCreateInfo imageViewCreateInfo{};
imageViewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
imageViewCreateInfo.image = texture->m_Image;
imageViewCreateInfo.viewType = type == Type::TEXTURE_CUBE ? VK_IMAGE_VIEW_TYPE_CUBE : VK_IMAGE_VIEW_TYPE_2D;
imageViewCreateInfo.format = imageFormat;
imageViewCreateInfo.subresourceRange.baseMipLevel = 0;
imageViewCreateInfo.subresourceRange.levelCount = MIPLevelCount;
imageViewCreateInfo.subresourceRange.baseArrayLayer = 0;
imageViewCreateInfo.subresourceRange.layerCount = type == Type::TEXTURE_CUBE ? 6 : 1;
if (format == Format::A8_UNORM)
{
imageViewCreateInfo.components.r = VK_COMPONENT_SWIZZLE_ZERO;
imageViewCreateInfo.components.g = VK_COMPONENT_SWIZZLE_ZERO;
imageViewCreateInfo.components.b = VK_COMPONENT_SWIZZLE_ZERO;
imageViewCreateInfo.components.a = VK_COMPONENT_SWIZZLE_R;
}
else if (format == Format::L8_UNORM)
{
imageViewCreateInfo.components.r = VK_COMPONENT_SWIZZLE_R;
imageViewCreateInfo.components.g = VK_COMPONENT_SWIZZLE_R;
imageViewCreateInfo.components.b = VK_COMPONENT_SWIZZLE_R;
imageViewCreateInfo.components.a = VK_COMPONENT_SWIZZLE_ONE;
}
else
{
imageViewCreateInfo.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
imageViewCreateInfo.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
imageViewCreateInfo.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
imageViewCreateInfo.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
}
if ((usage & Usage::COLOR_ATTACHMENT) || (usage & Usage::DEPTH_STENCIL_ATTACHMENT))
{
imageViewCreateInfo.subresourceRange.aspectMask = texture->m_AttachmentImageAspectMask;
ENSURE_VK_SUCCESS(vkCreateImageView(
device->GetVkDevice(), &imageViewCreateInfo, nullptr, &texture->m_AttachmentImageView));
}
if (usage & Usage::SAMPLED)
{
imageViewCreateInfo.subresourceRange.aspectMask = texture->m_SamplerImageAspectMask;
ENSURE_VK_SUCCESS(vkCreateImageView(
device->GetVkDevice(), &imageViewCreateInfo, nullptr, &texture->m_SamplerImageView));
texture->m_Sampler = device->GetSamplerManager().GetOrCreateSampler(
defaultSamplerDesc);
texture->m_IsCompareEnabled = defaultSamplerDesc.compareEnabled;
}
device->SetObjectName(VK_OBJECT_TYPE_IMAGE, texture->m_Image, name);
if (texture->m_AttachmentImageView != VK_NULL_HANDLE)
device->SetObjectName(VK_OBJECT_TYPE_IMAGE_VIEW, texture->m_AttachmentImageView, name);
if (texture->m_SamplerImageView != VK_NULL_HANDLE)
device->SetObjectName(VK_OBJECT_TYPE_IMAGE_VIEW, texture->m_SamplerImageView, name);
return texture;
}
// static
std::unique_ptr<CTexture> CTexture::WrapBackbufferImage(
CDevice* device, const char* name, const VkImage image, const VkFormat format,
const VkImageUsageFlags usage, const uint32_t width, const uint32_t height)
{
std::unique_ptr<CTexture> texture(new CTexture());
texture->m_Device = device;
if (format == VK_FORMAT_R8G8B8A8_UNORM)
texture->m_Format = Format::R8G8B8A8_UNORM;
else if (format == VK_FORMAT_B8G8R8A8_UNORM)
texture->m_Format = Format::B8G8R8A8_UNORM;
else
texture->m_Format = Format::UNDEFINED;
texture->m_Type = Type::TEXTURE_2D;
if (usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)
texture->m_Usage |= Usage::COLOR_ATTACHMENT;
if (usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)
texture->m_Usage |= Usage::TRANSFER_SRC;
if (usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)
texture->m_Usage |= Usage::TRANSFER_DST;
texture->m_Width = width;
texture->m_Height = height;
texture->m_MIPLevelCount = 1;
texture->m_SampleCount = 1;
texture->m_LayerCount = 1;
texture->m_VkFormat = format;
// The image is owned by its swapchain, but we don't set a special flag
// because the ownership is detected by m_Allocation presence.
texture->m_Image = image;
texture->m_AttachmentImageAspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
texture->m_SamplerImageAspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
VkImageViewCreateInfo imageViewCreateInfo{};
imageViewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
imageViewCreateInfo.image = image;
imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
imageViewCreateInfo.format = format;
imageViewCreateInfo.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
imageViewCreateInfo.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
imageViewCreateInfo.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
imageViewCreateInfo.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
imageViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageViewCreateInfo.subresourceRange.baseMipLevel = 0;
imageViewCreateInfo.subresourceRange.levelCount = 1;
imageViewCreateInfo.subresourceRange.baseArrayLayer = 0;
imageViewCreateInfo.subresourceRange.layerCount = 1;
ENSURE_VK_SUCCESS(vkCreateImageView(
device->GetVkDevice(), &imageViewCreateInfo, nullptr, &texture->m_AttachmentImageView));
device->SetObjectName(VK_OBJECT_TYPE_IMAGE_VIEW, texture->m_AttachmentImageView, name);
return texture;
}
// static
std::unique_ptr<CTexture> CTexture::CreateReadback(
CDevice* device, const char* name, const Format format,
const uint32_t width, const uint32_t height)
{
std::unique_ptr<CTexture> texture(new CTexture());
texture->m_Device = device;
texture->m_Format = format;
texture->m_Type = Type::TEXTURE_2D;
texture->m_Usage = Usage::TRANSFER_DST;
texture->m_Width = width;
texture->m_Height = height;
texture->m_MIPLevelCount = 1;
texture->m_SampleCount = 1;
texture->m_LayerCount = 1;
texture->m_VkFormat = Mapping::FromFormat(texture->m_Format);
texture->m_AttachmentImageAspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
texture->m_SamplerImageAspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
VkImageCreateInfo imageCreateInfo{};
imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.extent.width = width;
imageCreateInfo.extent.height = height;
imageCreateInfo.extent.depth = 1;
imageCreateInfo.mipLevels = 1;
imageCreateInfo.arrayLayers = 1;
imageCreateInfo.format = texture->m_VkFormat;
imageCreateInfo.samples = Mapping::FromSampleCount(1);
imageCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VmaAllocationCreateInfo allocationCreateInfo{};
allocationCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
#ifndef NDEBUG
allocationCreateInfo.flags |= VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
allocationCreateInfo.pUserData = const_cast<char*>(name);
#endif
allocationCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
allocationCreateInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
const VkResult createImageResult = vmaCreateImage(
device->GetVMAAllocator(), &imageCreateInfo, &allocationCreateInfo,
&texture->m_Image, &texture->m_Allocation, &texture->m_AllocationInfo);
if (createImageResult != VK_SUCCESS)
{
- LOGERROR("Failed to create VkImage: %d", static_cast<int>(createImageResult));
+ LOGERROR("Failed to create VkImage: %d (%s)",
+ static_cast<int>(createImageResult), Utilities::GetVkResultName(createImageResult));
return nullptr;
}
if (!texture->m_AllocationInfo.pMappedData)
{
LOGERROR("Failed to map readback image.");
return nullptr;
}
device->SetObjectName(VK_OBJECT_TYPE_IMAGE, texture->m_Image, name);
return texture;
}
CTexture::CTexture()
{
static uint32_t m_LastAvailableUID = 1;
m_UID = m_LastAvailableUID++;
}
CTexture::~CTexture()
{
if (m_AttachmentImageView != VK_NULL_HANDLE)
m_Device->ScheduleObjectToDestroy(
VK_OBJECT_TYPE_IMAGE_VIEW, m_AttachmentImageView, VK_NULL_HANDLE);
if (m_SamplerImageView != VK_NULL_HANDLE)
m_Device->ScheduleObjectToDestroy(
VK_OBJECT_TYPE_IMAGE_VIEW, m_SamplerImageView, VK_NULL_HANDLE);
if (m_Allocation != VK_NULL_HANDLE)
m_Device->ScheduleObjectToDestroy(
VK_OBJECT_TYPE_IMAGE, m_Image, m_Allocation);
m_Device->ScheduleTextureToDestroy(m_UID);
}
IDevice* CTexture::GetDevice()
{
return m_Device;
}
} // namespace Vulkan
} // namespace Backend
} // namespace Renderer
Index: ps/trunk/source/renderer/backend/vulkan/Utilities.cpp
===================================================================
--- ps/trunk/source/renderer/backend/vulkan/Utilities.cpp (revision 27838)
+++ ps/trunk/source/renderer/backend/vulkan/Utilities.cpp (revision 27839)
@@ -1,170 +1,209 @@
/* Copyright (C) 2023 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* 0 A.D. is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
*/
#include "precompiled.h"
#include "Utilities.h"
#include "lib/code_annotation.h"
#include "lib/config2.h"
#include "renderer/backend/vulkan/Buffer.h"
#include "renderer/backend/vulkan/Texture.h"
namespace Renderer
{
namespace Backend
{
namespace Vulkan
{
namespace Utilities
{
void SetTextureLayout(
VkCommandBuffer commandBuffer, CTexture* texture,
const VkImageLayout oldLayout, const VkImageLayout newLayout,
const VkAccessFlags srcAccessMask, const VkAccessFlags dstAccessMask,
const VkPipelineStageFlags srcStageMask, const VkPipelineStageFlags dstStageMask)
{
ENSURE(texture->GetMIPLevelCount() == 1);
ENSURE(texture->GetLayerCount() == 1);
VkImageMemoryBarrier imageMemoryBarrier{};
imageMemoryBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
imageMemoryBarrier.image = texture->GetImage();
imageMemoryBarrier.srcAccessMask = srcAccessMask;
imageMemoryBarrier.dstAccessMask = dstAccessMask;
imageMemoryBarrier.oldLayout = oldLayout;
imageMemoryBarrier.newLayout = newLayout;
imageMemoryBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
imageMemoryBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
imageMemoryBarrier.subresourceRange.aspectMask = texture->GetAttachmentImageAspectMask();
imageMemoryBarrier.subresourceRange.baseMipLevel = 0;
imageMemoryBarrier.subresourceRange.levelCount = texture->GetMIPLevelCount();
imageMemoryBarrier.subresourceRange.baseArrayLayer = 0;
imageMemoryBarrier.subresourceRange.layerCount = texture->GetLayerCount();
vkCmdPipelineBarrier(commandBuffer,
srcStageMask, dstStageMask, 0,
0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);
texture->SetInitialized();
}
void SubmitImageMemoryBarrier(
	VkCommandBuffer commandBuffer, VkImage image, const uint32_t level, const uint32_t layer,
	const VkAccessFlags srcAccessMask, const VkAccessFlags dstAccessMask,
	const VkImageLayout oldLayout, const VkImageLayout newLayout,
	const VkPipelineStageFlags srcStageMask, const VkPipelineStageFlags dstStageMask,
	const VkImageAspectFlags aspectMask)
{
	// Barrier for exactly one MIP level of one array layer.
	VkImageSubresourceRange range{};
	range.aspectMask = aspectMask;
	range.baseMipLevel = level;
	range.levelCount = 1;
	range.baseArrayLayer = layer;
	range.layerCount = 1;

	VkImageMemoryBarrier barrier{};
	barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
	barrier.srcAccessMask = srcAccessMask;
	barrier.dstAccessMask = dstAccessMask;
	barrier.oldLayout = oldLayout;
	barrier.newLayout = newLayout;
	barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	barrier.image = image;
	barrier.subresourceRange = range;

	vkCmdPipelineBarrier(
		commandBuffer, srcStageMask, dstStageMask, 0,
		0, nullptr, 0, nullptr, 1, &barrier);
}
void SubmitBufferMemoryBarrier(
	VkCommandBuffer commandBuffer, CBuffer* buffer,
	const VkDeviceSize offset, const VkDeviceSize size,
	const VkAccessFlags srcAccessMask, const VkAccessFlags dstAccessMask,
	const VkPipelineStageFlags srcStageMask, const VkPipelineStageFlags dstStageMask)
{
	// Barrier over the [offset, offset + size) range of the buffer.
	VkBufferMemoryBarrier barrier{};
	barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
	barrier.buffer = buffer->GetVkBuffer();
	barrier.offset = offset;
	barrier.size = size;
	barrier.srcAccessMask = srcAccessMask;
	barrier.dstAccessMask = dstAccessMask;
	barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;

	vkCmdPipelineBarrier(commandBuffer,
		srcStageMask, dstStageMask, 0,
		0, nullptr, 1, &barrier, 0, nullptr);
}
void SubmitMemoryBarrier(
	VkCommandBuffer commandBuffer,
	const VkAccessFlags srcAccessMask, const VkAccessFlags dstAccessMask,
	const VkPipelineStageFlags srcStageMask, const VkPipelineStageFlags dstStageMask)
{
	// Global (non-resource-specific) memory barrier.
	VkMemoryBarrier barrier{};
	barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
	barrier.srcAccessMask = srcAccessMask;
	barrier.dstAccessMask = dstAccessMask;

	vkCmdPipelineBarrier(commandBuffer,
		srcStageMask, dstStageMask, 0,
		1, &barrier, 0, nullptr, 0, nullptr);
}
void SubmitPipelineBarrier(
	VkCommandBuffer commandBuffer,
	const VkPipelineStageFlags srcStageMask, const VkPipelineStageFlags dstStageMask)
{
	// Pure execution dependency: no memory barriers attached.
	vkCmdPipelineBarrier(commandBuffer,
		srcStageMask, dstStageMask, 0,
		0, nullptr,
		0, nullptr,
		0, nullptr);
}
void SubmitDebugSyncMemoryBarrier(VkCommandBuffer commandBuffer)
{
	// Heavyweight "flush everything" barrier across all pipeline stages —
	// intended as a debugging aid for isolating synchronization issues.
	constexpr VkAccessFlags fullAccessMask =
		VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
		VK_ACCESS_INDEX_READ_BIT |
		VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
		VK_ACCESS_UNIFORM_READ_BIT |
		VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
		VK_ACCESS_SHADER_READ_BIT |
		VK_ACCESS_SHADER_WRITE_BIT |
		VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
		VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
		VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
		VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
		VK_ACCESS_TRANSFER_READ_BIT |
		VK_ACCESS_TRANSFER_WRITE_BIT |
		VK_ACCESS_HOST_READ_BIT |
		VK_ACCESS_HOST_WRITE_BIT;
	SubmitMemoryBarrier(
		commandBuffer, fullAccessMask, fullAccessMask,
		VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
}
+// Returns the literal enumerator name for a VkResult (e.g. "VK_SUCCESS"),
+// or "UNLISTED" for values not covered below. Used by ENSURE_VK_SUCCESS to
+// make error logs human-readable.
+const char* GetVkResultName(const VkResult result)
+{
+// Expands to a case label that stringifies the enumerator via the
+// preprocessor, avoiding a hand-maintained name table.
+#define CASE(NAME) case NAME: return #NAME
+	switch (result)
+	{
+	CASE(VK_SUCCESS);
+	CASE(VK_NOT_READY);
+	CASE(VK_TIMEOUT);
+	CASE(VK_EVENT_SET);
+	CASE(VK_EVENT_RESET);
+	CASE(VK_INCOMPLETE);
+	CASE(VK_ERROR_OUT_OF_HOST_MEMORY);
+	CASE(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+	CASE(VK_ERROR_INITIALIZATION_FAILED);
+	CASE(VK_ERROR_DEVICE_LOST);
+	CASE(VK_ERROR_MEMORY_MAP_FAILED);
+	CASE(VK_ERROR_LAYER_NOT_PRESENT);
+	CASE(VK_ERROR_EXTENSION_NOT_PRESENT);
+	CASE(VK_ERROR_FEATURE_NOT_PRESENT);
+	CASE(VK_ERROR_INCOMPATIBLE_DRIVER);
+	CASE(VK_ERROR_TOO_MANY_OBJECTS);
+	CASE(VK_ERROR_FORMAT_NOT_SUPPORTED);
+	CASE(VK_ERROR_FRAGMENTED_POOL);
+	CASE(VK_ERROR_UNKNOWN);
+	CASE(VK_ERROR_OUT_OF_POOL_MEMORY);
+	CASE(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+	CASE(VK_ERROR_FRAGMENTATION);
+	CASE(VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS);
+	CASE(VK_ERROR_SURFACE_LOST_KHR);
+	CASE(VK_ERROR_NATIVE_WINDOW_IN_USE_KHR);
+	CASE(VK_SUBOPTIMAL_KHR);
+	CASE(VK_ERROR_OUT_OF_DATE_KHR);
+	default:
+		break;
+	}
+#undef CASE
+	return "UNLISTED";
+}
+
+
} // namespace Utilities
} // namespace Vulkan
} // namespace Backend
} // namespace Renderer
Index: ps/trunk/source/renderer/backend/vulkan/Utilities.h
===================================================================
--- ps/trunk/source/renderer/backend/vulkan/Utilities.h (revision 27838)
+++ ps/trunk/source/renderer/backend/vulkan/Utilities.h (revision 27839)
@@ -1,91 +1,93 @@
/* Copyright (C) 2023 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* 0 A.D. is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
 * along with 0 A.D. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef INCLUDED_RENDERER_BACKEND_VULKAN_UTILITIES
#define INCLUDED_RENDERER_BACKEND_VULKAN_UTILITIES
#include "ps/CStr.h"
#include <vulkan/vulkan.h>
#define ENSURE_VK_SUCCESS(EXPR) \
do \
{ \
const VkResult result = (EXPR); \
if (result != VK_SUCCESS) \
{ \
- LOGERROR(#EXPR " returned %d instead of VK_SUCCESS", static_cast(result)); \
+ LOGERROR(#EXPR " returned %d (%s) instead of VK_SUCCESS", static_cast(result), Utilities::GetVkResultName(result)); \
ENSURE(false && #EXPR); \
} \
} while (0)
namespace Renderer
{
namespace Backend
{
namespace Vulkan
{
class CBuffer;
class CTexture;
namespace Utilities
{
// https://github.com/KhronosGroup/Vulkan-Docs/wiki/Synchronization-Examples-(Legacy-synchronization-APIs)
void SetTextureLayout(
VkCommandBuffer commandBuffer, CTexture* texture,
const VkImageLayout oldLayout, const VkImageLayout newLayout,
const VkAccessFlags srcAccessMask, const VkAccessFlags dstAccessMask,
const VkPipelineStageFlags srcStageMask, const VkPipelineStageFlags dstStageMask);
void SubmitImageMemoryBarrier(
VkCommandBuffer commandBuffer, VkImage image, const uint32_t level, const uint32_t layer,
const VkAccessFlags srcAccessMask, const VkAccessFlags dstAccessMask,
const VkImageLayout oldLayout, const VkImageLayout newLayout,
const VkPipelineStageFlags srcStageMask, const VkPipelineStageFlags dstStageMask,
const VkImageAspectFlags aspectMask = VK_IMAGE_ASPECT_COLOR_BIT);
void SubmitBufferMemoryBarrier(
VkCommandBuffer commandBuffer, CBuffer* buffer,
const VkDeviceSize offset, const VkDeviceSize size,
const VkAccessFlags srcAccessMask, const VkAccessFlags dstAccessMask,
const VkPipelineStageFlags srcStageMask, const VkPipelineStageFlags dstStageMask);
void SubmitMemoryBarrier(
VkCommandBuffer commandBuffer,
const VkAccessFlags srcAccessMask, const VkAccessFlags dstAccessMask,
const VkPipelineStageFlags srcStageMask, const VkPipelineStageFlags dstStageMask);
void SubmitPipelineBarrier(
VkCommandBuffer commandBuffer,
const VkPipelineStageFlags srcStageMask, const VkPipelineStageFlags dstStageMask);
void SubmitDebugSyncMemoryBarrier(VkCommandBuffer commandBuffer);
+const char* GetVkResultName(const VkResult result);
+
} // namespace Utilities
} // namespace Vulkan
} // namespace Backend
} // namespace Renderer
#endif // INCLUDED_RENDERER_BACKEND_VULKAN_UTILITIES