vk_shader_decompiler: Implement indexed textures
Implement accessing textures through an index. It uses the same interface as OpenGL; the main difference is that Vulkan bindings are forced to be arrayed (the binding index doesn't change for stacked textures in SPIR-V).
This commit is contained in:
parent
1dda77d392
commit
1e9213632a
6 changed files with 100 additions and 55 deletions
|
@ -73,7 +73,7 @@ UniqueDescriptorUpdateTemplate VKComputePipeline::CreateDescriptorUpdateTemplate
|
|||
std::vector<vk::DescriptorUpdateTemplateEntry> template_entries;
|
||||
u32 binding = 0;
|
||||
u32 offset = 0;
|
||||
FillDescriptorUpdateTemplateEntries(device, entries, binding, offset, template_entries);
|
||||
FillDescriptorUpdateTemplateEntries(entries, binding, offset, template_entries);
|
||||
if (template_entries.empty()) {
|
||||
// If the shader doesn't use descriptor sets, skip template creation.
|
||||
return UniqueDescriptorUpdateTemplate{};
|
||||
|
|
|
@ -97,8 +97,7 @@ UniqueDescriptorUpdateTemplate VKGraphicsPipeline::CreateDescriptorUpdateTemplat
|
|||
u32 offset = 0;
|
||||
for (const auto& stage : program) {
|
||||
if (stage) {
|
||||
FillDescriptorUpdateTemplateEntries(device, stage->entries, binding, offset,
|
||||
template_entries);
|
||||
FillDescriptorUpdateTemplateEntries(stage->entries, binding, offset, template_entries);
|
||||
}
|
||||
}
|
||||
if (template_entries.empty()) {
|
||||
|
|
|
@ -36,6 +36,13 @@ using Tegra::Engines::ShaderType;
|
|||
|
||||
namespace {
|
||||
|
||||
// C++20's using enum
|
||||
constexpr auto eUniformBuffer = vk::DescriptorType::eUniformBuffer;
|
||||
constexpr auto eStorageBuffer = vk::DescriptorType::eStorageBuffer;
|
||||
constexpr auto eUniformTexelBuffer = vk::DescriptorType::eUniformTexelBuffer;
|
||||
constexpr auto eCombinedImageSampler = vk::DescriptorType::eCombinedImageSampler;
|
||||
constexpr auto eStorageImage = vk::DescriptorType::eStorageImage;
|
||||
|
||||
constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
|
||||
VideoCommon::Shader::CompileDepth::FullDecompile};
|
||||
|
||||
|
@ -119,23 +126,32 @@ ShaderType GetShaderType(Maxwell::ShaderProgram program) {
|
|||
}
|
||||
}
|
||||
|
||||
template <vk::DescriptorType descriptor_type, class Container>
|
||||
void AddBindings(std::vector<vk::DescriptorSetLayoutBinding>& bindings, u32& binding,
|
||||
vk::ShaderStageFlags stage_flags, const Container& container) {
|
||||
const u32 num_entries = static_cast<u32>(std::size(container));
|
||||
for (std::size_t i = 0; i < num_entries; ++i) {
|
||||
u32 count = 1;
|
||||
if constexpr (descriptor_type == eCombinedImageSampler) {
|
||||
// Combined image samplers can be arrayed.
|
||||
count = container[i].Size();
|
||||
}
|
||||
bindings.emplace_back(binding++, descriptor_type, count, stage_flags, nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
u32 FillDescriptorLayout(const ShaderEntries& entries,
|
||||
std::vector<vk::DescriptorSetLayoutBinding>& bindings,
|
||||
Maxwell::ShaderProgram program_type, u32 base_binding) {
|
||||
const ShaderType stage = GetStageFromProgram(program_type);
|
||||
const vk::ShaderStageFlags stage_flags = MaxwellToVK::ShaderStage(stage);
|
||||
const vk::ShaderStageFlags flags = MaxwellToVK::ShaderStage(stage);
|
||||
|
||||
u32 binding = base_binding;
|
||||
const auto AddBindings = [&](vk::DescriptorType descriptor_type, std::size_t num_entries) {
|
||||
for (std::size_t i = 0; i < num_entries; ++i) {
|
||||
bindings.emplace_back(binding++, descriptor_type, 1, stage_flags, nullptr);
|
||||
}
|
||||
};
|
||||
AddBindings(vk::DescriptorType::eUniformBuffer, entries.const_buffers.size());
|
||||
AddBindings(vk::DescriptorType::eStorageBuffer, entries.global_buffers.size());
|
||||
AddBindings(vk::DescriptorType::eUniformTexelBuffer, entries.texel_buffers.size());
|
||||
AddBindings(vk::DescriptorType::eCombinedImageSampler, entries.samplers.size());
|
||||
AddBindings(vk::DescriptorType::eStorageImage, entries.images.size());
|
||||
AddBindings<eUniformBuffer>(bindings, binding, flags, entries.const_buffers);
|
||||
AddBindings<eStorageBuffer>(bindings, binding, flags, entries.global_buffers);
|
||||
AddBindings<eUniformTexelBuffer>(bindings, binding, flags, entries.texel_buffers);
|
||||
AddBindings<eCombinedImageSampler>(bindings, binding, flags, entries.samplers);
|
||||
AddBindings<eStorageImage>(bindings, binding, flags, entries.images);
|
||||
return binding;
|
||||
}
|
||||
|
||||
|
@ -361,32 +377,45 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
|
|||
return {std::move(program), std::move(bindings)};
|
||||
}
|
||||
|
||||
void FillDescriptorUpdateTemplateEntries(
|
||||
const VKDevice& device, const ShaderEntries& entries, u32& binding, u32& offset,
|
||||
std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries) {
|
||||
static constexpr auto entry_size = static_cast<u32>(sizeof(DescriptorUpdateEntry));
|
||||
const auto AddEntry = [&](vk::DescriptorType descriptor_type, std::size_t count_) {
|
||||
const u32 count = static_cast<u32>(count_);
|
||||
if (descriptor_type == vk::DescriptorType::eUniformTexelBuffer &&
|
||||
device.GetDriverID() == vk::DriverIdKHR::eNvidiaProprietary) {
|
||||
// Nvidia has a bug where updating multiple uniform texels at once causes the driver to
|
||||
// crash.
|
||||
for (u32 i = 0; i < count; ++i) {
|
||||
template_entries.emplace_back(binding + i, 0, 1, descriptor_type,
|
||||
offset + i * entry_size, entry_size);
|
||||
}
|
||||
} else if (count != 0) {
|
||||
template_entries.emplace_back(binding, 0, count, descriptor_type, offset, entry_size);
|
||||
}
|
||||
offset += count * entry_size;
|
||||
binding += count;
|
||||
};
|
||||
template <vk::DescriptorType descriptor_type, class Container>
|
||||
void AddEntry(std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries, u32& binding,
|
||||
u32& offset, const Container& container) {
|
||||
static constexpr u32 entry_size = static_cast<u32>(sizeof(DescriptorUpdateEntry));
|
||||
const u32 count = static_cast<u32>(std::size(container));
|
||||
|
||||
AddEntry(vk::DescriptorType::eUniformBuffer, entries.const_buffers.size());
|
||||
AddEntry(vk::DescriptorType::eStorageBuffer, entries.global_buffers.size());
|
||||
AddEntry(vk::DescriptorType::eUniformTexelBuffer, entries.texel_buffers.size());
|
||||
AddEntry(vk::DescriptorType::eCombinedImageSampler, entries.samplers.size());
|
||||
AddEntry(vk::DescriptorType::eStorageImage, entries.images.size());
|
||||
if constexpr (descriptor_type == eCombinedImageSampler) {
|
||||
for (u32 i = 0; i < count; ++i) {
|
||||
const u32 num_samplers = container[i].Size();
|
||||
template_entries.emplace_back(binding, 0, num_samplers, descriptor_type, offset,
|
||||
entry_size);
|
||||
++binding;
|
||||
offset += num_samplers * entry_size;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if constexpr (descriptor_type == eUniformTexelBuffer) {
|
||||
// Nvidia has a bug where updating multiple uniform texels at once causes the driver to
|
||||
// crash.
|
||||
for (u32 i = 0; i < count; ++i) {
|
||||
template_entries.emplace_back(binding + i, 0, 1, descriptor_type,
|
||||
offset + i * entry_size, entry_size);
|
||||
}
|
||||
} else if (count > 0) {
|
||||
template_entries.emplace_back(binding, 0, count, descriptor_type, offset, entry_size);
|
||||
}
|
||||
offset += count * entry_size;
|
||||
binding += count;
|
||||
}
|
||||
|
||||
void FillDescriptorUpdateTemplateEntries(
|
||||
const ShaderEntries& entries, u32& binding, u32& offset,
|
||||
std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries) {
|
||||
AddEntry<eUniformBuffer>(template_entries, offset, binding, entries.const_buffers);
|
||||
AddEntry<eStorageBuffer>(template_entries, offset, binding, entries.global_buffers);
|
||||
AddEntry<eUniformTexelBuffer>(template_entries, offset, binding, entries.texel_buffers);
|
||||
AddEntry<eCombinedImageSampler>(template_entries, offset, binding, entries.samplers);
|
||||
AddEntry<eStorageImage>(template_entries, offset, binding, entries.images);
|
||||
}
|
||||
|
||||
} // namespace Vulkan
|
||||
|
|
|
@ -194,7 +194,7 @@ private:
|
|||
};
|
||||
|
||||
void FillDescriptorUpdateTemplateEntries(
|
||||
const VKDevice& device, const ShaderEntries& entries, u32& binding, u32& offset,
|
||||
const ShaderEntries& entries, u32& binding, u32& offset,
|
||||
std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries);
|
||||
|
||||
} // namespace Vulkan
|
||||
|
|
|
@ -105,17 +105,20 @@ void TransitionImages(const std::vector<ImageView>& views, vk::PipelineStageFlag
|
|||
|
||||
template <typename Engine, typename Entry>
|
||||
Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry,
|
||||
std::size_t stage) {
|
||||
std::size_t stage, std::size_t index = 0) {
|
||||
const auto stage_type = static_cast<Tegra::Engines::ShaderType>(stage);
|
||||
if (entry.IsBindless()) {
|
||||
const Tegra::Texture::TextureHandle tex_handle =
|
||||
engine.AccessConstBuffer32(stage_type, entry.GetBuffer(), entry.GetOffset());
|
||||
return engine.GetTextureInfo(tex_handle);
|
||||
}
|
||||
const auto& gpu_profile = engine.AccessGuestDriverProfile();
|
||||
const u32 entry_offset = static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
|
||||
const u32 offset = entry.GetOffset() + entry_offset;
|
||||
if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) {
|
||||
return engine.GetStageTexture(stage_type, entry.GetOffset());
|
||||
return engine.GetStageTexture(stage_type, offset);
|
||||
} else {
|
||||
return engine.GetTexture(entry.GetOffset());
|
||||
return engine.GetTexture(offset);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -835,8 +838,10 @@ void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::
|
|||
MICROPROFILE_SCOPE(Vulkan_Textures);
|
||||
const auto& gpu = system.GPU().Maxwell3D();
|
||||
for (const auto& entry : entries.samplers) {
|
||||
const auto texture = GetTextureInfo(gpu, entry, stage);
|
||||
SetupTexture(texture, entry);
|
||||
for (std::size_t i = 0; i < entry.Size(); ++i) {
|
||||
const auto texture = GetTextureInfo(gpu, entry, stage, i);
|
||||
SetupTexture(texture, entry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -885,8 +890,10 @@ void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
|
|||
MICROPROFILE_SCOPE(Vulkan_Textures);
|
||||
const auto& gpu = system.GPU().KeplerCompute();
|
||||
for (const auto& entry : entries.samplers) {
|
||||
const auto texture = GetTextureInfo(gpu, entry, ComputeShaderIndex);
|
||||
SetupTexture(texture, entry);
|
||||
for (std::size_t i = 0; i < entry.Size(); ++i) {
|
||||
const auto texture = GetTextureInfo(gpu, entry, ComputeShaderIndex, i);
|
||||
SetupTexture(texture, entry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -69,8 +69,9 @@ struct TexelBuffer {
|
|||
|
||||
struct SampledImage {
|
||||
Id image_type{};
|
||||
Id sampled_image_type{};
|
||||
Id sampler{};
|
||||
Id sampler_type{};
|
||||
Id sampler_pointer_type{};
|
||||
Id variable{};
|
||||
};
|
||||
|
||||
struct StorageImage {
|
||||
|
@ -833,16 +834,20 @@ private:
|
|||
constexpr int sampled = 1;
|
||||
constexpr auto format = spv::ImageFormat::Unknown;
|
||||
const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format);
|
||||
const Id sampled_image_type = TypeSampledImage(image_type);
|
||||
const Id pointer_type =
|
||||
TypePointer(spv::StorageClass::UniformConstant, sampled_image_type);
|
||||
const Id sampler_type = TypeSampledImage(image_type);
|
||||
const Id sampler_pointer_type =
|
||||
TypePointer(spv::StorageClass::UniformConstant, sampler_type);
|
||||
const Id type = sampler.IsIndexed()
|
||||
? TypeArray(sampler_type, Constant(t_uint, sampler.Size()))
|
||||
: sampler_type;
|
||||
const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, type);
|
||||
const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
|
||||
AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex())));
|
||||
Decorate(id, spv::Decoration::Binding, binding++);
|
||||
Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
|
||||
|
||||
sampled_images.emplace(sampler.GetIndex(),
|
||||
SampledImage{image_type, sampled_image_type, id});
|
||||
sampled_images.emplace(sampler.GetIndex(), SampledImage{image_type, sampler_type,
|
||||
sampler_pointer_type, id});
|
||||
}
|
||||
return binding;
|
||||
}
|
||||
|
@ -1525,7 +1530,12 @@ private:
|
|||
ASSERT(!meta.sampler.IsBuffer());
|
||||
|
||||
const auto& entry = sampled_images.at(meta.sampler.GetIndex());
|
||||
return OpLoad(entry.sampled_image_type, entry.sampler);
|
||||
Id sampler = entry.variable;
|
||||
if (meta.sampler.IsIndexed()) {
|
||||
const Id index = AsInt(Visit(meta.index));
|
||||
sampler = OpAccessChain(entry.sampler_pointer_type, sampler, index);
|
||||
}
|
||||
return OpLoad(entry.sampler_type, sampler);
|
||||
}
|
||||
|
||||
Id GetTextureImage(Operation operation) {
|
||||
|
|
Loading…
Reference in a new issue