suyu/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp

// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <array>
#include <span>
#include <type_traits>
#include <utility>
#include <vector>

#include <boost/container/small_vector.hpp>
#include <boost/container/static_vector.hpp>

#include "video_core/renderer_vulkan/pipeline_helper.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

namespace Vulkan {
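
// Builds the descriptor set layout, pipeline layout and Vulkan pipeline object for a compute
// shader, optionally on a worker thread.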
ComputePipeline::ComputePipeline(const Device& device_, DescriptorPool& descriptor_pool,
VKUpdateDescriptorQueue& update_descriptor_queue_,
Common::ThreadWorker* thread_worker, const Shader::Info& info_,
vk::ShaderModule spv_module_)
: device{device_}, update_descriptor_queue{update_descriptor_queue_}, info{info_},
spv_module(std::move(spv_module_)) {
auto func{[this, &descriptor_pool] {
DescriptorLayoutBuilder builder{device.GetLogical()};
builder.Add(info, VK_SHADER_STAGE_COMPUTE_BIT);
descriptor_set_layout = builder.CreateDescriptorSetLayout();
pipeline_layout = builder.CreatePipelineLayout(*descriptor_set_layout);
descriptor_update_template =
builder.CreateTemplate(*descriptor_set_layout, *pipeline_layout);
descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, info);
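        // Require the guest warp size as the subgroup size; this struct is only chained into
        // the stage below when VK_EXT_subgroup_size_control is supported.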
const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
.pNext = nullptr,
.requiredSubgroupSize = GuestWarpSize,
};
pipeline = device.GetLogical().CreateComputePipeline({
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.pNext = device.IsExtSubgroupSizeControlSupported() ? &subgroup_size_ci : nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_COMPUTE_BIT,
.module = *spv_module,
.pName = "main",
.pSpecializationInfo = nullptr,
},
.layout = *pipeline_layout,
.basePipelineHandle = 0,
.basePipelineIndex = 0,
});
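        // Publish the finished pipeline and wake any thread waiting in Configure().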
std::lock_guard lock{build_mutex};
is_built = true;
build_condvar.notify_one();
}};
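    // Run the build on the worker thread when one is provided, otherwise build synchronously.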
if (thread_worker) {
thread_worker->QueueWork(std::move(func));
} else {
func();
}
}
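
// Binds every resource the shader declares (uniform buffers, storage buffers, textures, images
// and texture buffers), writes the descriptor data and records the bind commands into the
// scheduler.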
void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
Tegra::MemoryManager& gpu_memory, VKScheduler& scheduler,
BufferCache& buffer_cache, TextureCache& texture_cache) {
update_descriptor_queue.Acquire();
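    // Enable only the uniform buffers the shader actually reads.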
buffer_cache.SetEnabledComputeUniformBuffers(info.constant_buffer_mask);
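    // Rebind the shader's storage buffers; each descriptor names the constant buffer slot and
    // offset where the buffer's GPU address is stored.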
buffer_cache.UnbindComputeStorageBuffers();
size_t ssbo_index{};
for (const auto& desc : info.storage_buffers_descriptors) {
ASSERT(desc.count == 1);
buffer_cache.BindComputeStorageBuffer(ssbo_index, desc.cbuf_index, desc.cbuf_offset,
desc.is_written);
++ssbo_index;
}
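    // Prepare the texture cache and gather the texture/image handles the shader references so
    // they can be resolved in a single batch.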
texture_cache.SynchronizeComputeDescriptors();
static constexpr size_t max_elements = 64;
std::array<ImageId, max_elements> image_view_ids;
boost::container::static_vector<u32, max_elements> image_view_indices;
boost::container::static_vector<VkSampler, max_elements> samplers;
const auto& qmd{kepler_compute.launch_description};
const auto& cbufs{qmd.const_buffer_config};
const bool via_header_index{qmd.linked_tsc != 0};
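    // Reads one texture handle from the launch description's constant buffers, merging in the
    // secondary handle when the descriptor defines one.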
const auto read_handle{[&](const auto& desc, u32 index) {
ASSERT(((qmd.const_buffer_enable_mask >> desc.cbuf_index) & 1) != 0);
const u32 index_offset{index << desc.size_shift};
const u32 offset{desc.cbuf_offset + index_offset};
const GPUVAddr addr{cbufs[desc.cbuf_index].Address() + offset};
if constexpr (std::is_same_v<decltype(desc), const Shader::TextureDescriptor&> ||
std::is_same_v<decltype(desc), const Shader::TextureBufferDescriptor&>) {
if (desc.has_secondary) {
ASSERT(((qmd.const_buffer_enable_mask >> desc.secondary_cbuf_index) & 1) != 0);
const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
                const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
                                             secondary_offset};
const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
return TextureHandle{lhs_raw | rhs_raw, via_header_index};
}
}
return TextureHandle{gpu_memory.Read<u32>(addr), via_header_index};
}};
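    // Queues the handle of every element of a texture buffer, image buffer or image descriptor.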
const auto add_image{[&](const auto& desc) {
for (u32 index = 0; index < desc.count; ++index) {
const TextureHandle handle{read_handle(desc, index)};
image_view_indices.push_back(handle.image);
}
}};
std::ranges::for_each(info.texture_buffer_descriptors, add_image);
std::ranges::for_each(info.image_buffer_descriptors, add_image);
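    // Textures additionally need one sampler per handle.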
for (const auto& desc : info.texture_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
const TextureHandle handle{read_handle(desc, index)};
image_view_indices.push_back(handle.image);
Sampler* const sampler = texture_cache.GetComputeSampler(handle.sampler);
samplers.push_back(sampler->Handle());
}
}
std::ranges::for_each(info.image_descriptors, add_image);
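    // Resolve every gathered handle to an image view ID in one pass.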
const std::span indices_span(image_view_indices.data(), image_view_indices.size());
texture_cache.FillComputeImageViews(indices_span, image_view_ids);
buffer_cache.UnbindComputeTextureBuffers();
ImageId* texture_buffer_ids{image_view_ids.data()};
size_t index{};
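    // Bind texture buffers and image buffers through the buffer cache, using the image views
    // resolved above.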
const auto add_buffer{[&](const auto& desc) {
for (u32 i = 0; i < desc.count; ++i) {
bool is_written{false};
if constexpr (std::is_same_v<decltype(desc), const Shader::ImageBufferDescriptor&>) {
is_written = desc.is_written;
}
ImageView& image_view = texture_cache.GetImageView(*texture_buffer_ids);
buffer_cache.BindComputeTextureBuffer(index, image_view.GpuAddr(),
image_view.BufferSize(), image_view.format,
is_written);
++texture_buffer_ids;
++index;
}
}};
std::ranges::for_each(info.texture_buffer_descriptors, add_buffer);
std::ranges::for_each(info.image_buffer_descriptors, add_buffer);
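    // Upload any modified buffer data and bind the host buffers for this dispatch.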
buffer_cache.UpdateComputeBuffers();
buffer_cache.BindHostComputeBuffers();
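    // Queue the image and sampler descriptor writes.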
const VkSampler* samplers_it{samplers.data()};
const ImageId* views_it{image_view_ids.data()};
PushImageDescriptors(info, samplers_it, views_it, texture_cache, update_descriptor_queue);
if (!is_built.load(std::memory_order::relaxed)) {
// Wait for the pipeline to be built
scheduler.Record([this](vk::CommandBuffer) {
std::unique_lock lock{build_mutex};
build_condvar.wait(lock, [this] { return is_built.load(std::memory_order::relaxed); });
});
}
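    // Record the pipeline bind; the descriptor set is committed, written via the update template
    // and bound when the scheduler runs this command.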
const void* const descriptor_data{update_descriptor_queue.UpdateData()};
scheduler.Record([this, descriptor_data](vk::CommandBuffer cmdbuf) {
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
if (!descriptor_set_layout) {
return;
}
const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
const vk::Device& dev{device.GetLogical()};
dev.UpdateDescriptorSet(descriptor_set, *descriptor_update_template, descriptor_data);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline_layout, 0,
descriptor_set, nullptr);
});
}
} // namespace Vulkan