
vk_compute_pipeline: Make use of designated initializers where applicable

commit 5330ca396d
parent 757ddd8158
Author: Lioncash
Date:   2020-07-16 17:32:12 -04:00
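Note on the pattern: the change below uses C++20 designated initializers, which name each aggregate member at the point of initialization. Members must appear in declaration order, and any member left out is value-initialized (zeroed). A minimal before/after sketch with a hypothetical struct, not one of the real Vulkan types touched by this commit:

    // Hypothetical create-info struct, used only for illustration.
    struct ExampleCreateInfo {
        int sType;
        const void* pNext;
        unsigned flags;
        unsigned count;
    };

    // Before: default-construct, then assign every member by hand.
    // Any member that is forgotten stays uninitialized.
    ExampleCreateInfo MakeOldStyle() {
        ExampleCreateInfo ci;
        ci.sType = 1;
        ci.pNext = nullptr;
        ci.flags = 0;
        ci.count = 4;
        return ci;
    }

    // After: C++20 designated initializers. Members are named in
    // declaration order; omitted members are zero-initialized.
    ExampleCreateInfo MakeNewStyle() {
        return ExampleCreateInfo{
            .sType = 1,
            .pNext = nullptr,
            .flags = 0,
            .count = 4,
        };
    }

The same pattern also allows passing the braced initializer directly to a function call, as most of the call sites below now do.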


@@ -43,12 +43,13 @@ vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
     const auto add_bindings = [&](VkDescriptorType descriptor_type, std::size_t num_entries) {
         // TODO(Rodrigo): Maybe make individual bindings here?
         for (u32 bindpoint = 0; bindpoint < static_cast<u32>(num_entries); ++bindpoint) {
-            VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
-            entry.binding = binding++;
-            entry.descriptorType = descriptor_type;
-            entry.descriptorCount = 1;
-            entry.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
-            entry.pImmutableSamplers = nullptr;
+            bindings.push_back({
+                .binding = binding++,
+                .descriptorType = descriptor_type,
+                .descriptorCount = 1,
+                .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+                .pImmutableSamplers = nullptr,
+            });
         }
     };
     add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
@@ -58,25 +59,25 @@ vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
     add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, entries.storage_texels.size());
     add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());
 
-    VkDescriptorSetLayoutCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.bindingCount = static_cast<u32>(bindings.size());
-    ci.pBindings = bindings.data();
-    return device.GetLogical().CreateDescriptorSetLayout(ci);
+    return device.GetLogical().CreateDescriptorSetLayout({
+        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .bindingCount = static_cast<u32>(bindings.size()),
+        .pBindings = bindings.data(),
+    });
 }
 
 vk::PipelineLayout VKComputePipeline::CreatePipelineLayout() const {
-    VkPipelineLayoutCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.setLayoutCount = 1;
-    ci.pSetLayouts = descriptor_set_layout.address();
-    ci.pushConstantRangeCount = 0;
-    ci.pPushConstantRanges = nullptr;
-    return device.GetLogical().CreatePipelineLayout(ci);
+    return device.GetLogical().CreatePipelineLayout({
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .setLayoutCount = 1,
+        .pSetLayouts = descriptor_set_layout.address(),
+        .pushConstantRangeCount = 0,
+        .pPushConstantRanges = nullptr,
+    });
 }
 
 vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplate() const {
@@ -89,59 +90,63 @@ vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplat
         return {};
     }
 
-    VkDescriptorUpdateTemplateCreateInfoKHR ci;
-    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.descriptorUpdateEntryCount = static_cast<u32>(template_entries.size());
-    ci.pDescriptorUpdateEntries = template_entries.data();
-    ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
-    ci.descriptorSetLayout = *descriptor_set_layout;
-    ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
-    ci.pipelineLayout = *layout;
-    ci.set = DESCRIPTOR_SET;
-    return device.GetLogical().CreateDescriptorUpdateTemplateKHR(ci);
+    return device.GetLogical().CreateDescriptorUpdateTemplateKHR({
+        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
+        .pNext = nullptr,
+        .flags = 0,
+        .descriptorUpdateEntryCount = static_cast<u32>(template_entries.size()),
+        .pDescriptorUpdateEntries = template_entries.data(),
+        .templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
+        .descriptorSetLayout = *descriptor_set_layout,
+        .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+        .pipelineLayout = *layout,
+        .set = DESCRIPTOR_SET,
+    });
 }
 
 vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
     device.SaveShader(code);
 
-    VkShaderModuleCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.codeSize = code.size() * sizeof(u32);
-    ci.pCode = code.data();
-    return device.GetLogical().CreateShaderModule(ci);
+    return device.GetLogical().CreateShaderModule({
+        .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .codeSize = code.size() * sizeof(u32),
+        .pCode = code.data(),
+    });
 }
 
 vk::Pipeline VKComputePipeline::CreatePipeline() const {
-    VkComputePipelineCreateInfo ci;
-    VkPipelineShaderStageCreateInfo& stage_ci = ci.stage;
-    stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
-    stage_ci.pNext = nullptr;
-    stage_ci.flags = 0;
-    stage_ci.stage = VK_SHADER_STAGE_COMPUTE_BIT;
-    stage_ci.module = *shader_module;
-    stage_ci.pName = "main";
-    stage_ci.pSpecializationInfo = nullptr;
-
-    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
-    subgroup_size_ci.sType =
-        VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT;
-    subgroup_size_ci.pNext = nullptr;
-    subgroup_size_ci.requiredSubgroupSize = GuestWarpSize;
+    VkComputePipelineCreateInfo ci{
+        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .stage =
+            {
+                .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+                .pNext = nullptr,
+                .flags = 0,
+                .stage = VK_SHADER_STAGE_COMPUTE_BIT,
+                .module = *shader_module,
+                .pName = "main",
+                .pSpecializationInfo = nullptr,
+            },
+        .layout = *layout,
+        .basePipelineHandle = nullptr,
+        .basePipelineIndex = 0,
+    };
+
+    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+        .pNext = nullptr,
+        .requiredSubgroupSize = GuestWarpSize,
+    };
 
     if (entries.uses_warps && device.IsGuestWarpSizeSupported(VK_SHADER_STAGE_COMPUTE_BIT)) {
-        stage_ci.pNext = &subgroup_size_ci;
+        ci.stage.pNext = &subgroup_size_ci;
     }
 
-    ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.layout = *layout;
-    ci.basePipelineHandle = nullptr;
-    ci.basePipelineIndex = 0;
     return device.GetLogical().CreateComputePipeline(ci);
 }