spirv: Rework storage buffers and shader memory
parent c070991def
commit fa75b9b062
9 changed files with 581 additions and 500 deletions
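
What the rework boils down to: constant buffers and storage buffers are declared once per element type instead of as a single u32 array per binding, and read-modify-write atomics the device cannot express natively are emulated with an OpAtomicCompareExchange retry loop. A host-side sketch of that retry loop (illustrative C++, names are ours; std::atomic stands in for the SPIR-V atomic):

    #include <atomic>
    #include <bit>
    #include <cstdint>

    // Emulated float atomic add over a 32-bit word, mirroring the retry loop
    // that the new CasLoop helper emits. Returns the previous value, as the
    // SPIR-V helper does.
    float AtomicFAdd(std::atomic<uint32_t>& word, float arg) {
        uint32_t expected = word.load();
        for (;;) {
            const float current = std::bit_cast<float>(expected);
            const uint32_t desired = std::bit_cast<uint32_t>(current + arg);
            if (word.compare_exchange_strong(expected, desired)) {
                return current; // compare-exchange succeeded; nobody raced us
            }
            // On failure, expected holds the freshly loaded word; retry.
        }
    }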
@@ -15,7 +15,7 @@
 
 namespace Shader::Backend::SPIRV {
 namespace {
-enum class CasFunctionType {
+enum class Operation {
     Increment,
     Decrement,
     FPAdd,
@@ -23,44 +23,11 @@ enum class CasFunctionType {
     FPMax,
 };
 
-Id CasFunction(EmitContext& ctx, CasFunctionType function_type, Id value_type) {
-    const Id func_type{ctx.TypeFunction(value_type, value_type, value_type)};
-    const Id func{ctx.OpFunction(value_type, spv::FunctionControlMask::MaskNone, func_type)};
-    const Id op_a{ctx.OpFunctionParameter(value_type)};
-    const Id op_b{ctx.OpFunctionParameter(value_type)};
-    ctx.AddLabel();
-    Id result{};
-    switch (function_type) {
-    case CasFunctionType::Increment: {
-        const Id pred{ctx.OpUGreaterThanEqual(ctx.U1, op_a, op_b)};
-        const Id incr{ctx.OpIAdd(value_type, op_a, ctx.Constant(value_type, 1))};
-        result = ctx.OpSelect(value_type, pred, ctx.u32_zero_value, incr);
-        break;
-    }
-    case CasFunctionType::Decrement: {
-        const Id lhs{ctx.OpIEqual(ctx.U1, op_a, ctx.Constant(value_type, 0u))};
-        const Id rhs{ctx.OpUGreaterThan(ctx.U1, op_a, op_b)};
-        const Id pred{ctx.OpLogicalOr(ctx.U1, lhs, rhs)};
-        const Id decr{ctx.OpISub(value_type, op_a, ctx.Constant(value_type, 1))};
-        result = ctx.OpSelect(value_type, pred, op_b, decr);
-        break;
-    }
-    case CasFunctionType::FPAdd:
-        result = ctx.OpFAdd(value_type, op_a, op_b);
-        break;
-    case CasFunctionType::FPMin:
-        result = ctx.OpFMin(value_type, op_a, op_b);
-        break;
-    case CasFunctionType::FPMax:
-        result = ctx.OpFMax(value_type, op_a, op_b);
-        break;
-    default:
-        break;
-    }
-    ctx.OpReturnValue(result);
-    ctx.OpFunctionEnd();
-    return func;
-}
+struct AttrInfo {
+    Id pointer;
+    Id id;
+    bool needs_cast;
+};
 
 Id ImageType(EmitContext& ctx, const TextureDescriptor& desc) {
     const spv::ImageFormat format{spv::ImageFormat::Unknown};
@@ -182,12 +149,6 @@ Id GetAttributeType(EmitContext& ctx, AttributeType type) {
     throw InvalidArgument("Invalid attribute type {}", type);
 }
 
-struct AttrInfo {
-    Id pointer;
-    Id id;
-    bool needs_cast;
-};
-
 std::optional<AttrInfo> AttrTypes(EmitContext& ctx, u32 index) {
     const AttributeType type{ctx.profile.generic_input_types.at(index)};
     switch (type) {
@@ -203,6 +164,164 @@ std::optional<AttrInfo> AttrTypes(EmitContext& ctx, u32 index) {
     throw InvalidArgument("Invalid attribute type {}", type);
 }
 
+void DefineConstBuffers(EmitContext& ctx, const Info& info, Id UniformDefinitions::*member_type,
+                        u32 binding, Id type, char type_char, u32 element_size) {
+    const Id array_type{ctx.TypeArray(type, ctx.Constant(ctx.U32[1], 65536U / element_size))};
+    ctx.Decorate(array_type, spv::Decoration::ArrayStride, element_size);
+
+    const Id struct_type{ctx.TypeStruct(array_type)};
+    ctx.Name(struct_type, fmt::format("cbuf_block_{}{}", type_char, element_size * CHAR_BIT));
+    ctx.Decorate(struct_type, spv::Decoration::Block);
+    ctx.MemberName(struct_type, 0, "data");
+    ctx.MemberDecorate(struct_type, 0, spv::Decoration::Offset, 0U);
+
+    const Id struct_pointer_type{ctx.TypePointer(spv::StorageClass::Uniform, struct_type)};
+    const Id uniform_type{ctx.TypePointer(spv::StorageClass::Uniform, type)};
+    ctx.uniform_types.*member_type = uniform_type;
+
+    for (const ConstantBufferDescriptor& desc : info.constant_buffer_descriptors) {
+        const Id id{ctx.AddGlobalVariable(struct_pointer_type, spv::StorageClass::Uniform)};
+        ctx.Decorate(id, spv::Decoration::Binding, binding);
+        ctx.Decorate(id, spv::Decoration::DescriptorSet, 0U);
+        ctx.Name(id, fmt::format("c{}", desc.index));
+        for (size_t i = 0; i < desc.count; ++i) {
+            ctx.cbufs[desc.index + i].*member_type = id;
+        }
+        if (ctx.profile.supported_spirv >= 0x00010400) {
+            ctx.interfaces.push_back(id);
+        }
+        binding += desc.count;
+    }
+}
+
+void DefineSsbos(EmitContext& ctx, StorageTypeDefinition& type_def,
+                 Id StorageDefinitions::*member_type, const Info& info, u32 binding, Id type,
+                 u32 stride) {
+    const Id array_type{ctx.TypeRuntimeArray(type)};
+    ctx.Decorate(array_type, spv::Decoration::ArrayStride, stride);
+
+    const Id struct_type{ctx.TypeStruct(array_type)};
+    ctx.Decorate(struct_type, spv::Decoration::Block);
+    ctx.MemberDecorate(struct_type, 0, spv::Decoration::Offset, 0U);
+
+    const Id struct_pointer{ctx.TypePointer(spv::StorageClass::StorageBuffer, struct_type)};
+    type_def.array = struct_pointer;
+    type_def.element = ctx.TypePointer(spv::StorageClass::StorageBuffer, type);
+
+    u32 index{};
+    for (const StorageBufferDescriptor& desc : info.storage_buffers_descriptors) {
+        const Id id{ctx.AddGlobalVariable(struct_pointer, spv::StorageClass::StorageBuffer)};
+        ctx.Decorate(id, spv::Decoration::Binding, binding);
+        ctx.Decorate(id, spv::Decoration::DescriptorSet, 0U);
+        ctx.Name(id, fmt::format("ssbo{}", index));
+        if (ctx.profile.supported_spirv >= 0x00010400) {
+            ctx.interfaces.push_back(id);
+        }
+        for (size_t i = 0; i < desc.count; ++i) {
+            ctx.ssbos[index + i].*member_type = id;
+        }
+        index += desc.count;
+        binding += desc.count;
+    }
+}
+
+Id CasFunction(EmitContext& ctx, Operation operation, Id value_type) {
+    const Id func_type{ctx.TypeFunction(value_type, value_type, value_type)};
+    const Id func{ctx.OpFunction(value_type, spv::FunctionControlMask::MaskNone, func_type)};
+    const Id op_a{ctx.OpFunctionParameter(value_type)};
+    const Id op_b{ctx.OpFunctionParameter(value_type)};
+    ctx.AddLabel();
+    Id result{};
+    switch (operation) {
+    case Operation::Increment: {
+        const Id pred{ctx.OpUGreaterThanEqual(ctx.U1, op_a, op_b)};
+        const Id incr{ctx.OpIAdd(value_type, op_a, ctx.Constant(value_type, 1))};
+        result = ctx.OpSelect(value_type, pred, ctx.u32_zero_value, incr);
+        break;
+    }
+    case Operation::Decrement: {
+        const Id lhs{ctx.OpIEqual(ctx.U1, op_a, ctx.Constant(value_type, 0u))};
+        const Id rhs{ctx.OpUGreaterThan(ctx.U1, op_a, op_b)};
+        const Id pred{ctx.OpLogicalOr(ctx.U1, lhs, rhs)};
+        const Id decr{ctx.OpISub(value_type, op_a, ctx.Constant(value_type, 1))};
+        result = ctx.OpSelect(value_type, pred, op_b, decr);
+        break;
+    }
+    case Operation::FPAdd:
+        result = ctx.OpFAdd(value_type, op_a, op_b);
+        break;
+    case Operation::FPMin:
+        result = ctx.OpFMin(value_type, op_a, op_b);
+        break;
+    case Operation::FPMax:
+        result = ctx.OpFMax(value_type, op_a, op_b);
+        break;
+    default:
+        break;
+    }
+    ctx.OpReturnValue(result);
+    ctx.OpFunctionEnd();
+    return func;
+}
+
+Id CasLoop(EmitContext& ctx, Operation operation, Id array_pointer, Id element_pointer,
+           Id value_type, Id memory_type, spv::Scope scope) {
+    const bool is_shared{scope == spv::Scope::Workgroup};
+    const bool is_struct{!is_shared || ctx.profile.support_explicit_workgroup_layout};
+    const Id cas_func{CasFunction(ctx, operation, value_type)};
+    const Id zero{ctx.u32_zero_value};
+    const Id scope_id{ctx.Constant(ctx.U32[1], static_cast<u32>(scope))};
+
+    const Id loop_header{ctx.OpLabel()};
+    const Id continue_block{ctx.OpLabel()};
+    const Id merge_block{ctx.OpLabel()};
+    const Id func_type{is_shared
+                           ? ctx.TypeFunction(value_type, ctx.U32[1], value_type)
+                           : ctx.TypeFunction(value_type, ctx.U32[1], value_type, array_pointer)};
+
+    const Id func{ctx.OpFunction(value_type, spv::FunctionControlMask::MaskNone, func_type)};
+    const Id index{ctx.OpFunctionParameter(ctx.U32[1])};
+    const Id op_b{ctx.OpFunctionParameter(value_type)};
+    const Id base{is_shared ? ctx.shared_memory_u32 : ctx.OpFunctionParameter(array_pointer)};
+    ctx.AddLabel();
+    ctx.OpBranch(loop_header);
+    ctx.AddLabel(loop_header);
+
+    ctx.OpLoopMerge(merge_block, continue_block, spv::LoopControlMask::MaskNone);
+    ctx.OpBranch(continue_block);
+
+    ctx.AddLabel(continue_block);
+    const Id word_pointer{is_struct ? ctx.OpAccessChain(element_pointer, base, zero, index)
+                                    : ctx.OpAccessChain(element_pointer, base, index)};
+    if (value_type.value == ctx.F32[2].value) {
+        const Id u32_value{ctx.OpLoad(ctx.U32[1], word_pointer)};
+        const Id value{ctx.OpUnpackHalf2x16(ctx.F32[2], u32_value)};
+        const Id new_value{ctx.OpFunctionCall(value_type, cas_func, value, op_b)};
+        const Id u32_new_value{ctx.OpPackHalf2x16(ctx.U32[1], new_value)};
+        const Id atomic_res{ctx.OpAtomicCompareExchange(ctx.U32[1], word_pointer, scope_id, zero,
+                                                        zero, u32_new_value, u32_value)};
+        const Id success{ctx.OpIEqual(ctx.U1, atomic_res, u32_value)};
+        ctx.OpBranchConditional(success, merge_block, loop_header);
+
+        ctx.AddLabel(merge_block);
+        ctx.OpReturnValue(ctx.OpUnpackHalf2x16(ctx.F32[2], atomic_res));
+    } else {
+        const Id value{ctx.OpLoad(memory_type, word_pointer)};
+        const bool matching_type{value_type.value == memory_type.value};
+        const Id bitcast_value{matching_type ? value : ctx.OpBitcast(value_type, value)};
+        const Id cal_res{ctx.OpFunctionCall(value_type, cas_func, bitcast_value, op_b)};
+        const Id new_value{matching_type ? cal_res : ctx.OpBitcast(memory_type, cal_res)};
+        const Id atomic_res{ctx.OpAtomicCompareExchange(ctx.U32[1], word_pointer, scope_id, zero,
+                                                        zero, new_value, value)};
+        const Id success{ctx.OpIEqual(ctx.U1, atomic_res, value)};
+        ctx.OpBranchConditional(success, merge_block, loop_header);
+
+        ctx.AddLabel(merge_block);
+        ctx.OpReturnValue(ctx.OpBitcast(value_type, atomic_res));
+    }
+    ctx.OpFunctionEnd();
+    return func;
+}
 } // Anonymous namespace
 
 void VectorTypes::Define(Sirit::Module& sirit_ctx, Id base_type, std::string_view name) {
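
A note on the Increment/Decrement cases above: they implement wrap-around counter semantics rather than plain +1/-1 — the same behavior as CUDA's atomicInc/atomicDec. As plain functions (illustrative, not part of the diff):

    #include <cstdint>

    // Wraps to 0 once the old value reaches the cap `b`.
    constexpr uint32_t WrappingIncrement(uint32_t a, uint32_t b) {
        return a >= b ? 0 : a + 1;
    }

    // Wraps to the cap `b` when the old value is 0 or already above the cap.
    constexpr uint32_t WrappingDecrement(uint32_t a, uint32_t b) {
        return (a == 0 || a > b) ? b : a - 1;
    }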
@@ -226,6 +345,7 @@ EmitContext::EmitContext(const Profile& profile_, IR::Program& program, u32& binding
     DefineInterfaces(program.info);
     DefineLocalMemory(program);
     DefineSharedMemory(program);
+    DefineSharedMemoryFunctions(program);
     DefineConstantBuffers(program.info, binding);
     DefineStorageBuffers(program.info, binding);
     DefineTextureBuffers(program.info, binding);
@@ -263,56 +383,6 @@ Id EmitContext::Def(const IR::Value& value) {
     }
 }
 
-Id EmitContext::CasLoop(Id function, CasPointerType pointer_type, Id value_type) {
-    const Id loop_header{OpLabel()};
-    const Id continue_block{OpLabel()};
-    const Id merge_block{OpLabel()};
-    const Id storage_type{pointer_type == CasPointerType::Shared ? shared_memory_u32_type
-                                                                 : storage_memory_u32};
-    const Id func_type{TypeFunction(value_type, U32[1], value_type, storage_type)};
-    const Id func{OpFunction(value_type, spv::FunctionControlMask::MaskNone, func_type)};
-    const Id index{OpFunctionParameter(U32[1])};
-    const Id op_b{OpFunctionParameter(value_type)};
-    const Id base{OpFunctionParameter(storage_type)};
-    AddLabel();
-    const Id one{Constant(U32[1], 1)};
-    OpBranch(loop_header);
-    AddLabel(loop_header);
-    OpLoopMerge(merge_block, continue_block, spv::LoopControlMask::MaskNone);
-    OpBranch(continue_block);
-
-    AddLabel(continue_block);
-    const Id word_pointer{pointer_type == CasPointerType::Shared
-                              ? OpAccessChain(shared_u32, base, index)
-                              : OpAccessChain(storage_u32, base, u32_zero_value, index)};
-    if (value_type.value == F32[2].value) {
-        const Id u32_value{OpLoad(U32[1], word_pointer)};
-        const Id value{OpUnpackHalf2x16(F32[2], u32_value)};
-        const Id new_value{OpFunctionCall(value_type, function, value, op_b)};
-        const Id u32_new_value{OpPackHalf2x16(U32[1], new_value)};
-        const Id atomic_res{OpAtomicCompareExchange(U32[1], word_pointer, one, u32_zero_value,
-                                                    u32_zero_value, u32_new_value, u32_value)};
-        const Id success{OpIEqual(U1, atomic_res, u32_value)};
-        OpBranchConditional(success, merge_block, loop_header);
-
-        AddLabel(merge_block);
-        OpReturnValue(OpUnpackHalf2x16(F32[2], atomic_res));
-    } else {
-        const Id value{OpLoad(U32[1], word_pointer)};
-        const Id new_value{OpBitcast(
-            U32[1], OpFunctionCall(value_type, function, OpBitcast(value_type, value), op_b))};
-        const Id atomic_res{OpAtomicCompareExchange(U32[1], word_pointer, one, u32_zero_value,
-                                                    u32_zero_value, new_value, value)};
-        const Id success{OpIEqual(U1, atomic_res, value)};
-        OpBranchConditional(success, merge_block, loop_header);
-
-        AddLabel(merge_block);
-        OpReturnValue(OpBitcast(value_type, atomic_res));
-    }
-    OpFunctionEnd();
-    return func;
-}
-
 void EmitContext::DefineCommonTypes(const Info& info) {
     void_id = TypeVoid();
 
@@ -397,27 +467,31 @@ void EmitContext::DefineSharedMemory(const IR::Program& program) {
         Decorate(variable, spv::Decoration::Aliased);
         interfaces.push_back(variable);
 
-        return std::make_pair(variable, element_pointer);
+        return std::make_tuple(variable, element_pointer, pointer);
     }};
     if (profile.support_explicit_workgroup_layout) {
         AddExtension("SPV_KHR_workgroup_memory_explicit_layout");
         AddCapability(spv::Capability::WorkgroupMemoryExplicitLayoutKHR);
         if (program.info.uses_int8) {
             AddCapability(spv::Capability::WorkgroupMemoryExplicitLayout8BitAccessKHR);
-            std::tie(shared_memory_u8, shared_u8) = make(U8, 1);
+            std::tie(shared_memory_u8, shared_u8, std::ignore) = make(U8, 1);
         }
         if (program.info.uses_int16) {
            AddCapability(spv::Capability::WorkgroupMemoryExplicitLayout16BitAccessKHR);
-            std::tie(shared_memory_u16, shared_u16) = make(U16, 2);
+            std::tie(shared_memory_u16, shared_u16, std::ignore) = make(U16, 2);
         }
-        std::tie(shared_memory_u32, shared_u32) = make(U32[1], 4);
-        std::tie(shared_memory_u32x2, shared_u32x2) = make(U32[2], 8);
-        std::tie(shared_memory_u32x4, shared_u32x4) = make(U32[4], 16);
+        if (program.info.uses_int64) {
+            std::tie(shared_memory_u64, shared_u64, std::ignore) = make(U64, 8);
+        }
+        std::tie(shared_memory_u32, shared_u32, shared_memory_u32_type) = make(U32[1], 4);
+        std::tie(shared_memory_u32x2, shared_u32x2, std::ignore) = make(U32[2], 8);
+        std::tie(shared_memory_u32x4, shared_u32x4, std::ignore) = make(U32[4], 16);
         return;
     }
     const u32 num_elements{Common::DivCeil(program.shared_memory_size, 4U)};
     const Id type{TypeArray(U32[1], Constant(U32[1], num_elements))};
     shared_memory_u32_type = TypePointer(spv::StorageClass::Workgroup, type);
 
     shared_u32 = TypePointer(spv::StorageClass::Workgroup, U32[1]);
     shared_memory_u32 = AddGlobalVariable(shared_memory_u32_type, spv::StorageClass::Workgroup);
     interfaces.push_back(shared_memory_u32);
@@ -463,13 +537,16 @@ void EmitContext::DefineSharedMemory(const IR::Program& program) {
     if (program.info.uses_int16) {
         shared_store_u16_func = make_function(16, 16);
     }
+}
+
+void EmitContext::DefineSharedMemoryFunctions(const IR::Program& program) {
     if (program.info.uses_shared_increment) {
-        const Id inc_func{CasFunction(*this, CasFunctionType::Increment, U32[1])};
-        increment_cas_shared = CasLoop(inc_func, CasPointerType::Shared, U32[1]);
+        increment_cas_shared = CasLoop(*this, Operation::Increment, shared_memory_u32_type,
+                                       shared_u32, U32[1], U32[1], spv::Scope::Workgroup);
     }
     if (program.info.uses_shared_decrement) {
-        const Id dec_func{CasFunction(*this, CasFunctionType::Decrement, U32[1])};
-        decrement_cas_shared = CasLoop(dec_func, CasPointerType::Shared, U32[1]);
+        decrement_cas_shared = CasLoop(*this, Operation::Decrement, shared_memory_u32_type,
+                                       shared_u32, U32[1], U32[1], spv::Scope::Workgroup);
     }
 }
@@ -628,21 +705,24 @@ void EmitContext::DefineConstantBuffers(const Info& info, u32& binding) {
         return;
     }
     if (True(info.used_constant_buffer_types & IR::Type::U8)) {
-        DefineConstantBuffers(info, &UniformDefinitions::U8, binding, U8, 'u', sizeof(u8));
-        DefineConstantBuffers(info, &UniformDefinitions::S8, binding, S8, 's', sizeof(s8));
+        DefineConstBuffers(*this, info, &UniformDefinitions::U8, binding, U8, 'u', sizeof(u8));
+        DefineConstBuffers(*this, info, &UniformDefinitions::S8, binding, S8, 's', sizeof(s8));
     }
     if (True(info.used_constant_buffer_types & IR::Type::U16)) {
-        DefineConstantBuffers(info, &UniformDefinitions::U16, binding, U16, 'u', sizeof(u16));
-        DefineConstantBuffers(info, &UniformDefinitions::S16, binding, S16, 's', sizeof(s16));
+        DefineConstBuffers(*this, info, &UniformDefinitions::U16, binding, U16, 'u', sizeof(u16));
+        DefineConstBuffers(*this, info, &UniformDefinitions::S16, binding, S16, 's', sizeof(s16));
     }
     if (True(info.used_constant_buffer_types & IR::Type::U32)) {
-        DefineConstantBuffers(info, &UniformDefinitions::U32, binding, U32[1], 'u', sizeof(u32));
+        DefineConstBuffers(*this, info, &UniformDefinitions::U32, binding, U32[1], 'u',
+                           sizeof(u32));
     }
     if (True(info.used_constant_buffer_types & IR::Type::F32)) {
-        DefineConstantBuffers(info, &UniformDefinitions::F32, binding, F32[1], 'f', sizeof(f32));
+        DefineConstBuffers(*this, info, &UniformDefinitions::F32, binding, F32[1], 'f',
+                           sizeof(f32));
     }
     if (True(info.used_constant_buffer_types & IR::Type::U32x2)) {
-        DefineConstantBuffers(info, &UniformDefinitions::U32x2, binding, U32[2], 'u', sizeof(u64));
+        DefineConstBuffers(*this, info, &UniformDefinitions::U32x2, binding, U32[2], 'u',
+                           sizeof(u32[2]));
     }
     for (const ConstantBufferDescriptor& desc : info.constant_buffer_descriptors) {
         binding += desc.count;
@@ -655,75 +735,83 @@ void EmitContext::DefineStorageBuffers(const Info& info, u32& binding) {
     }
     AddExtension("SPV_KHR_storage_buffer_storage_class");
 
-    const Id array_type{TypeRuntimeArray(U32[1])};
-    Decorate(array_type, spv::Decoration::ArrayStride, 4U);
-
-    const Id struct_type{TypeStruct(array_type)};
-    Name(struct_type, "ssbo_block");
-    Decorate(struct_type, spv::Decoration::Block);
-    MemberName(struct_type, 0, "data");
-    MemberDecorate(struct_type, 0, spv::Decoration::Offset, 0U);
-
-    storage_memory_u32 = TypePointer(spv::StorageClass::StorageBuffer, struct_type);
-    storage_u32 = TypePointer(spv::StorageClass::StorageBuffer, U32[1]);
-
-    u32 index{};
-    for (const StorageBufferDescriptor& desc : info.storage_buffers_descriptors) {
-        const Id id{AddGlobalVariable(storage_memory_u32, spv::StorageClass::StorageBuffer)};
-        Decorate(id, spv::Decoration::Binding, binding);
-        Decorate(id, spv::Decoration::DescriptorSet, 0U);
-        Name(id, fmt::format("ssbo{}", index));
-        if (profile.supported_spirv >= 0x00010400) {
-            interfaces.push_back(id);
-        }
-        std::fill_n(ssbos.data() + index, desc.count, id);
-        index += desc.count;
+    if (True(info.used_storage_buffer_types & IR::Type::U8)) {
+        DefineSsbos(*this, storage_types.U8, &StorageDefinitions::U8, info, binding, U8,
+                    sizeof(u8));
+        DefineSsbos(*this, storage_types.S8, &StorageDefinitions::S8, info, binding, S8,
+                    sizeof(u8));
+    }
+    if (True(info.used_storage_buffer_types & IR::Type::U16)) {
+        DefineSsbos(*this, storage_types.U16, &StorageDefinitions::U16, info, binding, U16,
+                    sizeof(u16));
+        DefineSsbos(*this, storage_types.S16, &StorageDefinitions::S16, info, binding, S16,
+                    sizeof(u16));
+    }
+    if (True(info.used_storage_buffer_types & IR::Type::U32)) {
+        DefineSsbos(*this, storage_types.U32, &StorageDefinitions::U32, info, binding, U32[1],
+                    sizeof(u32));
+    }
+    if (True(info.used_storage_buffer_types & IR::Type::F32)) {
+        DefineSsbos(*this, storage_types.F32, &StorageDefinitions::F32, info, binding, F32[1],
+                    sizeof(f32));
+    }
+    if (True(info.used_storage_buffer_types & IR::Type::U64)) {
+        DefineSsbos(*this, storage_types.U64, &StorageDefinitions::U64, info, binding, U64,
+                    sizeof(u64));
+    }
+    if (True(info.used_storage_buffer_types & IR::Type::U32x2)) {
+        DefineSsbos(*this, storage_types.U32x2, &StorageDefinitions::U32x2, info, binding, U32[2],
+                    sizeof(u32[2]));
+    }
+    if (True(info.used_storage_buffer_types & IR::Type::U32x4)) {
+        DefineSsbos(*this, storage_types.U32x4, &StorageDefinitions::U32x4, info, binding, U32[4],
+                    sizeof(u32[4]));
+    }
+    for (const StorageBufferDescriptor& desc : info.storage_buffers_descriptors) {
         binding += desc.count;
     }
-    if (info.uses_global_increment) {
+    const bool needs_function{
+        info.uses_global_increment || info.uses_global_decrement || info.uses_atomic_f32_add ||
+        info.uses_atomic_f16x2_add || info.uses_atomic_f16x2_min || info.uses_atomic_f16x2_max ||
+        info.uses_atomic_f32x2_add || info.uses_atomic_f32x2_min || info.uses_atomic_f32x2_max};
+    if (needs_function) {
         AddCapability(spv::Capability::VariablePointersStorageBuffer);
-        const Id inc_func{CasFunction(*this, CasFunctionType::Increment, U32[1])};
-        increment_cas_ssbo = CasLoop(inc_func, CasPointerType::Ssbo, U32[1]);
+    }
+    if (info.uses_global_increment) {
+        increment_cas_ssbo = CasLoop(*this, Operation::Increment, storage_types.U32.array,
+                                     storage_types.U32.element, U32[1], U32[1], spv::Scope::Device);
     }
     if (info.uses_global_decrement) {
-        AddCapability(spv::Capability::VariablePointersStorageBuffer);
-        const Id dec_func{CasFunction(*this, CasFunctionType::Decrement, U32[1])};
-        decrement_cas_ssbo = CasLoop(dec_func, CasPointerType::Ssbo, U32[1]);
+        decrement_cas_ssbo = CasLoop(*this, Operation::Decrement, storage_types.U32.array,
+                                     storage_types.U32.element, U32[1], U32[1], spv::Scope::Device);
     }
     if (info.uses_atomic_f32_add) {
-        AddCapability(spv::Capability::VariablePointersStorageBuffer);
-        const Id add_func{CasFunction(*this, CasFunctionType::FPAdd, F32[1])};
-        f32_add_cas = CasLoop(add_func, CasPointerType::Ssbo, F32[1]);
+        f32_add_cas = CasLoop(*this, Operation::FPAdd, storage_types.U32.array,
+                              storage_types.U32.element, F32[1], U32[1], spv::Scope::Device);
     }
     if (info.uses_atomic_f16x2_add) {
-        AddCapability(spv::Capability::VariablePointersStorageBuffer);
-        const Id add_func{CasFunction(*this, CasFunctionType::FPAdd, F16[2])};
-        f16x2_add_cas = CasLoop(add_func, CasPointerType::Ssbo, F16[2]);
+        f16x2_add_cas = CasLoop(*this, Operation::FPAdd, storage_types.U32.array,
+                                storage_types.U32.element, F16[2], F16[2], spv::Scope::Device);
     }
     if (info.uses_atomic_f16x2_min) {
-        AddCapability(spv::Capability::VariablePointersStorageBuffer);
-        const Id func{CasFunction(*this, CasFunctionType::FPMin, F16[2])};
-        f16x2_min_cas = CasLoop(func, CasPointerType::Ssbo, F16[2]);
+        f16x2_min_cas = CasLoop(*this, Operation::FPMin, storage_types.U32.array,
+                                storage_types.U32.element, F16[2], F16[2], spv::Scope::Device);
     }
     if (info.uses_atomic_f16x2_max) {
-        AddCapability(spv::Capability::VariablePointersStorageBuffer);
-        const Id func{CasFunction(*this, CasFunctionType::FPMax, F16[2])};
-        f16x2_max_cas = CasLoop(func, CasPointerType::Ssbo, F16[2]);
+        f16x2_max_cas = CasLoop(*this, Operation::FPMax, storage_types.U32.array,
+                                storage_types.U32.element, F16[2], F16[2], spv::Scope::Device);
     }
     if (info.uses_atomic_f32x2_add) {
-        AddCapability(spv::Capability::VariablePointersStorageBuffer);
-        const Id add_func{CasFunction(*this, CasFunctionType::FPAdd, F32[2])};
-        f32x2_add_cas = CasLoop(add_func, CasPointerType::Ssbo, F32[2]);
+        f32x2_add_cas = CasLoop(*this, Operation::FPAdd, storage_types.U32.array,
+                                storage_types.U32.element, F32[2], F32[2], spv::Scope::Device);
     }
     if (info.uses_atomic_f32x2_min) {
-        AddCapability(spv::Capability::VariablePointersStorageBuffer);
-        const Id func{CasFunction(*this, CasFunctionType::FPMin, F32[2])};
-        f32x2_min_cas = CasLoop(func, CasPointerType::Ssbo, F32[2]);
+        f32x2_min_cas = CasLoop(*this, Operation::FPMin, storage_types.U32.array,
+                                storage_types.U32.element, F32[2], F32[2], spv::Scope::Device);
     }
     if (info.uses_atomic_f32x2_max) {
-        AddCapability(spv::Capability::VariablePointersStorageBuffer);
-        const Id func{CasFunction(*this, CasFunctionType::FPMax, F32[2])};
-        f32x2_max_cas = CasLoop(func, CasPointerType::Ssbo, F32[2]);
+        f32x2_max_cas = CasLoop(*this, Operation::FPMax, storage_types.U32.array,
+                                storage_types.U32.element, F32[2], F32[2], spv::Scope::Device);
     }
 }
@@ -903,36 +991,6 @@ void EmitContext::DefineInputs(const Info& info) {
     }
 }
 
-void EmitContext::DefineConstantBuffers(const Info& info, Id UniformDefinitions::*member_type,
-                                        u32 binding, Id type, char type_char, u32 element_size) {
-    const Id array_type{TypeArray(type, Constant(U32[1], 65536U / element_size))};
-    Decorate(array_type, spv::Decoration::ArrayStride, element_size);
-
-    const Id struct_type{TypeStruct(array_type)};
-    Name(struct_type, fmt::format("cbuf_block_{}{}", type_char, element_size * CHAR_BIT));
-    Decorate(struct_type, spv::Decoration::Block);
-    MemberName(struct_type, 0, "data");
-    MemberDecorate(struct_type, 0, spv::Decoration::Offset, 0U);
-
-    const Id struct_pointer_type{TypePointer(spv::StorageClass::Uniform, struct_type)};
-    const Id uniform_type{TypePointer(spv::StorageClass::Uniform, type)};
-    uniform_types.*member_type = uniform_type;
-
-    for (const ConstantBufferDescriptor& desc : info.constant_buffer_descriptors) {
-        const Id id{AddGlobalVariable(struct_pointer_type, spv::StorageClass::Uniform)};
-        Decorate(id, spv::Decoration::Binding, binding);
-        Decorate(id, spv::Decoration::DescriptorSet, 0U);
-        Name(id, fmt::format("c{}", desc.index));
-        for (size_t i = 0; i < desc.count; ++i) {
-            cbufs[desc.index + i].*member_type = id;
-        }
-        if (profile.supported_spirv >= 0x00010400) {
-            interfaces.push_back(id);
-        }
-        binding += desc.count;
-    }
-}
-
 void EmitContext::DefineOutputs(const Info& info) {
     if (info.stores_position || stage == Stage::VertexB) {
         output_position = DefineOutput(*this, F32[4], spv::BuiltIn::Position);
@@ -50,6 +50,35 @@ struct UniformDefinitions {
     Id U32x2{};
 };
 
+struct StorageTypeDefinition {
+    Id array{};
+    Id element{};
+};
+
+struct StorageTypeDefinitions {
+    StorageTypeDefinition U8{};
+    StorageTypeDefinition S8{};
+    StorageTypeDefinition U16{};
+    StorageTypeDefinition S16{};
+    StorageTypeDefinition U32{};
+    StorageTypeDefinition U64{};
+    StorageTypeDefinition F32{};
+    StorageTypeDefinition U32x2{};
+    StorageTypeDefinition U32x4{};
+};
+
+struct StorageDefinitions {
+    Id U8{};
+    Id S8{};
+    Id U16{};
+    Id S16{};
+    Id U32{};
+    Id F32{};
+    Id U64{};
+    Id U32x2{};
+    Id U32x4{};
+};
+
 class EmitContext final : public Sirit::Module {
 public:
     explicit EmitContext(const Profile& profile, IR::Program& program, u32& binding);
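
The idea behind these additions: every used element type gets its own aliased view of the same SSBO binding — StorageTypeDefinition holds the pointer types (whole Block struct and single element), while each slot of ssbos[] records the per-type variables. With ArrayStride equal to the element size, a byte offset maps to an array index by a shift, which is what StorageIndex in the .cpp changes does; a host-side equivalent (names are ours):

    #include <bit>
    #include <cstdint>

    // Byte offset -> index into a runtime array whose ArrayStride equals the
    // element size; sizes are powers of two (1, 2, 4, 8, 16), so the divide
    // is a shift.
    uint32_t StorageElementIndex(uint32_t byte_offset, uint32_t element_size) {
        return byte_offset >> std::countr_zero(element_size);
    }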
@@ -78,12 +107,14 @@ public:
     Id f32_zero_value{};
 
     UniformDefinitions uniform_types;
+    StorageTypeDefinitions storage_types;
 
     Id private_u32{};
 
     Id shared_u8{};
     Id shared_u16{};
     Id shared_u32{};
+    Id shared_u64{};
     Id shared_u32x2{};
     Id shared_u32x4{};
@@ -93,14 +124,11 @@ public:
 
     Id output_f32{};
 
-    Id storage_u32{};
-    Id storage_memory_u32{};
-
     Id image_buffer_type{};
     Id sampled_texture_buffer_type{};
 
     std::array<UniformDefinitions, Info::MAX_CBUFS> cbufs{};
-    std::array<Id, Info::MAX_SSBOS> ssbos{};
+    std::array<StorageDefinitions, Info::MAX_SSBOS> ssbos{};
     std::vector<Id> texture_buffers;
     std::vector<TextureDefinition> textures;
     std::vector<ImageDefinition> images;
@@ -136,8 +164,10 @@ public:
     Id shared_memory_u8{};
     Id shared_memory_u16{};
     Id shared_memory_u32{};
+    Id shared_memory_u64{};
     Id shared_memory_u32x2{};
     Id shared_memory_u32x4{};
 
     Id shared_memory_u32_type{};
 
     Id shared_store_u8_func{};
@@ -167,16 +197,12 @@ public:
     std::vector<Id> interfaces;
 
 private:
-    enum class CasPointerType {
-        Shared,
-        Ssbo,
-    };
-
     void DefineCommonTypes(const Info& info);
     void DefineCommonConstants();
     void DefineInterfaces(const Info& info);
     void DefineLocalMemory(const IR::Program& program);
     void DefineSharedMemory(const IR::Program& program);
+    void DefineSharedMemoryFunctions(const IR::Program& program);
     void DefineConstantBuffers(const Info& info, u32& binding);
     void DefineStorageBuffers(const Info& info, u32& binding);
     void DefineTextureBuffers(const Info& info, u32& binding);
@@ -185,13 +211,8 @@ private:
     void DefineAttributeMemAccess(const Info& info);
     void DefineLabels(IR::Program& program);
 
-    void DefineConstantBuffers(const Info& info, Id UniformDefinitions::*member_type, u32 binding,
-                               Id type, char type_char, u32 element_size);
-
     void DefineInputs(const Info& info);
     void DefineOutputs(const Info& info);
-
-    [[nodiscard]] Id CasLoop(Id function, CasPointerType pointer_type, Id value_type);
 };
 
 } // namespace Shader::Backend::SPIRV
@@ -276,7 +276,7 @@ void SetupCapabilities(const Profile& profile, const Info& info, EmitContext& ctx
             ctx.AddCapability(spv::Capability::SubgroupVoteKHR);
         }
     }
-    if (info.uses_64_bit_atomics && profile.support_int64_atomics) {
+    if (info.uses_int64_bit_atomics && profile.support_int64_atomics) {
         ctx.AddCapability(spv::Capability::Int64Atomics);
     }
     if (info.uses_typeless_image_reads && profile.support_typeless_image_loads) {
@@ -89,17 +89,21 @@ void EmitWriteGlobalS16(EmitContext& ctx);
 void EmitWriteGlobal32(EmitContext& ctx);
 void EmitWriteGlobal64(EmitContext& ctx);
 void EmitWriteGlobal128(EmitContext& ctx);
-void EmitLoadStorageU8(EmitContext& ctx);
-void EmitLoadStorageS8(EmitContext& ctx);
-void EmitLoadStorageU16(EmitContext& ctx);
-void EmitLoadStorageS16(EmitContext& ctx);
+Id EmitLoadStorageU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitLoadStorageS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitLoadStorageU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitLoadStorageS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
 Id EmitLoadStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
 Id EmitLoadStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
 Id EmitLoadStorage128(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
-void EmitWriteStorageU8(EmitContext& ctx);
-void EmitWriteStorageS8(EmitContext& ctx);
-void EmitWriteStorageU16(EmitContext& ctx);
-void EmitWriteStorageS16(EmitContext& ctx);
+void EmitWriteStorageU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                        Id value);
+void EmitWriteStorageS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                        Id value);
+void EmitWriteStorageU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                         Id value);
+void EmitWriteStorageS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                         Id value);
 void EmitWriteStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                         Id value);
 void EmitWriteStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
@@ -6,11 +6,12 @@
 
 namespace Shader::Backend::SPIRV {
 namespace {
-Id GetSharedPointer(EmitContext& ctx, Id offset, u32 index_offset = 0) {
+Id SharedPointer(EmitContext& ctx, Id offset, u32 index_offset = 0) {
     const Id shift_id{ctx.Constant(ctx.U32[1], 2U)};
-    const Id shifted_value{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
-    const Id index{ctx.OpIAdd(ctx.U32[1], shifted_value, ctx.Constant(ctx.U32[1], index_offset))};
+    Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
+    if (index_offset > 0) {
+        index = ctx.OpIAdd(ctx.U32[1], index, ctx.Constant(ctx.U32[1], index_offset));
+    }
     return ctx.profile.support_explicit_workgroup_layout
                ? ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, ctx.u32_zero_value, index)
               : ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, index);
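
SharedPointer above turns a byte offset into an index into the u32-typed workgroup array; index_offset selects a neighboring word, which the 64-bit paths use to address the two halves of a split value. Note the new version only emits the OpIAdd when index_offset is nonzero, saving an instruction. The arithmetic in host terms (illustrative):

    #include <cstdint>

    // Byte offset -> 32-bit word index; +1 selects the high word of a u64
    // that has been split across two 32-bit slots.
    uint32_t SharedWordIndex(uint32_t byte_offset, uint32_t index_offset = 0) {
        return (byte_offset >> 2) + index_offset;
    }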
@ -30,340 +31,258 @@ Id StorageIndex(EmitContext& ctx, const IR::Value& offset, size_t element_size)
|
||||||
return ctx.OpShiftRightLogical(ctx.U32[1], index, shift_id);
|
return ctx.OpShiftRightLogical(ctx.U32[1], index, shift_id);
|
||||||
}
|
}
|
||||||
|
|
||||||
Id GetStoragePointer(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
|
Id StoragePointer(EmitContext& ctx, const StorageTypeDefinition& type_def,
|
||||||
u32 index_offset = 0) {
|
Id StorageDefinitions::*member_ptr, const IR::Value& binding,
|
||||||
// TODO: Support reinterpreting bindings, guaranteed to be aligned
|
const IR::Value& offset, size_t element_size) {
|
||||||
if (!binding.IsImmediate()) {
|
if (!binding.IsImmediate()) {
|
||||||
throw NotImplementedException("Dynamic storage buffer indexing");
|
throw NotImplementedException("Dynamic storage buffer indexing");
|
||||||
}
|
}
|
||||||
const Id ssbo{ctx.ssbos[binding.U32()]};
|
const Id ssbo{ctx.ssbos[binding.U32()].*member_ptr};
|
||||||
const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
|
const Id index{StorageIndex(ctx, offset, element_size)};
|
||||||
const Id index{ctx.OpIAdd(ctx.U32[1], base_index, ctx.Constant(ctx.U32[1], index_offset))};
|
return ctx.OpAccessChain(type_def.element, ssbo, ctx.u32_zero_value, index);
|
||||||
return ctx.OpAccessChain(ctx.storage_u32, ssbo, ctx.u32_zero_value, index);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
std::pair<Id, Id> GetAtomicArgs(EmitContext& ctx) {
|
std::pair<Id, Id> AtomicArgs(EmitContext& ctx) {
|
||||||
const Id scope{ctx.Constant(ctx.U32[1], static_cast<u32>(spv::Scope::Device))};
|
const Id scope{ctx.Constant(ctx.U32[1], static_cast<u32>(spv::Scope::Device))};
|
||||||
const Id semantics{ctx.u32_zero_value};
|
const Id semantics{ctx.u32_zero_value};
|
||||||
return {scope, semantics};
|
return {scope, semantics};
|
||||||
}
|
}
|
||||||
|
|
||||||
Id LoadU64(EmitContext& ctx, Id pointer_1, Id pointer_2) {
|
Id SharedAtomicU32(EmitContext& ctx, Id offset, Id value,
|
||||||
const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
|
Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id)) {
|
||||||
const Id value_2{ctx.OpLoad(ctx.U32[1], pointer_2)};
|
const Id pointer{SharedPointer(ctx, offset)};
|
||||||
const Id original_composite{ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2)};
|
const auto [scope, semantics]{AtomicArgs(ctx)};
|
||||||
return ctx.OpBitcast(ctx.U64, original_composite);
|
return (ctx.*atomic_func)(ctx.U32[1], pointer, scope, semantics, value);
|
||||||
}
|
}
|
||||||
|
|
||||||
void StoreResult(EmitContext& ctx, Id pointer_1, Id pointer_2, Id result) {
|
Id StorageAtomicU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id value,
|
||||||
const Id composite{ctx.OpBitcast(ctx.U32[2], result)};
|
Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id)) {
|
||||||
ctx.OpStore(pointer_1, ctx.OpCompositeExtract(ctx.U32[1], composite, 0));
|
const Id pointer{StoragePointer(ctx, ctx.storage_types.U32, &StorageDefinitions::U32, binding,
|
||||||
ctx.OpStore(pointer_2, ctx.OpCompositeExtract(ctx.U32[1], composite, 1));
|
offset, sizeof(u32))};
|
||||||
|
const auto [scope, semantics]{AtomicArgs(ctx)};
|
||||||
|
return (ctx.*atomic_func)(ctx.U32[1], pointer, scope, semantics, value);
|
||||||
|
}
|
||||||
|
|
||||||
|
Id StorageAtomicU64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id value,
|
||||||
|
Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id),
|
||||||
|
Id (Sirit::Module::*non_atomic_func)(Id, Id, Id)) {
|
||||||
|
if (ctx.profile.support_int64_atomics) {
|
||||||
|
const Id pointer{StoragePointer(ctx, ctx.storage_types.U64, &StorageDefinitions::U64,
|
||||||
|
binding, offset, sizeof(u64))};
|
||||||
|
const auto [scope, semantics]{AtomicArgs(ctx)};
|
||||||
|
return (ctx.*atomic_func)(ctx.U64, pointer, scope, semantics, value);
|
||||||
|
}
|
||||||
|
// LOG_WARNING(..., "Int64 Atomics not supported, fallback to non-atomic");
|
||||||
|
const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
|
||||||
|
binding, offset, sizeof(u32[2]))};
|
||||||
|
const Id original_value{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
|
||||||
|
const Id result{(ctx.*non_atomic_func)(ctx.U64, value, original_value)};
|
||||||
|
ctx.OpStore(pointer, result);
|
||||||
|
return original_value;
|
||||||
}
|
}
|
||||||
} // Anonymous namespace
|
} // Anonymous namespace
|
||||||
|
|
||||||
Id EmitSharedAtomicIAdd32(EmitContext& ctx, Id pointer_offset, Id value) {
|
Id EmitSharedAtomicIAdd32(EmitContext& ctx, Id offset, Id value) {
|
||||||
const Id pointer{GetSharedPointer(ctx, pointer_offset)};
|
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicIAdd);
|
||||||
const auto [scope, semantics]{GetAtomicArgs(ctx)};
|
|
||||||
return ctx.OpAtomicIAdd(ctx.U32[1], pointer, scope, semantics, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Id EmitSharedAtomicSMin32(EmitContext& ctx, Id pointer_offset, Id value) {
|
Id EmitSharedAtomicSMin32(EmitContext& ctx, Id offset, Id value) {
|
||||||
const Id pointer{GetSharedPointer(ctx, pointer_offset)};
|
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicSMin);
|
||||||
const auto [scope, semantics]{GetAtomicArgs(ctx)};
|
|
||||||
return ctx.OpAtomicSMin(ctx.U32[1], pointer, scope, semantics, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Id EmitSharedAtomicUMin32(EmitContext& ctx, Id pointer_offset, Id value) {
|
Id EmitSharedAtomicUMin32(EmitContext& ctx, Id offset, Id value) {
|
||||||
const Id pointer{GetSharedPointer(ctx, pointer_offset)};
|
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicUMin);
|
||||||
const auto [scope, semantics]{GetAtomicArgs(ctx)};
|
|
||||||
return ctx.OpAtomicUMin(ctx.U32[1], pointer, scope, semantics, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Id EmitSharedAtomicSMax32(EmitContext& ctx, Id pointer_offset, Id value) {
|
Id EmitSharedAtomicSMax32(EmitContext& ctx, Id offset, Id value) {
|
||||||
const Id pointer{GetSharedPointer(ctx, pointer_offset)};
|
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicSMax);
|
||||||
const auto [scope, semantics]{GetAtomicArgs(ctx)};
|
|
||||||
return ctx.OpAtomicSMax(ctx.U32[1], pointer, scope, semantics, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Id EmitSharedAtomicUMax32(EmitContext& ctx, Id pointer_offset, Id value) {
|
Id EmitSharedAtomicUMax32(EmitContext& ctx, Id offset, Id value) {
|
||||||
const Id pointer{GetSharedPointer(ctx, pointer_offset)};
|
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicUMax);
|
||||||
const auto [scope, semantics]{GetAtomicArgs(ctx)};
|
|
||||||
return ctx.OpAtomicUMax(ctx.U32[1], pointer, scope, semantics, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Id EmitSharedAtomicInc32(EmitContext& ctx, Id pointer_offset, Id value) {
|
Id EmitSharedAtomicInc32(EmitContext& ctx, Id offset, Id value) {
|
||||||
const Id shift_id{ctx.Constant(ctx.U32[1], 2U)};
|
const Id shift_id{ctx.Constant(ctx.U32[1], 2U)};
|
||||||
const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], pointer_offset, shift_id)};
|
const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
|
||||||
return ctx.OpFunctionCall(ctx.U32[1], ctx.increment_cas_shared, index, value,
|
return ctx.OpFunctionCall(ctx.U32[1], ctx.increment_cas_shared, index, value);
|
||||||
ctx.shared_memory_u32);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Id EmitSharedAtomicDec32(EmitContext& ctx, Id pointer_offset, Id value) {
|
Id EmitSharedAtomicDec32(EmitContext& ctx, Id offset, Id value) {
|
||||||
const Id shift_id{ctx.Constant(ctx.U32[1], 2U)};
|
const Id shift_id{ctx.Constant(ctx.U32[1], 2U)};
|
||||||
const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], pointer_offset, shift_id)};
|
const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
|
||||||
return ctx.OpFunctionCall(ctx.U32[1], ctx.decrement_cas_shared, index, value,
|
return ctx.OpFunctionCall(ctx.U32[1], ctx.decrement_cas_shared, index, value);
|
||||||
ctx.shared_memory_u32);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Id EmitSharedAtomicAnd32(EmitContext& ctx, Id pointer_offset, Id value) {
|
Id EmitSharedAtomicAnd32(EmitContext& ctx, Id offset, Id value) {
|
||||||
const Id pointer{GetSharedPointer(ctx, pointer_offset)};
|
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicAnd);
|
||||||
const auto [scope, semantics]{GetAtomicArgs(ctx)};
|
|
||||||
return ctx.OpAtomicAnd(ctx.U32[1], pointer, scope, semantics, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Id EmitSharedAtomicOr32(EmitContext& ctx, Id pointer_offset, Id value) {
|
Id EmitSharedAtomicOr32(EmitContext& ctx, Id offset, Id value) {
|
||||||
const Id pointer{GetSharedPointer(ctx, pointer_offset)};
|
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicOr);
|
||||||
const auto [scope, semantics]{GetAtomicArgs(ctx)};
|
|
||||||
return ctx.OpAtomicOr(ctx.U32[1], pointer, scope, semantics, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Id EmitSharedAtomicXor32(EmitContext& ctx, Id pointer_offset, Id value) {
|
Id EmitSharedAtomicXor32(EmitContext& ctx, Id offset, Id value) {
|
||||||
const Id pointer{GetSharedPointer(ctx, pointer_offset)};
|
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicXor);
|
||||||
const auto [scope, semantics]{GetAtomicArgs(ctx)};
|
|
||||||
return ctx.OpAtomicXor(ctx.U32[1], pointer, scope, semantics, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Id EmitSharedAtomicExchange32(EmitContext& ctx, Id pointer_offset, Id value) {
|
Id EmitSharedAtomicExchange32(EmitContext& ctx, Id offset, Id value) {
|
||||||
const Id pointer{GetSharedPointer(ctx, pointer_offset)};
|
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicExchange);
|
||||||
const auto [scope, semantics]{GetAtomicArgs(ctx)};
|
|
||||||
return ctx.OpAtomicExchange(ctx.U32[1], pointer, scope, semantics, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Id EmitSharedAtomicExchange64(EmitContext& ctx, Id pointer_offset, Id value) {
|
Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
|
||||||
const Id pointer_1{GetSharedPointer(ctx, pointer_offset)};
|
if (ctx.profile.support_int64_atomics && ctx.profile.support_explicit_workgroup_layout) {
|
||||||
if (ctx.profile.support_int64_atomics) {
|
const Id shift_id{ctx.Constant(ctx.U32[1], 3U)};
|
||||||
const auto [scope, semantics]{GetAtomicArgs(ctx)};
|
const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
|
||||||
return ctx.OpAtomicExchange(ctx.U64, pointer_1, scope, semantics, value);
|
const Id pointer{
|
||||||
|
ctx.OpAccessChain(ctx.shared_u64, ctx.shared_memory_u64, ctx.u32_zero_value, index)};
|
||||||
|
const auto [scope, semantics]{AtomicArgs(ctx)};
|
||||||
|
return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
|
||||||
}
|
}
|
||||||
// LOG_WARNING(Render_Vulkan, "Int64 Atomics not supported, fallback to non-atomic");
|
// LOG_WARNING("Int64 Atomics not supported, fallback to non-atomic");
|
||||||
const Id pointer_2{GetSharedPointer(ctx, pointer_offset, 1)};
|
const Id pointer_1{SharedPointer(ctx, offset, 0)};
|
||||||
const Id original_value{LoadU64(ctx, pointer_1, pointer_2)};
|
const Id pointer_2{SharedPointer(ctx, offset, 1)};
|
||||||
StoreResult(ctx, pointer_1, pointer_2, value);
|
const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
|
||||||
return original_value;
|
const Id value_2{ctx.OpLoad(ctx.U32[1], pointer_2)};
|
||||||
|
const Id new_vector{ctx.OpBitcast(ctx.U32[2], value)};
|
||||||
|
ctx.OpStore(pointer_1, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 0U));
|
||||||
|
ctx.OpStore(pointer_2, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 1U));
|
||||||
|
return ctx.OpBitcast(ctx.U64, ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2));
|
||||||
}
|
}
|
||||||
|
|
||||||
 Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value) {
-    const Id pointer{GetStoragePointer(ctx, binding, offset)};
-    const auto [scope, semantics]{GetAtomicArgs(ctx)};
-    return ctx.OpAtomicIAdd(ctx.U32[1], pointer, scope, semantics, value);
+    return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicIAdd);
 }

 Id EmitStorageAtomicSMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value) {
-    const Id pointer{GetStoragePointer(ctx, binding, offset)};
-    const auto [scope, semantics]{GetAtomicArgs(ctx)};
-    return ctx.OpAtomicSMin(ctx.U32[1], pointer, scope, semantics, value);
+    return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicSMin);
 }

 Id EmitStorageAtomicUMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value) {
-    const Id pointer{GetStoragePointer(ctx, binding, offset)};
-    const auto [scope, semantics]{GetAtomicArgs(ctx)};
-    return ctx.OpAtomicUMin(ctx.U32[1], pointer, scope, semantics, value);
+    return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicUMin);
 }

 Id EmitStorageAtomicSMax32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value) {
-    const Id pointer{GetStoragePointer(ctx, binding, offset)};
-    const auto [scope, semantics]{GetAtomicArgs(ctx)};
-    return ctx.OpAtomicSMax(ctx.U32[1], pointer, scope, semantics, value);
+    return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicSMax);
 }

 Id EmitStorageAtomicUMax32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value) {
-    const Id pointer{GetStoragePointer(ctx, binding, offset)};
-    const auto [scope, semantics]{GetAtomicArgs(ctx)};
-    return ctx.OpAtomicUMax(ctx.U32[1], pointer, scope, semantics, value);
+    return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicUMax);
 }
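Note: all of the plain 32-bit integer atomics above now funnel through a single StorageAtomicU32 helper parameterized on a Sirit::Module member-function pointer. Its definition sits in a hunk this page does not render; a minimal sketch consistent with the call sites (and with the StoragePointer/AtomicArgs helpers visible further down) would be:

// Sketch only, inferred from call sites; assumes EmitContext derives from
// Sirit::Module so the member-function pointer can be invoked on ctx.
Id StorageAtomicU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                    Id value, Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id)) {
    const Id pointer{StoragePointer(ctx, ctx.storage_types.U32, &StorageDefinitions::U32,
                                    binding, offset, sizeof(u32))};
    const auto [scope, semantics]{AtomicArgs(ctx)};
    return (ctx.*atomic_func)(ctx.U32[1], pointer, scope, semantics, value);
}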
 Id EmitStorageAtomicInc32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                           Id value) {
-    const Id ssbo{ctx.ssbos[binding.U32()]};
+    const Id ssbo{ctx.ssbos[binding.U32()].U32};
     const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
     return ctx.OpFunctionCall(ctx.U32[1], ctx.increment_cas_ssbo, base_index, value, ssbo);
 }

 Id EmitStorageAtomicDec32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                           Id value) {
-    const Id ssbo{ctx.ssbos[binding.U32()]};
+    const Id ssbo{ctx.ssbos[binding.U32()].U32};
     const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
     return ctx.OpFunctionCall(ctx.U32[1], ctx.decrement_cas_ssbo, base_index, value, ssbo);
 }
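Note: ctx.ssbos no longer stores one Id per binding; each entry is now a bundle of typed views, and call sites select the one they need (.U32 here). The declaration is not shown on this page; a plausible shape, inferred from the member accesses and &StorageDefinitions::X member pointers used throughout the diff:

// Assumed layout; the real declaration lives in the EmitContext header.
struct StorageDefinitions {
    Id U8{};
    Id S8{};
    Id U16{};
    Id S16{};
    Id U32{};
    Id U64{};
    Id U32x2{};
    Id U32x4{};
};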
 Id EmitStorageAtomicAnd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                           Id value) {
-    const Id pointer{GetStoragePointer(ctx, binding, offset)};
-    const auto [scope, semantics]{GetAtomicArgs(ctx)};
-    return ctx.OpAtomicAnd(ctx.U32[1], pointer, scope, semantics, value);
+    return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicAnd);
 }

 Id EmitStorageAtomicOr32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                          Id value) {
-    const Id pointer{GetStoragePointer(ctx, binding, offset)};
-    const auto [scope, semantics]{GetAtomicArgs(ctx)};
-    return ctx.OpAtomicOr(ctx.U32[1], pointer, scope, semantics, value);
+    return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicOr);
 }

 Id EmitStorageAtomicXor32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                           Id value) {
-    const Id pointer{GetStoragePointer(ctx, binding, offset)};
-    const auto [scope, semantics]{GetAtomicArgs(ctx)};
-    return ctx.OpAtomicXor(ctx.U32[1], pointer, scope, semantics, value);
+    return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicXor);
 }

 Id EmitStorageAtomicExchange32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                                Id value) {
-    const Id pointer{GetStoragePointer(ctx, binding, offset)};
-    const auto [scope, semantics]{GetAtomicArgs(ctx)};
-    return ctx.OpAtomicExchange(ctx.U32[1], pointer, scope, semantics, value);
+    return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicExchange);
 }
 Id EmitStorageAtomicIAdd64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value) {
-    const Id pointer_1{GetStoragePointer(ctx, binding, offset)};
-    if (ctx.profile.support_int64_atomics) {
-        const auto [scope, semantics]{GetAtomicArgs(ctx)};
-        return ctx.OpAtomicIAdd(ctx.U64, pointer_1, scope, semantics, value);
-    }
-    // LOG_WARNING(Render_Vulkan, "Int64 Atomics not supported, fallback to non-atomic");
-    const Id pointer_2{GetStoragePointer(ctx, binding, offset, 1)};
-    const Id original_value{LoadU64(ctx, pointer_1, pointer_2)};
-    const Id result{ctx.OpIAdd(ctx.U64, value, original_value)};
-    StoreResult(ctx, pointer_1, pointer_2, result);
-    return original_value;
+    return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicIAdd,
+                            &Sirit::Module::OpIAdd);
 }
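Note: the 64-bit helpers take two member pointers: the native atomic plus the plain binary op used for a load-modify-store fallback when ctx.profile.support_int64_atomics is false. A sketch consistent with the removed inline code and the new call sites (assumption: the real definition is in an unrendered hunk):

// Sketch only. The fallback path is not atomic, matching the removed
// LOG_WARNING in the old code.
Id StorageAtomicU64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                    Id value, Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id),
                    Id (Sirit::Module::*non_atomic_func)(Id, Id, Id)) {
    if (ctx.profile.support_int64_atomics) {
        const Id pointer{StoragePointer(ctx, ctx.storage_types.U64, &StorageDefinitions::U64,
                                        binding, offset, sizeof(u64))};
        const auto [scope, semantics]{AtomicArgs(ctx)};
        return (ctx.*atomic_func)(ctx.U64, pointer, scope, semantics, value);
    }
    const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
                                    binding, offset, sizeof(u32[2]))};
    const Id original{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
    const Id result{(ctx.*non_atomic_func)(ctx.U64, value, original)};
    ctx.OpStore(pointer, ctx.OpBitcast(ctx.U32[2], result));
    return original;
}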
 Id EmitStorageAtomicSMin64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value) {
-    const Id pointer_1{GetStoragePointer(ctx, binding, offset)};
-    if (ctx.profile.support_int64_atomics) {
-        const auto [scope, semantics]{GetAtomicArgs(ctx)};
-        return ctx.OpAtomicSMin(ctx.U64, pointer_1, scope, semantics, value);
-    }
-    // LOG_WARNING(Render_Vulkan, "Int64 Atomics not supported, fallback to non-atomic");
-    const Id pointer_2{GetStoragePointer(ctx, binding, offset, 1)};
-    const Id original_value{LoadU64(ctx, pointer_1, pointer_2)};
-    const Id result{ctx.OpSMin(ctx.U64, value, original_value)};
-    StoreResult(ctx, pointer_1, pointer_2, result);
-    return original_value;
+    return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicSMin,
+                            &Sirit::Module::OpSMin);
 }

 Id EmitStorageAtomicUMin64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value) {
-    const Id pointer_1{GetStoragePointer(ctx, binding, offset)};
-    if (ctx.profile.support_int64_atomics) {
-        const auto [scope, semantics]{GetAtomicArgs(ctx)};
-        return ctx.OpAtomicUMin(ctx.U64, pointer_1, scope, semantics, value);
-    }
-    // LOG_WARNING(Render_Vulkan, "Int64 Atomics not supported, fallback to non-atomic");
-    const Id pointer_2{GetStoragePointer(ctx, binding, offset, 1)};
-    const Id original_value{LoadU64(ctx, pointer_1, pointer_2)};
-    const Id result{ctx.OpUMin(ctx.U64, value, original_value)};
-    StoreResult(ctx, pointer_1, pointer_2, result);
-    return original_value;
+    return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicUMin,
+                            &Sirit::Module::OpUMin);
 }

 Id EmitStorageAtomicSMax64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value) {
-    const Id pointer_1{GetStoragePointer(ctx, binding, offset)};
-    if (ctx.profile.support_int64_atomics) {
-        const auto [scope, semantics]{GetAtomicArgs(ctx)};
-        return ctx.OpAtomicSMax(ctx.U64, pointer_1, scope, semantics, value);
-    }
-    // LOG_WARNING(Render_Vulkan, "Int64 Atomics not supported, fallback to non-atomic");
-    const Id pointer_2{GetStoragePointer(ctx, binding, offset, 1)};
-    const Id original_value{LoadU64(ctx, pointer_1, pointer_2)};
-    const Id result{ctx.OpSMax(ctx.U64, value, original_value)};
-    StoreResult(ctx, pointer_1, pointer_2, result);
-    return original_value;
+    return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicSMax,
+                            &Sirit::Module::OpSMax);
 }

 Id EmitStorageAtomicUMax64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value) {
-    const Id pointer_1{GetStoragePointer(ctx, binding, offset)};
-    if (ctx.profile.support_int64_atomics) {
-        const auto [scope, semantics]{GetAtomicArgs(ctx)};
-        return ctx.OpAtomicUMax(ctx.U64, pointer_1, scope, semantics, value);
-    }
-    // LOG_WARNING(Render_Vulkan, "Int64 Atomics not supported, fallback to non-atomic");
-    const Id pointer_2{GetStoragePointer(ctx, binding, offset, 1)};
-    const Id original_value{LoadU64(ctx, pointer_1, pointer_2)};
-    const Id result{ctx.OpUMax(ctx.U64, value, original_value)};
-    StoreResult(ctx, pointer_1, pointer_2, result);
-    return original_value;
+    return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicUMax,
+                            &Sirit::Module::OpUMax);
 }

 Id EmitStorageAtomicAnd64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                           Id value) {
-    const Id pointer_1{GetStoragePointer(ctx, binding, offset)};
-    if (ctx.profile.support_int64_atomics) {
-        const auto [scope, semantics]{GetAtomicArgs(ctx)};
-        return ctx.OpAtomicAnd(ctx.U64, pointer_1, scope, semantics, value);
-    }
-    // LOG_WARNING(Render_Vulkan, "Int64 Atomics not supported, fallback to non-atomic");
-    const Id pointer_2{GetStoragePointer(ctx, binding, offset, 1)};
-    const Id original_value{LoadU64(ctx, pointer_1, pointer_2)};
-    const Id result{ctx.OpBitwiseAnd(ctx.U64, value, original_value)};
-    StoreResult(ctx, pointer_1, pointer_2, result);
-    return original_value;
+    return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicAnd,
+                            &Sirit::Module::OpBitwiseAnd);
 }

 Id EmitStorageAtomicOr64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                          Id value) {
-    const Id pointer_1{GetStoragePointer(ctx, binding, offset)};
-    if (ctx.profile.support_int64_atomics) {
-        const auto [scope, semantics]{GetAtomicArgs(ctx)};
-        return ctx.OpAtomicOr(ctx.U64, pointer_1, scope, semantics, value);
-    }
-    // LOG_WARNING(Render_Vulkan, "Int64 Atomics not supported, fallback to non-atomic");
-    const Id pointer_2{GetStoragePointer(ctx, binding, offset, 1)};
-    const Id original_value{LoadU64(ctx, pointer_1, pointer_2)};
-    const Id result{ctx.OpBitwiseOr(ctx.U64, value, original_value)};
-    StoreResult(ctx, pointer_1, pointer_2, result);
-    return original_value;
+    return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicOr,
+                            &Sirit::Module::OpBitwiseOr);
 }

 Id EmitStorageAtomicXor64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                           Id value) {
-    const Id pointer_1{GetStoragePointer(ctx, binding, offset)};
-    if (ctx.profile.support_int64_atomics) {
-        const auto [scope, semantics]{GetAtomicArgs(ctx)};
-        return ctx.OpAtomicXor(ctx.U64, pointer_1, scope, semantics, value);
-    }
-    // LOG_WARNING(Render_Vulkan, "Int64 Atomics not supported, fallback to non-atomic");
-    const Id pointer_2{GetStoragePointer(ctx, binding, offset, 1)};
-    const Id original_value{LoadU64(ctx, pointer_1, pointer_2)};
-    const Id result{ctx.OpBitwiseXor(ctx.U64, value, original_value)};
-    StoreResult(ctx, pointer_1, pointer_2, result);
-    return original_value;
+    return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicXor,
+                            &Sirit::Module::OpBitwiseXor);
 }
 Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                                Id value) {
-    const Id pointer_1{GetStoragePointer(ctx, binding, offset)};
     if (ctx.profile.support_int64_atomics) {
-        const auto [scope, semantics]{GetAtomicArgs(ctx)};
-        return ctx.OpAtomicExchange(ctx.U64, pointer_1, scope, semantics, value);
+        const Id pointer{StoragePointer(ctx, ctx.storage_types.U64, &StorageDefinitions::U64,
+                                        binding, offset, sizeof(u64))};
+        const auto [scope, semantics]{AtomicArgs(ctx)};
+        return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
     }
-    // LOG_WARNING(Render_Vulkan, "Int64 Atomics not supported, fallback to non-atomic");
-    const Id pointer_2{GetStoragePointer(ctx, binding, offset, 1)};
-    const Id original_value{LoadU64(ctx, pointer_1, pointer_2)};
-    StoreResult(ctx, pointer_1, pointer_2, value);
-    return original_value;
+    // LOG_WARNING(..., "Int64 Atomics not supported, fallback to non-atomic");
+    const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
+                                    binding, offset, sizeof(u32[2]))};
+    const Id original{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
+    ctx.OpStore(pointer, value);
+    return original;
 }
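Note: GetAtomicArgs is renamed to AtomicArgs on the new side of this hunk. Its body is not rendered here; a plausible sketch (the device scope and relaxed semantics are assumptions, chosen because they are the conventional arguments for these SPIR-V atomics):

// Assumed definition; returns the scope and memory-semantics operands that
// every atomic above passes to its Op* call.
std::pair<Id, Id> AtomicArgs(EmitContext& ctx) {
    const Id scope{ctx.Constant(ctx.U32[1], static_cast<u32>(spv::Scope::Device))};
    const Id semantics{ctx.u32_zero_value}; // relaxed memory semantics
    return {scope, semantics};
}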
 Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value) {
-    const Id ssbo{ctx.ssbos[binding.U32()]};
+    const Id ssbo{ctx.ssbos[binding.U32()].U32};
     const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
     return ctx.OpFunctionCall(ctx.F32[1], ctx.f32_add_cas, base_index, value, ssbo);
 }

 Id EmitStorageAtomicAddF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                              Id value) {
-    const Id ssbo{ctx.ssbos[binding.U32()]};
+    const Id ssbo{ctx.ssbos[binding.U32()].U32};
     const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
     const Id result{ctx.OpFunctionCall(ctx.F16[2], ctx.f16x2_add_cas, base_index, value, ssbo)};
     return ctx.OpBitcast(ctx.U32[1], result);

@@ -371,7 +290,7 @@ Id EmitStorageAtomicAddF16x2(EmitContext& ctx, const IR::Value& binding, const I

 Id EmitStorageAtomicAddF32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                              Id value) {
-    const Id ssbo{ctx.ssbos[binding.U32()]};
+    const Id ssbo{ctx.ssbos[binding.U32()].U32};
     const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
     const Id result{ctx.OpFunctionCall(ctx.F32[2], ctx.f32x2_add_cas, base_index, value, ssbo)};
     return ctx.OpPackHalf2x16(ctx.U32[1], result);

@@ -379,7 +298,7 @@ Id EmitStorageAtomicAddF32x2(EmitContext& ctx, const IR::Value& binding, const I

 Id EmitStorageAtomicMinF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                              Id value) {
-    const Id ssbo{ctx.ssbos[binding.U32()]};
+    const Id ssbo{ctx.ssbos[binding.U32()].U32};
     const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
     const Id result{ctx.OpFunctionCall(ctx.F16[2], ctx.f16x2_min_cas, base_index, value, ssbo)};
     return ctx.OpBitcast(ctx.U32[1], result);

@@ -387,7 +306,7 @@ Id EmitStorageAtomicMinF16x2(EmitContext& ctx, const IR::Value& binding, const I

 Id EmitStorageAtomicMinF32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                              Id value) {
-    const Id ssbo{ctx.ssbos[binding.U32()]};
+    const Id ssbo{ctx.ssbos[binding.U32()].U32};
     const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
     const Id result{ctx.OpFunctionCall(ctx.F32[2], ctx.f32x2_min_cas, base_index, value, ssbo)};
     return ctx.OpPackHalf2x16(ctx.U32[1], result);

@@ -395,7 +314,7 @@ Id EmitStorageAtomicMinF32x2(EmitContext& ctx, const IR::Value& binding, const I

 Id EmitStorageAtomicMaxF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                              Id value) {
-    const Id ssbo{ctx.ssbos[binding.U32()]};
+    const Id ssbo{ctx.ssbos[binding.U32()].U32};
     const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
     const Id result{ctx.OpFunctionCall(ctx.F16[2], ctx.f16x2_max_cas, base_index, value, ssbo)};
     return ctx.OpBitcast(ctx.U32[1], result);

@@ -403,7 +322,7 @@ Id EmitStorageAtomicMaxF16x2(EmitContext& ctx, const IR::Value& binding, const I

 Id EmitStorageAtomicMaxF32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                              Id value) {
-    const Id ssbo{ctx.ssbos[binding.U32()]};
+    const Id ssbo{ctx.ssbos[binding.U32()].U32};
     const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
     const Id result{ctx.OpFunctionCall(ctx.F32[2], ctx.f32x2_max_cas, base_index, value, ssbo)};
     return ctx.OpPackHalf2x16(ctx.U32[1], result);
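Note: the float and packed-half atomics have no native SPIR-V instruction, so they call per-module helper functions (ctx.f32_add_cas and friends), which presumably loop on OpAtomicCompareExchange as the _cas suffix suggests. The contract those helpers implement, written as runnable host C++ purely for illustration:

#include <atomic>
#include <bit>
#include <cstdint>

// Returns the previous raw 32-bit word, like the SPIR-V helper does.
std::uint32_t F32AddCas(std::atomic<std::uint32_t>& word, float operand) {
    std::uint32_t expected = word.load();
    for (;;) {
        const float sum = std::bit_cast<float>(expected) + operand;
        if (word.compare_exchange_weak(expected, std::bit_cast<std::uint32_t>(sum))) {
            return expected; // value observed before the add took effect
        }
        // On failure, compare_exchange_weak reloads `expected`; retry.
    }
}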
@@ -22,29 +22,29 @@ Id StorageIndex(EmitContext& ctx, const IR::Value& offset, size_t element_size)
     return ctx.OpShiftRightLogical(ctx.U32[1], index, shift_id);
 }
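Note: only the tail of StorageIndex survives the viewer's folding above. A sketch of the full function consistent with that tail (assumptions: immediate offsets are folded at compile time, dynamic byte offsets are shifted down by a power-of-two element size, and ctx.Def resolves an IR value to its SPIR-V Id):

Id StorageIndex(EmitContext& ctx, const IR::Value& offset, size_t element_size) {
    if (offset.IsImmediate()) {
        // Constant byte offset: divide once at compile time.
        return ctx.Constant(ctx.U32[1], offset.U32() / static_cast<u32>(element_size));
    }
    const u32 shift{static_cast<u32>(std::countr_zero(element_size))};
    const Id index{ctx.Def(offset)};
    if (shift == 0) {
        return index;
    }
    const Id shift_id{ctx.Constant(ctx.U32[1], shift)};
    return ctx.OpShiftRightLogical(ctx.U32[1], index, shift_id);
}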
-Id EmitLoadStorage(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
-                   u32 num_components) {
-    // TODO: Support reinterpreting bindings, guaranteed to be aligned
+Id StoragePointer(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                  const StorageTypeDefinition& type_def, size_t element_size,
+                  Id StorageDefinitions::*member_ptr) {
     if (!binding.IsImmediate()) {
         throw NotImplementedException("Dynamic storage buffer indexing");
     }
-    const Id ssbo{ctx.ssbos[binding.U32()]};
-    const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
-    std::array<Id, 4> components;
-    for (u32 element = 0; element < num_components; ++element) {
-        Id index{base_index};
-        if (element > 0) {
-            index = ctx.OpIAdd(ctx.U32[1], base_index, ctx.Constant(ctx.U32[1], element));
-        }
-        const Id pointer{ctx.OpAccessChain(ctx.storage_u32, ssbo, ctx.u32_zero_value, index)};
-        components[element] = ctx.OpLoad(ctx.U32[1], pointer);
-    }
-    if (num_components == 1) {
-        return components[0];
-    } else {
-        const std::span components_span(components.data(), num_components);
-        return ctx.OpCompositeConstruct(ctx.U32[num_components], components_span);
-    }
-}
+    const Id ssbo{ctx.ssbos[binding.U32()].*member_ptr};
+    const Id index{StorageIndex(ctx, offset, element_size)};
+    return ctx.OpAccessChain(type_def.element, ssbo, ctx.u32_zero_value, index);
+}
+
+Id LoadStorage(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id result_type,
+               const StorageTypeDefinition& type_def, size_t element_size,
+               Id StorageDefinitions::*member_ptr) {
+    const Id pointer{StoragePointer(ctx, binding, offset, type_def, element_size, member_ptr)};
+    return ctx.OpLoad(result_type, pointer);
+}
+
+void WriteStorage(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id value,
+                  const StorageTypeDefinition& type_def, size_t element_size,
+                  Id StorageDefinitions::*member_ptr) {
+    const Id pointer{StoragePointer(ctx, binding, offset, type_def, element_size, member_ptr)};
+    ctx.OpStore(pointer, value);
+}
 } // Anonymous namespace
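Note: StoragePointer pairs a StorageTypeDefinition with a pointer-to-member that picks the matching typed SSBO view. Only type_def.element appears in the rendered hunks; a plausible shape for the struct (the array member is an assumption):

struct StorageTypeDefinition {
    Id array{};   // assumed: runtime-array/struct types backing the SSBO block
    Id element{}; // pointer type of one element, consumed by OpAccessChain
};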
@@ -104,92 +104,85 @@ void EmitWriteGlobal128(EmitContext&) {
     throw NotImplementedException("SPIR-V Instruction");
 }

-void EmitLoadStorageU8(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
+Id EmitLoadStorageU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+    return ctx.OpUConvert(ctx.U32[1],
+                          LoadStorage(ctx, binding, offset, ctx.U8, ctx.storage_types.U8,
+                                      sizeof(u8), &StorageDefinitions::U8));
 }

-void EmitLoadStorageS8(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
+Id EmitLoadStorageS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+    return ctx.OpSConvert(ctx.U32[1],
+                          LoadStorage(ctx, binding, offset, ctx.S8, ctx.storage_types.S8,
+                                      sizeof(s8), &StorageDefinitions::S8));
 }

-void EmitLoadStorageU16(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
+Id EmitLoadStorageU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+    return ctx.OpUConvert(ctx.U32[1],
+                          LoadStorage(ctx, binding, offset, ctx.U16, ctx.storage_types.U16,
+                                      sizeof(u16), &StorageDefinitions::U16));
 }

-void EmitLoadStorageS16(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
+Id EmitLoadStorageS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+    return ctx.OpSConvert(ctx.U32[1],
+                          LoadStorage(ctx, binding, offset, ctx.S16, ctx.storage_types.S16,
+                                      sizeof(s16), &StorageDefinitions::S16));
 }
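Note: the U8/S8 and U16/S16 loads differ only in OpUConvert (zero-extend) versus OpSConvert (sign-extend) when widening to the 32-bit IR value. The host-side equivalent of that distinction:

#include <cstdint>

// Zero-extension keeps the raw bits (OpUConvert analogue).
static_assert(static_cast<std::uint32_t>(std::uint8_t{0xFF}) == 0xFFu);
// Sign-extension replicates the sign bit (OpSConvert analogue).
static_assert(static_cast<std::uint32_t>(std::int8_t{-1}) == 0xFFFFFFFFu);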
 Id EmitLoadStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
-    return EmitLoadStorage(ctx, binding, offset, 1);
+    return LoadStorage(ctx, binding, offset, ctx.U32[1], ctx.storage_types.U32, sizeof(u32),
+                       &StorageDefinitions::U32);
 }

 Id EmitLoadStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
-    return EmitLoadStorage(ctx, binding, offset, 2);
+    return LoadStorage(ctx, binding, offset, ctx.U32[2], ctx.storage_types.U32x2, sizeof(u32[2]),
+                       &StorageDefinitions::U32x2);
 }

 Id EmitLoadStorage128(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
-    return EmitLoadStorage(ctx, binding, offset, 4);
+    return LoadStorage(ctx, binding, offset, ctx.U32[4], ctx.storage_types.U32x4, sizeof(u32[4]),
+                       &StorageDefinitions::U32x4);
 }
-void EmitWriteStorageU8(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
+void EmitWriteStorageU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                        Id value) {
+    WriteStorage(ctx, binding, offset, ctx.OpSConvert(ctx.U8, value), ctx.storage_types.U8,
+                 sizeof(u8), &StorageDefinitions::U8);
 }

-void EmitWriteStorageS8(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
+void EmitWriteStorageS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                        Id value) {
+    WriteStorage(ctx, binding, offset, ctx.OpSConvert(ctx.S8, value), ctx.storage_types.S8,
+                 sizeof(s8), &StorageDefinitions::S8);
 }

-void EmitWriteStorageU16(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
+void EmitWriteStorageU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                         Id value) {
+    WriteStorage(ctx, binding, offset, ctx.OpSConvert(ctx.U16, value), ctx.storage_types.U16,
+                 sizeof(u16), &StorageDefinitions::U16);
 }

-void EmitWriteStorageS16(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
+void EmitWriteStorageS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                         Id value) {
+    WriteStorage(ctx, binding, offset, ctx.OpSConvert(ctx.S16, value), ctx.storage_types.S16,
+                 sizeof(s16), &StorageDefinitions::S16);
 }
 void EmitWriteStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                         Id value) {
-    if (!binding.IsImmediate()) {
-        throw NotImplementedException("Dynamic storage buffer indexing");
-    }
-    const Id ssbo{ctx.ssbos[binding.U32()]};
-    const Id index{StorageIndex(ctx, offset, sizeof(u32))};
-    const Id pointer{ctx.OpAccessChain(ctx.storage_u32, ssbo, ctx.u32_zero_value, index)};
-    ctx.OpStore(pointer, value);
+    WriteStorage(ctx, binding, offset, value, ctx.storage_types.U32, sizeof(u32),
+                 &StorageDefinitions::U32);
 }
 void EmitWriteStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                         Id value) {
-    if (!binding.IsImmediate()) {
-        throw NotImplementedException("Dynamic storage buffer indexing");
-    }
-    // TODO: Support reinterpreting bindings, guaranteed to be aligned
-    const Id ssbo{ctx.ssbos[binding.U32()]};
-    const Id low_index{StorageIndex(ctx, offset, sizeof(u32))};
-    const Id high_index{ctx.OpIAdd(ctx.U32[1], low_index, ctx.Constant(ctx.U32[1], 1U))};
-    const Id low_pointer{ctx.OpAccessChain(ctx.storage_u32, ssbo, ctx.u32_zero_value, low_index)};
-    const Id high_pointer{ctx.OpAccessChain(ctx.storage_u32, ssbo, ctx.u32_zero_value, high_index)};
-    ctx.OpStore(low_pointer, ctx.OpCompositeExtract(ctx.U32[1], value, 0U));
-    ctx.OpStore(high_pointer, ctx.OpCompositeExtract(ctx.U32[1], value, 1U));
+    WriteStorage(ctx, binding, offset, value, ctx.storage_types.U32x2, sizeof(u32[2]),
+                 &StorageDefinitions::U32x2);
 }
 void EmitWriteStorage128(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                          Id value) {
-    if (!binding.IsImmediate()) {
-        throw NotImplementedException("Dynamic storage buffer indexing");
-    }
-    // TODO: Support reinterpreting bindings, guaranteed to be aligned
-    const Id ssbo{ctx.ssbos[binding.U32()]};
-    const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
-    for (u32 element = 0; element < 4; ++element) {
-        Id index = base_index;
-        if (element > 0) {
-            index = ctx.OpIAdd(ctx.U32[1], base_index, ctx.Constant(ctx.U32[1], element));
-        }
-        const Id pointer{ctx.OpAccessChain(ctx.storage_u32, ssbo, ctx.u32_zero_value, index)};
-        ctx.OpStore(pointer, ctx.OpCompositeExtract(ctx.U32[1], value, element));
-    }
+    WriteStorage(ctx, binding, offset, value, ctx.storage_types.U32x4, sizeof(u32[4]),
+                 &StorageDefinitions::U32x4);
 }

 } // namespace Shader::Backend::SPIRV

@@ -315,6 +315,23 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     case IR::Opcode::ConvertF32U64:
     case IR::Opcode::ConvertF64U64:
     case IR::Opcode::SharedAtomicExchange64:
+    case IR::Opcode::GlobalAtomicIAdd64:
+    case IR::Opcode::GlobalAtomicSMin64:
+    case IR::Opcode::GlobalAtomicUMin64:
+    case IR::Opcode::GlobalAtomicSMax64:
+    case IR::Opcode::GlobalAtomicUMax64:
+    case IR::Opcode::GlobalAtomicAnd64:
+    case IR::Opcode::GlobalAtomicOr64:
+    case IR::Opcode::GlobalAtomicXor64:
+    case IR::Opcode::GlobalAtomicExchange64:
+    case IR::Opcode::StorageAtomicIAdd64:
+    case IR::Opcode::StorageAtomicSMin64:
+    case IR::Opcode::StorageAtomicUMin64:
+    case IR::Opcode::StorageAtomicSMax64:
+    case IR::Opcode::StorageAtomicUMax64:
+    case IR::Opcode::StorageAtomicAnd64:
+    case IR::Opcode::StorageAtomicOr64:
+    case IR::Opcode::StorageAtomicXor64:
         info.uses_int64 = true;
         break;
     default:
@@ -457,46 +474,91 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     case IR::Opcode::FSwizzleAdd:
         info.uses_fswzadd = true;
         break;
+    case IR::Opcode::LoadStorageU8:
+    case IR::Opcode::LoadStorageS8:
+    case IR::Opcode::WriteStorageU8:
+    case IR::Opcode::WriteStorageS8:
+        info.used_storage_buffer_types |= IR::Type::U8;
+        break;
+    case IR::Opcode::LoadStorageU16:
+    case IR::Opcode::LoadStorageS16:
+    case IR::Opcode::WriteStorageU16:
+    case IR::Opcode::WriteStorageS16:
+        info.used_storage_buffer_types |= IR::Type::U16;
+        break;
+    case IR::Opcode::LoadStorage32:
+    case IR::Opcode::WriteStorage32:
+    case IR::Opcode::StorageAtomicIAdd32:
+    case IR::Opcode::StorageAtomicSMin32:
+    case IR::Opcode::StorageAtomicUMin32:
+    case IR::Opcode::StorageAtomicSMax32:
+    case IR::Opcode::StorageAtomicUMax32:
+    case IR::Opcode::StorageAtomicAnd32:
+    case IR::Opcode::StorageAtomicOr32:
+    case IR::Opcode::StorageAtomicXor32:
+    case IR::Opcode::StorageAtomicExchange32:
+        info.used_storage_buffer_types |= IR::Type::U32;
+        break;
+    case IR::Opcode::LoadStorage64:
+    case IR::Opcode::WriteStorage64:
+        info.used_storage_buffer_types |= IR::Type::U32x2;
+        break;
+    case IR::Opcode::LoadStorage128:
+    case IR::Opcode::WriteStorage128:
+        info.used_storage_buffer_types |= IR::Type::U32x4;
+        break;
     case IR::Opcode::SharedAtomicInc32:
         info.uses_shared_increment = true;
         break;
     case IR::Opcode::SharedAtomicDec32:
         info.uses_shared_decrement = true;
         break;
+    case IR::Opcode::SharedAtomicExchange64:
+        info.uses_int64_bit_atomics = true;
+        break;
     case IR::Opcode::GlobalAtomicInc32:
     case IR::Opcode::StorageAtomicInc32:
+        info.used_storage_buffer_types |= IR::Type::U32;
         info.uses_global_increment = true;
         break;
     case IR::Opcode::GlobalAtomicDec32:
     case IR::Opcode::StorageAtomicDec32:
+        info.used_storage_buffer_types |= IR::Type::U32;
         info.uses_global_decrement = true;
         break;
     case IR::Opcode::GlobalAtomicAddF32:
     case IR::Opcode::StorageAtomicAddF32:
+        info.used_storage_buffer_types |= IR::Type::U32;
         info.uses_atomic_f32_add = true;
         break;
     case IR::Opcode::GlobalAtomicAddF16x2:
     case IR::Opcode::StorageAtomicAddF16x2:
+        info.used_storage_buffer_types |= IR::Type::U32;
         info.uses_atomic_f16x2_add = true;
         break;
     case IR::Opcode::GlobalAtomicAddF32x2:
     case IR::Opcode::StorageAtomicAddF32x2:
+        info.used_storage_buffer_types |= IR::Type::U32;
         info.uses_atomic_f32x2_add = true;
         break;
     case IR::Opcode::GlobalAtomicMinF16x2:
     case IR::Opcode::StorageAtomicMinF16x2:
+        info.used_storage_buffer_types |= IR::Type::U32;
         info.uses_atomic_f16x2_min = true;
         break;
     case IR::Opcode::GlobalAtomicMinF32x2:
     case IR::Opcode::StorageAtomicMinF32x2:
+        info.used_storage_buffer_types |= IR::Type::U32;
         info.uses_atomic_f32x2_min = true;
         break;
     case IR::Opcode::GlobalAtomicMaxF16x2:
     case IR::Opcode::StorageAtomicMaxF16x2:
+        info.used_storage_buffer_types |= IR::Type::U32;
         info.uses_atomic_f16x2_max = true;
         break;
     case IR::Opcode::GlobalAtomicMaxF32x2:
     case IR::Opcode::StorageAtomicMaxF32x2:
+        info.used_storage_buffer_types |= IR::Type::U32;
         info.uses_atomic_f32x2_max = true;
         break;
     case IR::Opcode::GlobalAtomicIAdd64:
@@ -516,11 +578,8 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     case IR::Opcode::StorageAtomicAnd64:
     case IR::Opcode::StorageAtomicOr64:
     case IR::Opcode::StorageAtomicXor64:
-        info.uses_64_bit_atomics = true;
-        break;
-    case IR::Opcode::SharedAtomicExchange64:
-        info.uses_64_bit_atomics = true;
-        info.uses_shared_memory_u32x2 = true;
+        info.used_storage_buffer_types |= IR::Type::U64;
+        info.uses_int64_bit_atomics = true;
         break;
     default:
         break;
@@ -141,10 +141,10 @@ struct Info {
     bool uses_atomic_f32x2_add{};
     bool uses_atomic_f32x2_min{};
     bool uses_atomic_f32x2_max{};
-    bool uses_64_bit_atomics{};
-    bool uses_shared_memory_u32x2{};
+    bool uses_int64_bit_atomics{};

     IR::Type used_constant_buffer_types{};
+    IR::Type used_storage_buffer_types{};

     u32 constant_buffer_mask{};
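Note: used_storage_buffer_types mirrors used_constant_buffer_types: IR::Type doubles as a flag set, the collector ORs in one bit per access width, and the backend can then declare only the typed SSBO views a shader actually touches. A sketch of the consuming side (DefineTypedSsbo is a hypothetical name; True is assumed to be the project's flag-test helper):

void DefineStorageBuffers(EmitContext& ctx, const Info& info) {
    const IR::Type used{info.used_storage_buffer_types};
    if (True(used & IR::Type::U8)) {
        DefineTypedSsbo(ctx, ctx.storage_types.U8, &StorageDefinitions::U8);
    }
    if (True(used & IR::Type::U32)) {
        DefineTypedSsbo(ctx, ctx.storage_types.U32, &StorageDefinitions::U32);
    }
    if (True(used & IR::Type::U64)) {
        DefineTypedSsbo(ctx, ctx.storage_types.U64, &StorageDefinitions::U64);
    }
    // U16, U32x2 and U32x4 follow the same pattern.
}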
@@ -44,6 +44,7 @@ constexpr std::array REQUIRED_EXTENSIONS{
     VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME,
     VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME,
     VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME,
+    VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME,
     VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME,
     VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME,
     VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME,
@@ -313,6 +314,14 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
     };
     SetNext(next, host_query_reset);

+    VkPhysicalDeviceVariablePointerFeaturesKHR variable_pointers{
+        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR,
+        .pNext = nullptr,
+        .variablePointersStorageBuffer = VK_TRUE,
+        .variablePointers = VK_TRUE,
+    };
+    SetNext(next, variable_pointers);
+
     VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT demote{
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT,
         .pNext = nullptr,
@@ -399,6 +408,17 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
         LOG_INFO(Render_Vulkan, "Device doesn't support extended dynamic state");
     }

+    VkPhysicalDeviceShaderAtomicInt64FeaturesKHR atomic_int64;
+    if (ext_shader_atomic_int64) {
+        atomic_int64 = {
+            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR,
+            .pNext = nullptr,
+            .shaderBufferInt64Atomics = VK_TRUE,
+            .shaderSharedInt64Atomics = VK_TRUE,
+        };
+        SetNext(next, atomic_int64);
+    }
+
     VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR workgroup_layout;
     if (khr_workgroup_memory_explicit_layout) {
         workgroup_layout = {
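Note: atomic_int64 is chained only when ext_shader_atomic_int64 was detected, so an unsupported driver never sees the struct. Detection itself follows the stock features2 pattern (illustrative sketch; physical_device stands in for the queried VkPhysicalDevice):

VkPhysicalDeviceShaderAtomicInt64FeaturesKHR atomic_int64{
    .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR,
};
VkPhysicalDeviceFeatures2 features2{
    .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
    .pNext = &atomic_int64,
};
vkGetPhysicalDeviceFeatures2(physical_device, &features2);
const bool supports_int64_atomics =
    atomic_int64.shaderBufferInt64Atomics && atomic_int64.shaderSharedInt64Atomics;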
@@ -624,9 +644,13 @@ void Device::CheckSuitability(bool requires_swapchain) const {
         VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT;
     demote.pNext = nullptr;

+    VkPhysicalDeviceVariablePointerFeaturesKHR variable_pointers{};
+    variable_pointers.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR;
+    variable_pointers.pNext = &demote;
+
     VkPhysicalDeviceRobustness2FeaturesEXT robustness2{};
     robustness2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT;
-    robustness2.pNext = &demote;
+    robustness2.pNext = &variable_pointers;

     VkPhysicalDeviceFeatures2KHR features2{};
     features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;

@@ -654,6 +678,9 @@ void Device::CheckSuitability(bool requires_swapchain) const {
         std::make_pair(features.shaderStorageImageWriteWithoutFormat,
                        "shaderStorageImageWriteWithoutFormat"),
         std::make_pair(demote.shaderDemoteToHelperInvocation, "shaderDemoteToHelperInvocation"),
+        std::make_pair(variable_pointers.variablePointers, "variablePointers"),
+        std::make_pair(variable_pointers.variablePointersStorageBuffer,
+                       "variablePointersStorageBuffer"),
         std::make_pair(robustness2.robustBufferAccess2, "robustBufferAccess2"),
         std::make_pair(robustness2.robustImageAccess2, "robustImageAccess2"),
         std::make_pair(robustness2.nullDescriptor, "nullDescriptor"),
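Note: variable pointers become a hard requirement because the reworked emitter passes OpAccessChain results into helper functions and forms storage pointers dynamically, which core SPIR-V only permits with the VariablePointers capabilities. On the SPIR-V side this presumably pairs with module declarations along these lines (Sirit API; the exact call site is outside the rendered hunks):

ctx.AddCapability(spv::Capability::VariablePointersStorageBuffer);
ctx.AddExtension("SPV_KHR_variable_pointers");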