// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <cstring>
#include <limits>
#include <list>
#include <map>
#include <optional>
#include <set>
#include <utility>
#include <variant>
#include <vector>

#include <fmt/format.h>

#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "video_core/engines/shader_bytecode.h"
#include "video_core/engines/shader_header.h"
#include "video_core/shader/control_flow.h"
#include "video_core/shader/node_helper.h"
#include "video_core/shader/shader_ir.h"

namespace VideoCommon::Shader {

using Tegra::Shader::Instruction;
using Tegra::Shader::OpCode;

namespace {

/**
 * Returns whether the instruction at the specified offset is a 'sched' instruction.
 * Every fourth instruction slot starting at main_offset holds a sched instruction, which
 * encodes scheduling information for the three instructions that follow it.
 */
constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
    constexpr u32 SchedPeriod = 4;
    const u32 absolute_offset = offset - main_offset;

    return (absolute_offset % SchedPeriod) == 0;
}
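
// Illustrative values (not from the original): with main_offset = 10, slots 10, 14, 18, ...
// hold sched bundles, so IsSchedInstruction(14, 10) is true while IsSchedInstruction(15, 10)
// and IsSchedInstruction(16, 10) are false.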

void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile* gpu_driver,
                              const std::list<Sampler>& used_samplers) {
    if (gpu_driver == nullptr) {
        LOG_CRITICAL(HW_GPU, "GPU driver profile has not been created yet");
        return;
    }
    if (gpu_driver->TextureHandlerSizeKnown() || used_samplers.size() <= 1) {
        return;
    }
    u32 count{};
    std::vector<u32> bound_offsets;
    for (const auto& sampler : used_samplers) {
        if (sampler.IsBindless()) {
            continue;
        }
        ++count;
        bound_offsets.emplace_back(sampler.GetOffset());
    }
    if (count > 1) {
        gpu_driver->DeduceTextureHandlerSize(std::move(bound_offsets));
    }
}
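
// A minimal illustration under assumed values: if two bound samplers sit at constant buffer
// word offsets 0x8 and 0xA, the profile can infer that texture handlers are spaced
// (0xA - 0x8) * 4 = 8 bytes apart. The exact deduction strategy lives in
// VideoCore::GuestDriverProfile::DeduceTextureHandlerSize.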

std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce,
                                        VideoCore::GuestDriverProfile* gpu_driver,
                                        const std::list<Sampler>& used_samplers) {
    if (gpu_driver == nullptr) {
        LOG_CRITICAL(HW_GPU, "GPU driver profile has not been created yet");
        return std::nullopt;
    }
    const u32 base_offset = sampler_to_deduce.GetOffset();
    u32 max_offset{std::numeric_limits<u32>::max()};
    for (const auto& sampler : used_samplers) {
        if (sampler.IsBindless()) {
            continue;
        }
        if (sampler.GetOffset() > base_offset) {
            max_offset = std::min(sampler.GetOffset(), max_offset);
        }
    }
    if (max_offset == std::numeric_limits<u32>::max()) {
        return std::nullopt;
    }
    return ((max_offset - base_offset) * 4) / gpu_driver->GetTextureHandlerSize();
}
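
// Worked example with assumed values: if the indexed sampler starts at word offset 0x8, its
// nearest higher neighbour starts at 0x18, and the deduced handler size is 8 bytes, the array
// spans (0x18 - 0x8) * 4 = 64 bytes and therefore holds 64 / 8 = 8 entries.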

} // Anonymous namespace
|
2018-12-21 07:39:46 +01:00
|
|
|
|
2019-06-29 07:44:07 +02:00
|
|
|
class ASTDecoder {
|
|
|
|
public:
|
|
|
|
ASTDecoder(ShaderIR& ir) : ir(ir) {}
|
|
|
|
|
|
|
|
void operator()(ASTProgram& ast) {
|
|
|
|
ASTNode current = ast.nodes.GetFirst();
|
|
|
|
while (current) {
|
|
|
|
Visit(current);
|
|
|
|
current = current->GetNext();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void operator()(ASTIfThen& ast) {
|
|
|
|
ASTNode current = ast.nodes.GetFirst();
|
|
|
|
while (current) {
|
|
|
|
Visit(current);
|
|
|
|
current = current->GetNext();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void operator()(ASTIfElse& ast) {
|
|
|
|
ASTNode current = ast.nodes.GetFirst();
|
|
|
|
while (current) {
|
|
|
|
Visit(current);
|
|
|
|
current = current->GetNext();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void operator()(ASTBlockEncoded& ast) {}
|
|
|
|
|
|
|
|
void operator()(ASTBlockDecoded& ast) {}
|
|
|
|
|
|
|
|
void operator()(ASTVarSet& ast) {}
|
|
|
|
|
|
|
|
void operator()(ASTLabel& ast) {}
|
|
|
|
|
|
|
|
void operator()(ASTGoto& ast) {}
|
|
|
|
|
|
|
|
void operator()(ASTDoWhile& ast) {
|
|
|
|
ASTNode current = ast.nodes.GetFirst();
|
|
|
|
while (current) {
|
|
|
|
Visit(current);
|
|
|
|
current = current->GetNext();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void operator()(ASTReturn& ast) {}
|
|
|
|
|
|
|
|
void operator()(ASTBreak& ast) {}
|
|
|
|
|
|
|
|
void Visit(ASTNode& node) {
|
|
|
|
std::visit(*this, *node->GetInnerData());
|
|
|
|
if (node->IsBlockEncoded()) {
|
|
|
|
auto block = std::get_if<ASTBlockEncoded>(node->GetInnerData());
|
|
|
|
NodeBlock bb = ir.DecodeRange(block->start, block->end);
|
2019-10-04 23:23:16 +02:00
|
|
|
node->TransformBlockEncoded(std::move(bb));
|
2019-06-29 07:44:07 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
ShaderIR& ir;
|
|
|
|
};
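
// ASTDecoder is a std::visit visitor: Visit dispatches on the node's variant payload and, for
// ASTBlockEncoded nodes, decodes the raw instruction range and swaps the encoded block for its
// decoded form in place. A sketch of the intended use, mirroring ShaderIR::Decode below:
//
//     ASTDecoder decoder{*this};
//     ASTNode program = GetASTProgram();
//     decoder.Visit(program);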

void ShaderIR::Decode() {
    std::memcpy(&header, program_code.data(), sizeof(Tegra::Shader::Header));

    decompiled = false;
    auto info = ScanFlow(program_code, main_offset, settings, locker);
    auto& shader_info = *info;
    coverage_begin = shader_info.start;
    coverage_end = shader_info.end;
    switch (shader_info.settings.depth) {
    case CompileDepth::FlowStack: {
        for (const auto& block : shader_info.blocks) {
            basic_blocks.insert({block.start, DecodeRange(block.start, block.end + 1)});
        }
        break;
    }
    case CompileDepth::NoFlowStack: {
        disable_flow_stack = true;
        const auto insert_block = [this](NodeBlock& nodes, u32 label) {
            if (label == static_cast<u32>(exit_branch)) {
                return;
            }
            basic_blocks.insert({label, nodes});
        };
        const auto& blocks = shader_info.blocks;
        NodeBlock current_block;
        u32 current_label = static_cast<u32>(exit_branch);
        for (auto& block : blocks) {
            if (shader_info.labels.count(block.start) != 0) {
                insert_block(current_block, current_label);
                current_block.clear();
                current_label = block.start;
            }
            if (!block.ignore_branch) {
                DecodeRangeInner(current_block, block.start, block.end);
                InsertControlFlow(current_block, block);
            } else {
                DecodeRangeInner(current_block, block.start, block.end + 1);
            }
        }
        insert_block(current_block, current_label);
        break;
    }
    case CompileDepth::DecompileBackwards:
    case CompileDepth::FullDecompile: {
        program_manager = std::move(shader_info.manager);
        disable_flow_stack = true;
        decompiled = true;
        ASTDecoder decoder{*this};
        ASTNode program = GetASTProgram();
        decoder.Visit(program);
        break;
    }
    default:
        LOG_CRITICAL(HW_GPU, "Unknown decompilation mode!");
        [[fallthrough]];
    case CompileDepth::BruteForce: {
        const auto shader_end = static_cast<u32>(program_code.size());
        coverage_begin = main_offset;
        coverage_end = shader_end;
        for (u32 label = main_offset; label < shader_end; ++label) {
            basic_blocks.insert({label, DecodeRange(label, label + 1)});
        }
        break;
    }
    }
    if (settings.depth != shader_info.settings.depth) {
        LOG_WARNING(HW_GPU, "Decompiling with compile depth \"{}\" failed, downgraded to \"{}\"",
                    CompileDepthAsString(settings.depth),
                    CompileDepthAsString(shader_info.settings.depth));
    }
}
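
// The compile depths above form a quality ladder: FullDecompile/DecompileBackwards rebuild
// structured control flow through the AST, NoFlowStack emits labelled branches without a flow
// stack, FlowStack keeps the stack-based control flow model, and BruteForce decodes every
// instruction slot individually as a last resort.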

NodeBlock ShaderIR::DecodeRange(u32 begin, u32 end) {
    NodeBlock basic_block;
    DecodeRangeInner(basic_block, begin, end);
    return basic_block;
}

void ShaderIR::DecodeRangeInner(NodeBlock& bb, u32 begin, u32 end) {
    for (u32 pc = begin; pc < (begin > end ? MAX_PROGRAM_LENGTH : end);) {
        pc = DecodeInstr(bb, pc);
    }
}
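
// Note on the loop above: the ternary clamps the bound to MAX_PROGRAM_LENGTH when begin > end,
// so a malformed (inverted) range cannot run past the program; pc has no increment expression
// because DecodeInstr returns the address of the next instruction to decode.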

void ShaderIR::InsertControlFlow(NodeBlock& bb, const ShaderBlock& block) {
    const auto apply_conditions = [&](const Condition& cond, Node n) -> Node {
        Node result = n;
        if (cond.cc != ConditionCode::T) {
            result = Conditional(GetConditionCode(cond.cc), {result});
        }
        if (cond.predicate != Pred::UnusedIndex) {
            u32 pred = static_cast<u32>(cond.predicate);
            const bool is_neg = pred > 7;
            if (is_neg) {
                pred -= 8;
            }
            result = Conditional(GetPredicate(pred, is_neg), {result});
        }
        return result;
    };
    if (std::holds_alternative<SingleBranch>(*block.branch)) {
        auto branch = std::get_if<SingleBranch>(block.branch.get());
        if (branch->address < 0) {
            if (branch->kill) {
                Node n = Operation(OperationCode::Discard);
                n = apply_conditions(branch->condition, n);
                bb.push_back(n);
                global_code.push_back(n);
                return;
            }
            Node n = Operation(OperationCode::Exit);
            n = apply_conditions(branch->condition, n);
            bb.push_back(n);
            global_code.push_back(n);
            return;
        }
        Node n = Operation(OperationCode::Branch, Immediate(branch->address));
        n = apply_conditions(branch->condition, n);
        bb.push_back(n);
        global_code.push_back(n);
        return;
    }
    auto multi_branch = std::get_if<MultiBranch>(block.branch.get());
    Node op_a = GetRegister(multi_branch->gpr);
    for (auto& branch_case : multi_branch->branches) {
        Node n = Operation(OperationCode::Branch, Immediate(branch_case.address));
        Node op_b = Immediate(branch_case.cmp_value);
        Node condition =
            GetPredicateComparisonInteger(Tegra::Shader::PredCondition::Equal, false, op_a, op_b);
        auto result = Conditional(condition, {n});
        bb.push_back(result);
        global_code.push_back(result);
    }
}
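
// MultiBranch lowering sketch (illustrative values): an indirect branch with cases
// {(cmp_value = 0, address = 0x40), (cmp_value = 1, address = 0x80)} becomes two Branch
// operations, each wrapped in a Conditional guarded by "gpr == cmp_value".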

u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) {
    // Ignore sched instructions when generating code.
    if (IsSchedInstruction(pc, main_offset)) {
        return pc + 1;
    }

    const Instruction instr = {program_code[pc]};
    const auto opcode = OpCode::Decode(instr);
    const u32 nv_address = ConvertAddressToNvidiaSpace(pc);

    // Decoding failure
    if (!opcode) {
        UNIMPLEMENTED_MSG("Unhandled instruction: {0:x}", instr.value);
        bb.push_back(Comment(fmt::format("{:05x} Unimplemented Shader instruction (0x{:016x})",
                                         nv_address, instr.value)));
        return pc + 1;
    }

    bb.push_back(Comment(
        fmt::format("{:05x} {} (0x{:016x})", nv_address, opcode->get().GetName(), instr.value)));

    using Tegra::Shader::Pred;
    UNIMPLEMENTED_IF_MSG(instr.pred.full_pred == Pred::NeverExecute,
                         "NeverExecute predicate not implemented");

    static const std::map<OpCode::Type, u32 (ShaderIR::*)(NodeBlock&, u32)> decoders = {
        {OpCode::Type::Arithmetic, &ShaderIR::DecodeArithmetic},
        {OpCode::Type::ArithmeticImmediate, &ShaderIR::DecodeArithmeticImmediate},
        {OpCode::Type::Bfe, &ShaderIR::DecodeBfe},
        {OpCode::Type::Bfi, &ShaderIR::DecodeBfi},
        {OpCode::Type::Shift, &ShaderIR::DecodeShift},
        {OpCode::Type::ArithmeticInteger, &ShaderIR::DecodeArithmeticInteger},
        {OpCode::Type::ArithmeticIntegerImmediate, &ShaderIR::DecodeArithmeticIntegerImmediate},
        {OpCode::Type::ArithmeticHalf, &ShaderIR::DecodeArithmeticHalf},
        {OpCode::Type::ArithmeticHalfImmediate, &ShaderIR::DecodeArithmeticHalfImmediate},
        {OpCode::Type::Ffma, &ShaderIR::DecodeFfma},
        {OpCode::Type::Hfma2, &ShaderIR::DecodeHfma2},
        {OpCode::Type::Conversion, &ShaderIR::DecodeConversion},
        {OpCode::Type::Warp, &ShaderIR::DecodeWarp},
        {OpCode::Type::Memory, &ShaderIR::DecodeMemory},
        {OpCode::Type::Texture, &ShaderIR::DecodeTexture},
        {OpCode::Type::Image, &ShaderIR::DecodeImage},
        {OpCode::Type::FloatSetPredicate, &ShaderIR::DecodeFloatSetPredicate},
        {OpCode::Type::IntegerSetPredicate, &ShaderIR::DecodeIntegerSetPredicate},
        {OpCode::Type::HalfSetPredicate, &ShaderIR::DecodeHalfSetPredicate},
        {OpCode::Type::PredicateSetRegister, &ShaderIR::DecodePredicateSetRegister},
        {OpCode::Type::PredicateSetPredicate, &ShaderIR::DecodePredicateSetPredicate},
        {OpCode::Type::RegisterSetPredicate, &ShaderIR::DecodeRegisterSetPredicate},
        {OpCode::Type::FloatSet, &ShaderIR::DecodeFloatSet},
        {OpCode::Type::IntegerSet, &ShaderIR::DecodeIntegerSet},
        {OpCode::Type::HalfSet, &ShaderIR::DecodeHalfSet},
        {OpCode::Type::Video, &ShaderIR::DecodeVideo},
        {OpCode::Type::Xmad, &ShaderIR::DecodeXmad},
    };

    std::vector<Node> tmp_block;
    if (const auto decoder = decoders.find(opcode->get().GetType()); decoder != decoders.end()) {
        pc = (this->*decoder->second)(tmp_block, pc);
    } else {
        pc = DecodeOther(tmp_block, pc);
    }

    // Some instructions (like SSY) don't have a predicate field; they are always executed
    // unconditionally.
    const bool can_be_predicated = OpCode::IsPredicatedInstruction(opcode->get().GetId());
    const auto pred_index = static_cast<u32>(instr.pred.pred_index);

    if (can_be_predicated && pred_index != static_cast<u32>(Pred::UnusedIndex)) {
        const Node conditional =
            Conditional(GetPredicate(pred_index, instr.negate_pred != 0), std::move(tmp_block));
        global_code.push_back(conditional);
        bb.push_back(conditional);
    } else {
        for (auto& node : tmp_block) {
            global_code.push_back(node);
            bb.push_back(node);
        }
    }

    return pc + 1;
}
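
// Predication sketch (illustrative): an instruction such as "@P0 FADD R0, R1, R2" decodes into
// a Conditional node whose guard is GetPredicate(0, false) and whose body is the FADD's IR;
// with "@!P0" the guard is negated. Non-predicable opcodes like SSY skip the wrapper entirely.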

void ShaderIR::PostDecode() {
    // Deduce texture handler size if needed
    auto gpu_driver = locker.AccessGuestDriverProfile();
    DeduceTextureHandlerSize(gpu_driver, used_samplers);
    // Deduce the size of indexed samplers
    if (!uses_indexed_samplers) {
        return;
    }
    for (auto& sampler : used_samplers) {
        if (!sampler.IsIndexed()) {
            continue;
        }
        if (const auto size = TryDeduceSamplerSize(sampler, gpu_driver, used_samplers)) {
            sampler.SetSize(*size);
        } else {
            LOG_CRITICAL(HW_GPU, "Failed to deduce size of indexed sampler");
            sampler.SetSize(1);
        }
    }
}
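
// Typical pipeline (a sketch; the actual call sites live in the renderer backends): a ShaderIR
// is constructed from guest bytecode, Decode() populates basic_blocks or the AST, and
// PostDecode() then patches sampler metadata once every usage has been observed.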

} // namespace VideoCommon::Shader