// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <mutex>
#include <thread>

#include <boost/functional/hash.hpp>

#include "common/assert.h"
#include "common/hash.h"
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/frontend/emu_window.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_opengl/gl_rasterizer.h"
#include "video_core/renderer_opengl/gl_shader_cache.h"
#include "video_core/renderer_opengl/gl_shader_decompiler.h"
#include "video_core/renderer_opengl/gl_shader_disk_cache.h"
#include "video_core/renderer_opengl/utils.h"
#include "video_core/shader/shader_ir.h"

namespace OpenGL {

using VideoCommon::Shader::ProgramCode;
|
2019-07-15 03:25:13 +02:00
|
|
|
// One UBO is always reserved for emulation values on staged shaders
|
|
|
|
constexpr u32 STAGE_RESERVED_UBOS = 1;
|
2019-01-14 04:58:15 +01:00
|
|
|
|
|
|
|
struct UnspecializedShader {
|
|
|
|
std::string code;
|
|
|
|
GLShader::ShaderEntries entries;
|
2019-07-15 03:25:13 +02:00
|
|
|
ProgramType program_type;
|
2019-01-14 04:58:15 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
2018-08-23 23:30:27 +02:00
|
|
|
/// Gets the address for the specified shader stage program
|
2019-05-30 19:01:40 +02:00
|
|
|
GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) {
|
|
|
|
const auto& gpu{system.GPU().Maxwell3D()};
|
2019-02-24 06:15:35 +01:00
|
|
|
const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
|
|
|
|
return gpu.regs.code_address.CodeAddress() + shader_config.offset;
|
2018-08-23 23:30:27 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Gets the shader program code from memory for the specified address
|
2019-04-16 16:11:35 +02:00
|
|
|
ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr,
|
|
|
|
const u8* host_ptr) {
|
2018-12-21 02:29:15 +01:00
|
|
|
ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
|
2019-03-27 19:59:00 +01:00
|
|
|
ASSERT_OR_EXECUTE(host_ptr != nullptr, {
|
|
|
|
std::fill(program_code.begin(), program_code.end(), 0);
|
|
|
|
return program_code;
|
|
|
|
});
|
2019-04-16 16:11:35 +02:00
|
|
|
memory_manager.ReadBlockUnsafe(gpu_addr, program_code.data(),
|
|
|
|
program_code.size() * sizeof(u64));
|
2018-08-23 23:30:27 +02:00
|
|
|
return program_code;
|
|
|
|
}
|
|
|
|
|
2019-01-05 05:00:06 +01:00
|
|
|
/// Gets the shader type from a Maxwell program type
|
2019-07-15 03:25:13 +02:00
|
|
|
constexpr GLenum GetShaderType(ProgramType program_type) {
|
2019-01-05 05:00:06 +01:00
|
|
|
switch (program_type) {
|
2019-07-15 03:25:13 +02:00
|
|
|
case ProgramType::VertexA:
|
|
|
|
case ProgramType::VertexB:
|
2019-01-05 05:00:06 +01:00
|
|
|
return GL_VERTEX_SHADER;
|
2019-07-15 03:25:13 +02:00
|
|
|
case ProgramType::Geometry:
|
2019-01-05 05:00:06 +01:00
|
|
|
return GL_GEOMETRY_SHADER;
|
2019-07-15 03:25:13 +02:00
|
|
|
case ProgramType::Fragment:
|
2019-01-05 05:00:06 +01:00
|
|
|
return GL_FRAGMENT_SHADER;
|
2019-07-15 03:25:13 +02:00
|
|
|
case ProgramType::Compute:
|
|
|
|
return GL_COMPUTE_SHADER;
|
2019-01-05 05:00:06 +01:00
|
|
|
default:
|
|
|
|
return GL_NONE;
|
2018-08-23 23:30:27 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-14 04:58:15 +01:00
|
|
|
/// Returns true when the instruction at the given offset is a scheduler instruction.
constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
    // Sched instructions appear once every 4 instructions, counting from the
    // start of the actual program stream (main_offset).
    constexpr std::size_t kSchedPeriod = 4;
    return ((offset - main_offset) % kSchedPeriod) == 0;
}
|
2018-08-23 23:30:27 +02:00
|
|
|
|
2019-01-14 04:58:15 +01:00
|
|
|
/// Describes primitive behavior on geometry shaders
|
|
|
|
constexpr std::tuple<const char*, const char*, u32> GetPrimitiveDescription(GLenum primitive_mode) {
|
|
|
|
switch (primitive_mode) {
|
|
|
|
case GL_POINTS:
|
|
|
|
return {"points", "Points", 1};
|
|
|
|
case GL_LINES:
|
|
|
|
case GL_LINE_STRIP:
|
|
|
|
return {"lines", "Lines", 2};
|
|
|
|
case GL_LINES_ADJACENCY:
|
|
|
|
case GL_LINE_STRIP_ADJACENCY:
|
|
|
|
return {"lines_adjacency", "LinesAdj", 4};
|
|
|
|
case GL_TRIANGLES:
|
|
|
|
case GL_TRIANGLE_STRIP:
|
|
|
|
case GL_TRIANGLE_FAN:
|
|
|
|
return {"triangles", "Triangles", 3};
|
|
|
|
case GL_TRIANGLES_ADJACENCY:
|
|
|
|
case GL_TRIANGLE_STRIP_ADJACENCY:
|
|
|
|
return {"triangles_adjacency", "TrianglesAdj", 6};
|
|
|
|
default:
|
|
|
|
return {"points", "Invalid", 1};
|
|
|
|
}
|
|
|
|
}
|
2018-08-23 23:30:27 +02:00
|
|
|
|
2019-07-15 07:27:18 +02:00
|
|
|
/// Maps a Maxwell shader program slot to the internal ProgramType enumeration.
ProgramType GetProgramType(Maxwell::ShaderProgram program) {
    switch (program) {
    case Maxwell::ShaderProgram::VertexA:
        return ProgramType::VertexA;
    case Maxwell::ShaderProgram::VertexB:
        return ProgramType::VertexB;
    case Maxwell::ShaderProgram::TesselationControl:
        return ProgramType::TessellationControl;
    case Maxwell::ShaderProgram::TesselationEval:
        return ProgramType::TessellationEval;
    case Maxwell::ShaderProgram::Geometry:
        return ProgramType::Geometry;
    case Maxwell::ShaderProgram::Fragment:
        return ProgramType::Fragment;
    }
    // All enumerators are handled above; reaching this point means corrupt input.
    UNREACHABLE();
    return {};
}
|
|
|
|
|
2019-01-14 04:58:15 +01:00
|
|
|
/// Calculates the size of a program stream
|
|
|
|
std::size_t CalculateProgramSize(const GLShader::ProgramCode& program) {
|
|
|
|
constexpr std::size_t start_offset = 10;
|
2019-07-05 03:10:59 +02:00
|
|
|
// This is the encoded version of BRA that jumps to itself. All Nvidia
|
|
|
|
// shaders end with one.
|
|
|
|
constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL;
|
2019-06-21 03:22:20 +02:00
|
|
|
constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL;
|
2019-01-14 04:58:15 +01:00
|
|
|
std::size_t offset = start_offset;
|
|
|
|
std::size_t size = start_offset * sizeof(u64);
|
|
|
|
while (offset < program.size()) {
|
|
|
|
const u64 instruction = program[offset];
|
|
|
|
if (!IsSchedInstruction(offset, start_offset)) {
|
2019-07-05 03:10:59 +02:00
|
|
|
if ((instruction & mask) == self_jumping_branch) {
|
2019-01-14 04:58:15 +01:00
|
|
|
// End on Maxwell's "nop" instruction
|
|
|
|
break;
|
|
|
|
}
|
2019-06-20 21:02:53 +02:00
|
|
|
if (instruction == 0) {
|
|
|
|
break;
|
|
|
|
}
|
2019-01-14 04:58:15 +01:00
|
|
|
}
|
|
|
|
size += sizeof(u64);
|
|
|
|
offset++;
|
|
|
|
}
|
|
|
|
// The last instruction is included in the program size
|
|
|
|
return std::min(size + sizeof(u64), program.size() * sizeof(u64));
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Hashes one (or two) program streams
|
2019-07-15 03:25:13 +02:00
|
|
|
u64 GetUniqueIdentifier(ProgramType program_type, const ProgramCode& code,
|
2019-06-25 13:57:32 +02:00
|
|
|
const ProgramCode& code_b, std::size_t size_a = 0, std::size_t size_b = 0) {
|
|
|
|
if (size_a == 0) {
|
|
|
|
size_a = CalculateProgramSize(code);
|
|
|
|
}
|
|
|
|
u64 unique_identifier = Common::CityHash64(reinterpret_cast<const char*>(code.data()), size_a);
|
2019-07-15 03:25:13 +02:00
|
|
|
if (program_type != ProgramType::VertexA) {
|
2019-01-14 04:58:15 +01:00
|
|
|
return unique_identifier;
|
|
|
|
}
|
|
|
|
// VertexA programs include two programs
|
|
|
|
|
|
|
|
std::size_t seed = 0;
|
|
|
|
boost::hash_combine(seed, unique_identifier);
|
|
|
|
|
2019-06-25 13:57:32 +02:00
|
|
|
if (size_b == 0) {
|
|
|
|
size_b = CalculateProgramSize(code_b);
|
|
|
|
}
|
|
|
|
const u64 identifier_b =
|
|
|
|
Common::CityHash64(reinterpret_cast<const char*>(code_b.data()), size_b);
|
2019-01-14 04:58:15 +01:00
|
|
|
boost::hash_combine(seed, identifier_b);
|
|
|
|
return static_cast<u64>(seed);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Creates an unspecialized program from code streams
|
2019-07-15 03:25:13 +02:00
|
|
|
GLShader::ProgramResult CreateProgram(const Device& device, ProgramType program_type,
|
2019-04-10 23:03:52 +02:00
|
|
|
ProgramCode program_code, ProgramCode program_code_b) {
|
2019-01-31 20:44:11 +01:00
|
|
|
GLShader::ShaderSetup setup(program_code);
|
2019-06-25 13:57:32 +02:00
|
|
|
setup.program.size_a = CalculateProgramSize(program_code);
|
|
|
|
setup.program.size_b = 0;
|
2019-07-15 03:25:13 +02:00
|
|
|
if (program_type == ProgramType::VertexA) {
|
2018-08-23 23:30:27 +02:00
|
|
|
// VertexB is always enabled, so when VertexA is enabled, we have two vertex shaders.
|
|
|
|
// Conventional HW does not support this, so we combine VertexA and VertexB into one
|
|
|
|
// stage here.
|
2019-01-31 20:44:11 +01:00
|
|
|
setup.SetProgramB(program_code_b);
|
2019-06-25 13:57:32 +02:00
|
|
|
setup.program.size_b = CalculateProgramSize(program_code_b);
|
2019-01-14 04:58:15 +01:00
|
|
|
}
|
2019-06-25 13:57:32 +02:00
|
|
|
setup.program.unique_identifier = GetUniqueIdentifier(
|
|
|
|
program_type, program_code, program_code_b, setup.program.size_a, setup.program.size_b);
|
2019-01-14 04:58:15 +01:00
|
|
|
|
|
|
|
switch (program_type) {
|
2019-07-15 03:25:13 +02:00
|
|
|
case ProgramType::VertexA:
|
|
|
|
case ProgramType::VertexB:
|
2019-04-10 23:03:52 +02:00
|
|
|
return GLShader::GenerateVertexShader(device, setup);
|
2019-07-15 03:25:13 +02:00
|
|
|
case ProgramType::Geometry:
|
2019-04-10 23:03:52 +02:00
|
|
|
return GLShader::GenerateGeometryShader(device, setup);
|
2019-07-15 03:25:13 +02:00
|
|
|
case ProgramType::Fragment:
|
2019-04-10 23:03:52 +02:00
|
|
|
return GLShader::GenerateFragmentShader(device, setup);
|
2019-07-15 03:25:13 +02:00
|
|
|
case ProgramType::Compute:
|
|
|
|
return GLShader::GenerateComputeShader(device, setup);
|
2018-08-23 23:30:27 +02:00
|
|
|
default:
|
2019-07-15 03:25:13 +02:00
|
|
|
UNIMPLEMENTED_MSG("Unimplemented program_type={}", static_cast<u32>(program_type));
|
2019-01-14 04:58:15 +01:00
|
|
|
return {};
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Builds and links an OpenGL program for a concrete variant: prepends a GLSL
/// preamble (version, extensions, binding #defines, geometry/compute layout)
/// to the decompiled code and compiles it.
CachedProgram SpecializeShader(const std::string& code, const GLShader::ShaderEntries& entries,
                               ProgramType program_type, const ProgramVariant& variant,
                               bool hint_retrievable = false) {
    auto base_bindings{variant.base_bindings};
    const auto primitive_mode{variant.primitive_mode};
    const auto texture_buffer_usage{variant.texture_buffer_usage};

    std::string source = "#version 430 core\n"
                         "#extension GL_ARB_separate_shader_objects : enable\n"
                         "#extension GL_NV_gpu_shader5 : enable\n"
                         "#extension GL_NV_shader_thread_group : enable\n";
    if (entries.shader_viewport_layer_array) {
        source += "#extension GL_ARB_shader_viewport_layer_array : enable\n";
    }
    if (program_type == ProgramType::Compute) {
        source += "#extension GL_ARB_compute_variable_group_size : require\n";
    }
    source += '\n';

    // Graphics stages reserve one UBO for emulation values (see STAGE_RESERVED_UBOS).
    if (program_type != ProgramType::Compute) {
        source += fmt::format("#define EMULATION_UBO_BINDING {}\n", base_bindings.cbuf++);
    }

    // Assign sequential binding points to every resource the shader references.
    for (const auto& cbuf : entries.const_buffers) {
        source +=
            fmt::format("#define CBUF_BINDING_{} {}\n", cbuf.GetIndex(), base_bindings.cbuf++);
    }
    for (const auto& gmem : entries.global_memory_entries) {
        source += fmt::format("#define GMEM_BINDING_{}_{} {}\n", gmem.GetCbufIndex(),
                              gmem.GetCbufOffset(), base_bindings.gmem++);
    }
    for (const auto& sampler : entries.samplers) {
        source += fmt::format("#define SAMPLER_BINDING_{} {}\n", sampler.GetIndex(),
                              base_bindings.sampler++);
    }
    for (const auto& image : entries.images) {
        source +=
            fmt::format("#define IMAGE_BINDING_{} {}\n", image.GetIndex(), base_bindings.image++);
    }

    // Transform 1D textures to texture samplers by declaring its preprocessor macros.
    for (std::size_t i = 0; i < texture_buffer_usage.size(); ++i) {
        if (!texture_buffer_usage.test(i)) {
            continue;
        }
        source += fmt::format("#define SAMPLER_{}_IS_BUFFER\n", i);
    }
    if (texture_buffer_usage.any()) {
        source += '\n';
    }

    if (program_type == ProgramType::Geometry) {
        const auto [glsl_topology, debug_name, max_vertices] =
            GetPrimitiveDescription(primitive_mode);

        source += "layout (" + std::string(glsl_topology) + ") in;\n\n";
        source += "#define MAX_VERTEX_INPUT " + std::to_string(max_vertices) + '\n';
    }
    if (program_type == ProgramType::Compute) {
        source += "layout (local_size_variable) in;\n";
    }

    source += '\n';
    source += code;

    OGLShader shader;
    shader.Create(source.c_str(), GetShaderType(program_type));

    auto program = std::make_shared<OGLProgram>();
    program->Create(true, hint_retrievable, shader.handle);
    return program;
}
|
|
|
|
|
|
|
|
/// Queries the driver for the set of program binary formats it can load.
std::set<GLenum> GetSupportedFormats() {
    GLint num_formats{};
    glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &num_formats);

    std::vector<GLint> formats(num_formats);
    glGetIntegerv(GL_PROGRAM_BINARY_FORMATS, formats.data());

    std::set<GLenum> supported_formats;
    for (const GLint format : formats) {
        supported_formats.insert(static_cast<GLenum>(format));
    }
    return supported_formats;
}
|
|
|
|
|
2019-04-10 23:03:52 +02:00
|
|
|
} // Anonymous namespace
|
2019-01-14 04:58:15 +01:00
|
|
|
|
2019-07-15 03:25:13 +02:00
|
|
|
// Private constructor: wraps an already-decompiled program result so it can be
// tracked by the rasterizer cache. Used by the Create* factory functions below.
CachedShader::CachedShader(const ShaderParameters& params, ProgramType program_type,
                           GLShader::ProgramResult result)
    : RasterizerCacheObject{params.host_ptr}, host_ptr{params.host_ptr}, cpu_addr{params.cpu_addr},
      unique_identifier{params.unique_identifier}, program_type{program_type},
      disk_cache{params.disk_cache}, precompiled_programs{params.precompiled_programs},
      entries{result.second}, code{std::move(result.first)}, shader_length{entries.shader_length} {}
|
|
|
|
|
|
|
|
/// Builds a graphics-stage shader from guest memory, persisting the raw code
/// to the transferable disk cache on success.
Shader CachedShader::CreateStageFromMemory(const ShaderParameters& params,
                                           Maxwell::ShaderProgram program_type,
                                           ProgramCode&& program_code,
                                           ProgramCode&& program_code_b) {
    const auto code_size{CalculateProgramSize(program_code)};
    const auto code_size_b{CalculateProgramSize(program_code_b)};
    const auto type{GetProgramType(program_type)};

    auto result{CreateProgram(params.device, type, program_code, program_code_b)};
    if (result.first.empty()) {
        // TODO(Rodrigo): Unimplemented shader stages hit here, avoid using these for now
        return {};
    }

    params.disk_cache.SaveRaw(ShaderDiskCacheRaw(
        params.unique_identifier, type, static_cast<u32>(code_size / sizeof(u64)),
        static_cast<u32>(code_size_b / sizeof(u64)), std::move(program_code),
        std::move(program_code_b)));

    return std::shared_ptr<CachedShader>(new CachedShader(params, type, std::move(result)));
}
|
|
|
|
|
2019-05-31 02:56:37 +02:00
|
|
|
/// Wraps an already-decompiled graphics-stage program loaded from the disk cache.
Shader CachedShader::CreateStageFromCache(const ShaderParameters& params,
                                          Maxwell::ShaderProgram program_type,
                                          GLShader::ProgramResult result) {
    return std::shared_ptr<CachedShader>(
        new CachedShader(params, GetProgramType(program_type), std::move(result)));
}
|
|
|
|
|
2019-07-16 01:33:51 +02:00
|
|
|
/// Builds a compute kernel from guest memory, persisting the raw code to the
/// transferable disk cache.
Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, ProgramCode&& code) {
    auto result{CreateProgram(params.device, ProgramType::Compute, code, {})};

    const auto code_size{CalculateProgramSize(code)};
    // Compute kernels have no secondary stream, hence size_b of zero.
    params.disk_cache.SaveRaw(ShaderDiskCacheRaw(params.unique_identifier, ProgramType::Compute,
                                                 static_cast<u32>(code_size / sizeof(u64)), 0,
                                                 std::move(code), {}));

    return std::shared_ptr<CachedShader>(
        new CachedShader(params, ProgramType::Compute, std::move(result)));
}
|
|
|
|
|
|
|
|
/// Wraps an already-decompiled compute kernel loaded from the disk cache.
Shader CachedShader::CreateKernelFromCache(const ShaderParameters& params,
                                           GLShader::ProgramResult result) {
    return std::shared_ptr<CachedShader>(
        new CachedShader(params, ProgramType::Compute, std::move(result)));
}
|
2018-08-23 23:30:27 +02:00
|
|
|
|
2019-04-29 02:08:31 +02:00
|
|
|
/// Returns the specialized GL program for the given variant plus the base
/// bindings the NEXT stage should start from (after this stage's resources).
std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(const ProgramVariant& variant) {
    GLuint handle{};
    if (program_type == ProgramType::Geometry) {
        // Geometry shaders are cached per primitive topology.
        handle = GetGeometryShader(variant);
    } else {
        const auto [entry, is_cache_miss] = programs.try_emplace(variant);
        auto& program = entry->second;
        if (is_cache_miss) {
            // Prefer a precompiled binary; otherwise compile and record the usage
            // so the disk cache can precompile it on the next boot.
            program = TryLoadProgram(variant);
            if (!program) {
                program = SpecializeShader(code, entries, program_type, variant);
                disk_cache.SaveUsage(GetUsage(variant));
            }

            LabelGLObject(GL_PROGRAM, program->handle, cpu_addr);
        }

        handle = program->handle;
    }

    // Advance the binding counters past everything this stage consumed.
    auto base_bindings = variant.base_bindings;
    base_bindings.cbuf += static_cast<u32>(entries.const_buffers.size());
    if (program_type != ProgramType::Compute) {
        base_bindings.cbuf += STAGE_RESERVED_UBOS;
    }
    base_bindings.gmem += static_cast<u32>(entries.global_memory_entries.size());
    base_bindings.sampler += static_cast<u32>(entries.samplers.size());

    return {handle, base_bindings};
}
|
|
|
|
|
2019-04-29 02:08:31 +02:00
|
|
|
/// Selects (and lazily builds) the geometry program matching the variant's topology.
GLuint CachedShader::GetGeometryShader(const ProgramVariant& variant) {
    const auto [entry, is_cache_miss] = geometry_programs.try_emplace(variant);
    auto& programs = entry->second;

    switch (variant.primitive_mode) {
    case GL_POINTS:
        return LazyGeometryProgram(programs.points, variant);
    case GL_LINES:
    case GL_LINE_STRIP:
        return LazyGeometryProgram(programs.lines, variant);
    case GL_LINES_ADJACENCY:
    case GL_LINE_STRIP_ADJACENCY:
        return LazyGeometryProgram(programs.lines_adjacency, variant);
    case GL_TRIANGLES:
    case GL_TRIANGLE_STRIP:
    case GL_TRIANGLE_FAN:
        return LazyGeometryProgram(programs.triangles, variant);
    case GL_TRIANGLES_ADJACENCY:
    case GL_TRIANGLE_STRIP_ADJACENCY:
        return LazyGeometryProgram(programs.triangles_adjacency, variant);
    default:
        UNREACHABLE_MSG("Unknown primitive mode.");
        return LazyGeometryProgram(programs.points, variant);
    }
}
|
|
|
|
|
2019-04-29 02:08:31 +02:00
|
|
|
/// Builds the geometry program for `variant` on first use, caching it in
/// `target_program`. Subsequent calls return the cached handle.
GLuint CachedShader::LazyGeometryProgram(CachedProgram& target_program,
                                         const ProgramVariant& variant) {
    if (target_program) {
        return target_program->handle;
    }
    // Only the debug name is needed here; the topology is re-derived inside
    // SpecializeShader from the variant itself.
    const auto [glsl_name, debug_name, vertices] = GetPrimitiveDescription(variant.primitive_mode);
    target_program = TryLoadProgram(variant);
    if (!target_program) {
        target_program = SpecializeShader(code, entries, program_type, variant);
        disk_cache.SaveUsage(GetUsage(variant));
    }

    LabelGLObject(GL_PROGRAM, target_program->handle, cpu_addr, debug_name);

    return target_program->handle;
}
|
|
|
|
|
2019-04-29 02:08:31 +02:00
|
|
|
/// Looks up a precompiled program for this variant; returns null when absent.
CachedProgram CachedShader::TryLoadProgram(const ProgramVariant& variant) const {
    if (const auto it = precompiled_programs.find(GetUsage(variant));
        it != precompiled_programs.end()) {
        return it->second;
    }
    return {};
}
|
|
|
|
|
2019-04-29 02:08:31 +02:00
|
|
|
/// Builds the disk-cache usage key (shader identity + variant) for this shader.
ShaderDiskCacheUsage CachedShader::GetUsage(const ProgramVariant& variant) const {
    ShaderDiskCacheUsage result;
    result.unique_identifier = unique_identifier;
    result.variant = variant;
    return result;
}
|
|
|
|
|
2019-04-10 23:03:52 +02:00
|
|
|
// Shader cache for the OpenGL backend; holds references to the owning system,
// window (for shared-context creation during parallel precompilation) and device.
ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system,
                                     Core::Frontend::EmuWindow& emu_window, const Device& device)
    : RasterizerCache{rasterizer}, system{system}, emu_window{emu_window}, device{device},
      disk_cache{system} {}
|
2019-01-14 04:58:15 +01:00
|
|
|
|
2019-01-21 20:38:23 +01:00
|
|
|
void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading,
|
|
|
|
const VideoCore::DiskResourceLoadCallback& callback) {
|
2019-01-15 06:17:38 +01:00
|
|
|
const auto transferable = disk_cache.LoadTransferable();
|
|
|
|
if (!transferable) {
|
2019-01-14 04:58:15 +01:00
|
|
|
return;
|
|
|
|
}
|
2019-04-06 22:59:56 +02:00
|
|
|
const auto [raws, shader_usages] = *transferable;
|
2019-01-14 04:58:15 +01:00
|
|
|
|
2019-01-15 06:17:38 +01:00
|
|
|
auto [decompiled, dumps] = disk_cache.LoadPrecompiled();
|
2019-01-14 04:58:15 +01:00
|
|
|
|
2019-01-15 06:17:38 +01:00
|
|
|
const auto supported_formats{GetSupportedFormats()};
|
2019-04-06 22:59:56 +02:00
|
|
|
const auto unspecialized_shaders{
|
2019-01-21 20:38:23 +01:00
|
|
|
GenerateUnspecializedShaders(stop_loading, callback, raws, decompiled)};
|
2019-04-06 22:59:56 +02:00
|
|
|
if (stop_loading) {
|
2019-01-21 20:38:23 +01:00
|
|
|
return;
|
2019-04-06 22:59:56 +02:00
|
|
|
}
|
2019-01-14 04:58:15 +01:00
|
|
|
|
2019-02-25 11:47:23 +01:00
|
|
|
// Track if precompiled cache was altered during loading to know if we have to serialize the
|
|
|
|
// virtual precompiled cache file back to the hard drive
|
|
|
|
bool precompiled_cache_altered = false;
|
|
|
|
|
2019-04-06 22:59:56 +02:00
|
|
|
// Inform the frontend about shader build initialization
|
|
|
|
if (callback) {
|
|
|
|
callback(VideoCore::LoadCallbackStage::Build, 0, shader_usages.size());
|
|
|
|
}
|
2019-01-21 20:38:23 +01:00
|
|
|
|
2019-04-06 22:59:56 +02:00
|
|
|
std::mutex mutex;
|
|
|
|
std::size_t built_shaders = 0; // It doesn't have be atomic since it's used behind a mutex
|
|
|
|
std::atomic_bool compilation_failed = false;
|
2019-01-14 04:58:15 +01:00
|
|
|
|
2019-04-06 22:59:56 +02:00
|
|
|
const auto Worker = [&](Core::Frontend::GraphicsContext* context, std::size_t begin,
|
2019-04-26 01:10:20 +02:00
|
|
|
std::size_t end, const std::vector<ShaderDiskCacheUsage>& shader_usages,
|
|
|
|
const ShaderDumpsMap& dumps) {
|
2019-04-06 22:59:56 +02:00
|
|
|
context->MakeCurrent();
|
|
|
|
SCOPE_EXIT({ return context->DoneCurrent(); });
|
2019-01-14 04:58:15 +01:00
|
|
|
|
2019-04-06 22:59:56 +02:00
|
|
|
for (std::size_t i = begin; i < end; ++i) {
|
|
|
|
if (stop_loading || compilation_failed) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
const auto& usage{shader_usages[i]};
|
|
|
|
LOG_INFO(Render_OpenGL, "Building shader {:016x} (index {} of {})",
|
|
|
|
usage.unique_identifier, i, shader_usages.size());
|
|
|
|
|
|
|
|
const auto& unspecialized{unspecialized_shaders.at(usage.unique_identifier)};
|
|
|
|
const auto dump{dumps.find(usage)};
|
|
|
|
|
|
|
|
CachedProgram shader;
|
|
|
|
if (dump != dumps.end()) {
|
|
|
|
// If the shader is dumped, attempt to load it with
|
|
|
|
shader = GeneratePrecompiledProgram(dump->second, supported_formats);
|
|
|
|
if (!shader) {
|
|
|
|
compilation_failed = true;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2019-01-15 05:07:57 +01:00
|
|
|
if (!shader) {
|
2019-04-06 22:59:56 +02:00
|
|
|
shader = SpecializeShader(unspecialized.code, unspecialized.entries,
|
2019-04-29 02:08:31 +02:00
|
|
|
unspecialized.program_type, usage.variant, true);
|
2019-01-15 05:07:57 +01:00
|
|
|
}
|
2019-04-06 22:59:56 +02:00
|
|
|
|
|
|
|
std::scoped_lock lock(mutex);
|
|
|
|
if (callback) {
|
|
|
|
callback(VideoCore::LoadCallbackStage::Build, ++built_shaders,
|
|
|
|
shader_usages.size());
|
|
|
|
}
|
|
|
|
|
|
|
|
precompiled_programs.emplace(usage, std::move(shader));
|
2019-01-14 04:58:15 +01:00
|
|
|
}
|
2019-04-06 22:59:56 +02:00
|
|
|
};
|
|
|
|
|
2019-04-26 01:10:20 +02:00
|
|
|
const auto num_workers{static_cast<std::size_t>(std::thread::hardware_concurrency() + 1)};
|
2019-04-06 22:59:56 +02:00
|
|
|
const std::size_t bucket_size{shader_usages.size() / num_workers};
|
|
|
|
std::vector<std::unique_ptr<Core::Frontend::GraphicsContext>> contexts(num_workers);
|
|
|
|
std::vector<std::thread> threads(num_workers);
|
|
|
|
for (std::size_t i = 0; i < num_workers; ++i) {
|
|
|
|
const bool is_last_worker = i + 1 == num_workers;
|
|
|
|
const std::size_t start{bucket_size * i};
|
|
|
|
const std::size_t end{is_last_worker ? shader_usages.size() : start + bucket_size};
|
|
|
|
|
|
|
|
// On some platforms the shared context has to be created from the GUI thread
|
|
|
|
contexts[i] = emu_window.CreateSharedContext();
|
2019-04-26 01:10:20 +02:00
|
|
|
threads[i] = std::thread(Worker, contexts[i].get(), start, end, shader_usages, dumps);
|
2019-04-06 22:59:56 +02:00
|
|
|
}
|
|
|
|
for (auto& thread : threads) {
|
|
|
|
thread.join();
|
|
|
|
}
|
2019-01-21 20:38:23 +01:00
|
|
|
|
2019-04-06 22:59:56 +02:00
|
|
|
if (compilation_failed) {
|
|
|
|
// Invalidate the precompiled cache if a shader dumped shader was rejected
|
|
|
|
disk_cache.InvalidatePrecompiled();
|
|
|
|
dumps.clear();
|
|
|
|
precompiled_cache_altered = true;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (stop_loading) {
|
|
|
|
return;
|
2019-01-14 04:58:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(Rodrigo): Do state tracking for transferable shaders and do a dummy draw before
|
|
|
|
// precompiling them
|
|
|
|
|
2019-04-06 22:59:56 +02:00
|
|
|
for (std::size_t i = 0; i < shader_usages.size(); ++i) {
|
|
|
|
const auto& usage{shader_usages[i]};
|
2019-01-15 05:07:57 +01:00
|
|
|
if (dumps.find(usage) == dumps.end()) {
|
2019-04-06 22:59:56 +02:00
|
|
|
const auto& program{precompiled_programs.at(usage)};
|
2019-01-15 05:07:57 +01:00
|
|
|
disk_cache.SaveDump(usage, program->handle);
|
2019-02-25 11:47:23 +01:00
|
|
|
precompiled_cache_altered = true;
|
2018-12-09 22:33:10 +01:00
|
|
|
}
|
|
|
|
}
|
2019-02-25 11:47:23 +01:00
|
|
|
|
|
|
|
if (precompiled_cache_altered) {
|
|
|
|
disk_cache.SaveVirtualPrecompiledFile();
|
|
|
|
}
|
2018-12-09 22:33:10 +01:00
|
|
|
}
|
|
|
|
|
2019-01-14 04:58:15 +01:00
|
|
|
/// Attempts to load a dumped program binary into a GL program object.
/// Returns null when the binary format is unsupported or the driver rejects it.
CachedProgram ShaderCacheOpenGL::GeneratePrecompiledProgram(
    const ShaderDiskCacheDump& dump, const std::set<GLenum>& supported_formats) {
    if (supported_formats.find(dump.binary_format) == supported_formats.end()) {
        LOG_INFO(Render_OpenGL, "Precompiled cache entry with unsupported format - removing");
        return {};
    }

    CachedProgram shader = std::make_shared<OGLProgram>();
    shader->handle = glCreateProgram();
    glProgramParameteri(shader->handle, GL_PROGRAM_SEPARABLE, GL_TRUE);
    glProgramBinary(shader->handle, dump.binary_format, dump.binary.data(),
                    static_cast<GLsizei>(dump.binary.size()));

    // Drivers may reject stale binaries (e.g. after a driver update); detect via link status.
    GLint link_status{};
    glGetProgramiv(shader->handle, GL_LINK_STATUS, &link_status);
    if (link_status == GL_FALSE) {
        LOG_INFO(Render_OpenGL, "Precompiled cache rejected by the driver - removing");
        return {};
    }

    return shader;
}
|
|
|
|
|
2019-02-03 01:14:36 +01:00
|
|
|
/// Decompiles (or restores from the precompiled file) every raw shader entry,
/// validating stored hashes. Returns an empty map on cancellation or corruption.
std::unordered_map<u64, UnspecializedShader> ShaderCacheOpenGL::GenerateUnspecializedShaders(
    const std::atomic_bool& stop_loading, const VideoCore::DiskResourceLoadCallback& callback,
    const std::vector<ShaderDiskCacheRaw>& raws,
    const std::unordered_map<u64, ShaderDiskCacheDecompiled>& decompiled) {
    std::unordered_map<u64, UnspecializedShader> unspecialized;

    if (callback) {
        callback(VideoCore::LoadCallbackStage::Decompile, 0, raws.size());
    }

    for (std::size_t i = 0; i < raws.size(); ++i) {
        if (stop_loading) {
            return {};
        }
        const auto& raw{raws[i]};
        const u64 unique_identifier{raw.GetUniqueIdentifier()};
        const u64 calculated_hash{
            GetUniqueIdentifier(raw.GetProgramType(), raw.GetProgramCode(), raw.GetProgramCodeB())};
        if (unique_identifier != calculated_hash) {
            // Hash mismatch means the transferable file is corrupt; discard it entirely.
            LOG_ERROR(
                Render_OpenGL,
                "Invalid hash in entry={:016x} (obtained hash={:016x}) - removing shader cache",
                raw.GetUniqueIdentifier(), calculated_hash);
            disk_cache.InvalidateTransferable();
            return {};
        }

        GLShader::ProgramResult result;
        if (const auto it = decompiled.find(unique_identifier); it != decompiled.end()) {
            // If it's stored in the precompiled file, avoid decompiling it here
            const auto& stored_decompiled{it->second};
            result = {stored_decompiled.code, stored_decompiled.entries};
        } else {
            // Otherwise decompile the shader at boot and save the result to the decompiled file
            result = CreateProgram(device, raw.GetProgramType(), raw.GetProgramCode(),
                                   raw.GetProgramCodeB());
            disk_cache.SaveDecompiled(unique_identifier, result.first, result.second);
        }

        precompiled_shaders.insert({unique_identifier, result});

        unspecialized.insert(
            {raw.GetUniqueIdentifier(),
             {std::move(result.first), std::move(result.second), raw.GetProgramType()}});

        if (callback) {
            callback(VideoCore::LoadCallbackStage::Decompile, i, raws.size());
        }
    }
    return unspecialized;
}
|
2019-01-14 02:05:53 +01:00
|
|
|
|
2018-08-23 23:30:27 +02:00
|
|
|
/// Fetches (or builds) the shader for the given graphics pipeline stage.
///
/// When the shader state is not dirty the previously resolved shader is returned
/// directly. Otherwise the cache is consulted by host address; on a miss the program
/// code is read from guest memory (plus VertexB's code when the stage is VertexA,
/// which runs paired with it) and a new cached shader is created - from the
/// precompiled shader table when an entry with a matching identifier exists, from
/// the raw code otherwise.
Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
    const auto stage_index = static_cast<std::size_t>(program);
    if (!system.GPU().Maxwell3D().dirty.shaders) {
        // Shader state unchanged since the last call - reuse the cached resolution
        return last_shaders[stage_index];
    }

    auto& memory_manager{system.GPU().MemoryManager()};
    const GPUVAddr program_addr{GetShaderAddress(system, program)};

    // Look up shader in the cache based on address
    const auto host_ptr{memory_manager.GetPointer(program_addr)};
    if (Shader hit = TryGet(host_ptr)) {
        return last_shaders[stage_index] = hit;
    }

    // No shader found - create a new one from the guest program code
    ProgramCode code{GetShaderCode(memory_manager, program_addr, host_ptr)};
    ProgramCode code_b;
    if (program == Maxwell::ShaderProgram::VertexA) {
        // VertexA executes together with VertexB, so its identifier covers both programs
        const GPUVAddr address_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)};
        code_b = GetShaderCode(memory_manager, address_b, memory_manager.GetPointer(address_b));
    }

    const auto unique_identifier = GetUniqueIdentifier(GetProgramType(program), code, code_b);
    const auto cpu_addr{*memory_manager.GpuToCpuAddress(program_addr)};
    const ShaderParameters params{disk_cache, precompiled_programs, device, cpu_addr,
                                  host_ptr,   unique_identifier};

    Shader shader;
    const auto it = precompiled_shaders.find(unique_identifier);
    if (it != precompiled_shaders.end()) {
        // Reuse the decompiled result loaded from the disk cache
        shader = CachedShader::CreateStageFromCache(params, program, it->second);
    } else {
        shader = CachedShader::CreateStageFromMemory(params, program, std::move(code),
                                                     std::move(code_b));
    }
    Register(shader);

    return last_shaders[stage_index] = shader;
}
|
|
|
|
|
2019-07-15 03:25:13 +02:00
|
|
|
/// Fetches (or builds) the compute kernel located at the given GPU virtual address.
///
/// The cache is consulted by host address first; on a miss the kernel code is read
/// from guest memory and a new cached shader is created - from the precompiled
/// shader table when an entry with a matching identifier exists, from the raw code
/// otherwise. Newly created kernels are registered in the cache before returning.
Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
    auto& memory_manager{system.GPU().MemoryManager()};
    const auto host_ptr{memory_manager.GetPointer(code_addr)};

    // Fast path: a kernel for this host address is already cached
    if (Shader hit = TryGet(host_ptr)) {
        return hit;
    }

    // No kernel found - create a new one from the guest code
    auto code{GetShaderCode(memory_manager, code_addr, host_ptr)};
    const auto unique_identifier{GetUniqueIdentifier(ProgramType::Compute, code, {})};
    const auto cpu_addr{*memory_manager.GpuToCpuAddress(code_addr)};
    const ShaderParameters params{disk_cache, precompiled_programs, device, cpu_addr,
                                  host_ptr,   unique_identifier};

    Shader kernel;
    const auto it = precompiled_shaders.find(unique_identifier);
    if (it != precompiled_shaders.end()) {
        // Reuse the decompiled result loaded from the disk cache
        kernel = CachedShader::CreateKernelFromCache(params, it->second);
    } else {
        kernel = CachedShader::CreateKernelFromMemory(params, std::move(code));
    }

    Register(kernel);
    return kernel;
}
|
|
|
|
|
2019-04-23 23:19:28 +02:00
|
|
|
} // namespace OpenGL
|