2018-08-23 23:30:27 +02:00
|
|
|
// Copyright 2018 yuzu Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
2020-02-26 20:13:47 +01:00
|
|
|
#include <atomic>
|
|
|
|
#include <functional>
|
2019-04-06 22:59:56 +02:00
|
|
|
#include <mutex>
|
2019-09-25 04:34:18 +02:00
|
|
|
#include <optional>
|
|
|
|
#include <string>
|
2019-04-06 22:59:56 +02:00
|
|
|
#include <thread>
|
2019-09-25 04:34:18 +02:00
|
|
|
#include <unordered_set>
|
2020-02-26 20:13:47 +01:00
|
|
|
|
2018-12-09 22:33:10 +01:00
|
|
|
#include <boost/functional/hash.hpp>
|
2020-02-26 20:13:47 +01:00
|
|
|
|
2019-11-13 03:39:45 +01:00
|
|
|
#include "common/alignment.h"
|
2018-08-23 23:30:27 +02:00
|
|
|
#include "common/assert.h"
|
2019-11-13 03:39:45 +01:00
|
|
|
#include "common/logging/log.h"
|
2019-04-06 22:59:56 +02:00
|
|
|
#include "common/scope_exit.h"
|
2018-08-23 23:30:27 +02:00
|
|
|
#include "core/core.h"
|
2019-04-06 22:59:56 +02:00
|
|
|
#include "core/frontend/emu_window.h"
|
2019-09-23 21:40:58 +02:00
|
|
|
#include "video_core/engines/kepler_compute.h"
|
2018-08-23 23:30:27 +02:00
|
|
|
#include "video_core/engines/maxwell_3d.h"
|
2019-11-18 22:35:21 +01:00
|
|
|
#include "video_core/engines/shader_type.h"
|
2019-04-06 05:59:54 +02:00
|
|
|
#include "video_core/memory_manager.h"
|
2018-11-08 12:08:00 +01:00
|
|
|
#include "video_core/renderer_opengl/gl_rasterizer.h"
|
2018-08-23 23:30:27 +02:00
|
|
|
#include "video_core/renderer_opengl/gl_shader_cache.h"
|
2018-12-26 05:57:14 +01:00
|
|
|
#include "video_core/renderer_opengl/gl_shader_decompiler.h"
|
2019-01-14 04:58:15 +01:00
|
|
|
#include "video_core/renderer_opengl/gl_shader_disk_cache.h"
|
2019-12-29 06:03:05 +01:00
|
|
|
#include "video_core/renderer_opengl/gl_state_tracker.h"
|
2018-10-29 01:54:08 +01:00
|
|
|
#include "video_core/renderer_opengl/utils.h"
|
2018-12-21 02:29:15 +01:00
|
|
|
#include "video_core/shader/shader_ir.h"
|
2018-08-23 23:30:27 +02:00
|
|
|
|
|
|
|
namespace OpenGL {
|
|
|
|
|
2019-09-25 04:34:18 +02:00
|
|
|
using Tegra::Engines::ShaderType;
|
|
|
|
using VideoCommon::Shader::ConstBufferLocker;
|
2018-12-21 02:29:15 +01:00
|
|
|
using VideoCommon::Shader::ProgramCode;
|
2019-09-25 04:34:18 +02:00
|
|
|
using VideoCommon::Shader::ShaderIR;
|
|
|
|
|
|
|
|
namespace {

// Offset into the program stream where graphics-stage shader code begins
// (also used by CalculateProgramSize as the scan start).
constexpr u32 STAGE_MAIN_OFFSET = 10;
// Compute kernels start decoding at the beginning of the stream.
constexpr u32 KERNEL_MAIN_OFFSET = 0;

// Default decompiler settings shared by every shader built in this file.
constexpr VideoCommon::Shader::CompilerSettings COMPILER_SETTINGS{};
|
2019-01-14 04:58:15 +01:00
|
|
|
|
2018-08-23 23:30:27 +02:00
|
|
|
/// Gets the address for the specified shader stage program
|
2019-05-30 19:01:40 +02:00
|
|
|
GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) {
|
|
|
|
const auto& gpu{system.GPU().Maxwell3D()};
|
2019-02-24 06:15:35 +01:00
|
|
|
const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
|
|
|
|
return gpu.regs.code_address.CodeAddress() + shader_config.offset;
|
2018-08-23 23:30:27 +02:00
|
|
|
}
|
|
|
|
|
2019-09-25 04:34:18 +02:00
|
|
|
/// Gets if the current instruction offset is a scheduler instruction
constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
    // Sched instructions appear once every 4 instructions.
    constexpr std::size_t sched_period = 4;
    return ((offset - main_offset) % sched_period) == 0;
}
|
|
|
|
|
|
|
|
/// Calculates the size of a program stream
|
2020-02-26 20:13:47 +01:00
|
|
|
std::size_t CalculateProgramSize(const ProgramCode& program) {
|
2019-09-25 04:34:18 +02:00
|
|
|
constexpr std::size_t start_offset = 10;
|
|
|
|
// This is the encoded version of BRA that jumps to itself. All Nvidia
|
|
|
|
// shaders end with one.
|
|
|
|
constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL;
|
|
|
|
constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL;
|
|
|
|
std::size_t offset = start_offset;
|
|
|
|
while (offset < program.size()) {
|
|
|
|
const u64 instruction = program[offset];
|
|
|
|
if (!IsSchedInstruction(offset, start_offset)) {
|
|
|
|
if ((instruction & mask) == self_jumping_branch) {
|
|
|
|
// End on Maxwell's "nop" instruction
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (instruction == 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
offset++;
|
|
|
|
}
|
|
|
|
// The last instruction is included in the program size
|
|
|
|
return std::min(offset + 1, program.size());
|
|
|
|
}
|
|
|
|
|
2018-08-23 23:30:27 +02:00
|
|
|
/// Gets the shader program code from memory for the specified address
ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr,
                          const u8* host_ptr) {
    ProgramCode code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
    // If the guest address is not mapped (null host pointer), return an all-zero
    // program instead of reading through an invalid pointer.
    ASSERT_OR_EXECUTE(host_ptr != nullptr, {
        std::fill(code.begin(), code.end(), 0);
        return code;
    });
    memory_manager.ReadBlockUnsafe(gpu_addr, code.data(), code.size() * sizeof(u64));
    // Trim the buffer down to the actual length of the program stream.
    code.resize(CalculateProgramSize(code));
    return code;
}
|
|
|
|
|
2019-01-05 05:00:06 +01:00
|
|
|
/// Gets the shader type from a Maxwell program type
|
2019-11-18 22:35:21 +01:00
|
|
|
constexpr GLenum GetGLShaderType(ShaderType shader_type) {
|
|
|
|
switch (shader_type) {
|
|
|
|
case ShaderType::Vertex:
|
2019-01-05 05:00:06 +01:00
|
|
|
return GL_VERTEX_SHADER;
|
2019-11-18 22:35:21 +01:00
|
|
|
case ShaderType::Geometry:
|
2019-01-05 05:00:06 +01:00
|
|
|
return GL_GEOMETRY_SHADER;
|
2019-11-18 22:35:21 +01:00
|
|
|
case ShaderType::Fragment:
|
2019-01-05 05:00:06 +01:00
|
|
|
return GL_FRAGMENT_SHADER;
|
2019-11-18 22:35:21 +01:00
|
|
|
case ShaderType::Compute:
|
2019-07-15 03:25:13 +02:00
|
|
|
return GL_COMPUTE_SHADER;
|
2019-01-05 05:00:06 +01:00
|
|
|
default:
|
|
|
|
return GL_NONE;
|
2018-08-23 23:30:27 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-14 04:58:15 +01:00
|
|
|
/// Hashes one (or two) program streams
|
2019-11-18 22:35:21 +01:00
|
|
|
u64 GetUniqueIdentifier(ShaderType shader_type, bool is_a, const ProgramCode& code,
|
2020-02-26 20:13:47 +01:00
|
|
|
const ProgramCode& code_b = {}) {
|
2019-09-25 04:34:18 +02:00
|
|
|
u64 unique_identifier = boost::hash_value(code);
|
2019-11-18 22:35:21 +01:00
|
|
|
if (is_a) {
|
2019-09-25 04:34:18 +02:00
|
|
|
// VertexA programs include two programs
|
|
|
|
boost::hash_combine(unique_identifier, boost::hash_value(code_b));
|
2019-06-25 13:57:32 +02:00
|
|
|
}
|
2019-09-25 04:34:18 +02:00
|
|
|
return unique_identifier;
|
2019-01-14 04:58:15 +01:00
|
|
|
}
|
|
|
|
|
2019-11-18 22:35:21 +01:00
|
|
|
/// Returns a two-letter mnemonic for a shader stage, or "UNK" for unknown values.
constexpr const char* GetShaderTypeName(ShaderType shader_type) {
    if (shader_type == ShaderType::Vertex) {
        return "VS";
    }
    if (shader_type == ShaderType::TesselationControl) {
        return "HS";
    }
    if (shader_type == ShaderType::TesselationEval) {
        return "DS";
    }
    if (shader_type == ShaderType::Geometry) {
        return "GS";
    }
    if (shader_type == ShaderType::Fragment) {
        return "FS";
    }
    if (shader_type == ShaderType::Compute) {
        return "CS";
    }
    return "UNK";
}
|
|
|
|
|
2019-11-18 22:35:21 +01:00
|
|
|
/// Translates a Maxwell pipeline program slot into the generic engine shader type.
constexpr ShaderType GetShaderType(Maxwell::ShaderProgram program_type) {
    switch (program_type) {
    case Maxwell::ShaderProgram::VertexA:
    case Maxwell::ShaderProgram::VertexB:
        // Both vertex slots map to the same shader type.
        return ShaderType::Vertex;
    case Maxwell::ShaderProgram::TesselationControl:
        return ShaderType::TesselationControl;
    case Maxwell::ShaderProgram::TesselationEval:
        return ShaderType::TesselationEval;
    case Maxwell::ShaderProgram::Geometry:
        return ShaderType::Geometry;
    case Maxwell::ShaderProgram::Fragment:
        return ShaderType::Fragment;
    }
    // Unreachable for valid inputs; value-initialize as a fallback.
    return {};
}
|
|
|
|
|
2020-02-26 20:13:47 +01:00
|
|
|
/// Builds a human-readable shader identifier such as "VS0123456789ABCDEF".
std::string MakeShaderID(u64 unique_identifier, ShaderType shader_type) {
    std::string id = GetShaderTypeName(shader_type);
    id += fmt::format("{:016X}", unique_identifier);
    return id;
}
|
|
|
|
|
2020-02-26 20:13:47 +01:00
|
|
|
/// Rebuilds a ConstBufferLocker from a disk cache entry so a cached shader can be
/// re-validated against the state it was originally built with.
std::shared_ptr<ConstBufferLocker> MakeLocker(const ShaderDiskCacheEntry& entry) {
    const VideoCore::GuestDriverProfile guest_profile{entry.texture_handler_size};
    auto locker = std::make_shared<ConstBufferLocker>(entry.type, guest_profile);
    locker->SetBoundBuffer(entry.bound_buffer);
    // Replay every recorded constant buffer key...
    for (const auto& [address, value] : entry.keys) {
        const auto [buffer, offset] = address;
        locker->InsertKey(buffer, offset, value);
    }
    // ...and every bound/bindless sampler that was captured with the entry.
    for (const auto& [offset, sampler] : entry.bound_samplers) {
        locker->InsertBoundSampler(offset, sampler);
    }
    for (const auto& [key, sampler] : entry.bindless_samplers) {
        const auto [buffer, offset] = key;
        locker->InsertBindlessSampler(buffer, offset, sampler);
    }
    return locker;
}
|
|
|
|
|
2020-02-26 20:13:47 +01:00
|
|
|
/// Decompiles a shader IR to GLSL and compiles it into a separable GL program.
/// @param hint_retrievable When true, hints the driver that the program binary will be
///        queried later (used by the disk cache loader before SavePrecompiled).
std::shared_ptr<OGLProgram> BuildShader(const Device& device, ShaderType shader_type,
                                        u64 unique_identifier, const ShaderIR& ir,
                                        bool hint_retrievable = false) {
    LOG_INFO(Render_OpenGL, "{}", MakeShaderID(unique_identifier, shader_type));
    const std::string glsl = DecompileShader(device, ir, shader_type);
    OGLShader shader;
    shader.Create(glsl.c_str(), GetGLShaderType(shader_type));

    auto program = std::make_shared<OGLProgram>();
    program->Create(true, hint_retrievable, shader.handle);
    return program;
}
|
|
|
|
|
2019-09-25 04:34:18 +02:00
|
|
|
std::unordered_set<GLenum> GetSupportedFormats() {
|
2020-02-26 20:13:47 +01:00
|
|
|
GLint num_formats;
|
2019-01-14 04:58:15 +01:00
|
|
|
glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &num_formats);
|
|
|
|
|
|
|
|
std::vector<GLint> formats(num_formats);
|
|
|
|
glGetIntegerv(GL_PROGRAM_BINARY_FORMATS, formats.data());
|
|
|
|
|
2019-09-25 04:34:18 +02:00
|
|
|
std::unordered_set<GLenum> supported_formats;
|
|
|
|
for (const GLint format : formats) {
|
2019-01-14 04:58:15 +01:00
|
|
|
supported_formats.insert(static_cast<GLenum>(format));
|
2019-09-25 04:34:18 +02:00
|
|
|
}
|
2019-01-14 04:58:15 +01:00
|
|
|
return supported_formats;
|
|
|
|
}
|
|
|
|
|
2019-04-10 23:03:52 +02:00
|
|
|
} // Anonymous namespace
|
2019-01-14 04:58:15 +01:00
|
|
|
|
2020-02-26 20:13:47 +01:00
|
|
|
// Takes shared ownership of the const buffer locker and compiled GL program, and moves in
// the shader entries; cpu_addr/size_in_bytes are the cache bookkeeping for this object.
CachedShader::CachedShader(const u8* host_ptr, VAddr cpu_addr, std::size_t size_in_bytes,
                           std::shared_ptr<VideoCommon::Shader::ConstBufferLocker> locker,
                           ShaderEntries entries, std::shared_ptr<OGLProgram> program)
    : RasterizerCacheObject{host_ptr}, locker{std::move(locker)}, entries{std::move(entries)},
      cpu_addr{cpu_addr}, size_in_bytes{size_in_bytes}, program{std::move(program)} {}

CachedShader::~CachedShader() = default;
|
|
|
|
|
|
|
|
GLuint CachedShader::GetHandle() const {
    // Refuse to hand out a handle if the locker's recorded state is no longer consistent.
    // NOTE(review): aborting is drastic — presumably recompilation is expected to happen
    // before this point is reached; confirm against the callers.
    if (!locker->IsConsistent()) {
        std::abort();
    }
    return program->handle;
}
|
2019-05-31 02:56:37 +02:00
|
|
|
|
|
|
|
/// Builds a graphics-stage shader from guest memory, saves it to the transferable disk
/// cache, and wraps it in a CachedShader.
Shader CachedShader::CreateStageFromMemory(const ShaderParameters& params,
                                           Maxwell::ShaderProgram program_type, ProgramCode code,
                                           ProgramCode code_b) {
    const auto shader_type = GetShaderType(program_type);
    const std::size_t size_in_bytes = code.size() * sizeof(u64);

    // The locker is filled while the IR is built; its contents are saved to disk below.
    auto locker = std::make_shared<ConstBufferLocker>(shader_type, params.system.GPU().Maxwell3D());
    const ShaderIR ir(code, STAGE_MAIN_OFFSET, COMPILER_SETTINGS, *locker);
    // TODO(Rodrigo): Handle VertexA shaders
    // std::optional<ShaderIR> ir_b;
    // if (!code_b.empty()) {
    //     ir_b.emplace(code_b, STAGE_MAIN_OFFSET);
    // }
    auto program = BuildShader(params.device, shader_type, params.unique_identifier, ir);

    // Persist everything needed to rebuild this shader on the next run.
    ShaderDiskCacheEntry entry;
    entry.type = shader_type;
    entry.code = std::move(code);
    entry.code_b = std::move(code_b);
    entry.unique_identifier = params.unique_identifier;
    entry.bound_buffer = locker->GetBoundBuffer();
    entry.keys = locker->GetKeys();
    entry.bound_samplers = locker->GetBoundSamplers();
    entry.bindless_samplers = locker->GetBindlessSamplers();
    params.disk_cache.SaveEntry(std::move(entry));

    return std::shared_ptr<CachedShader>(new CachedShader(params.host_ptr, params.cpu_addr,
                                                          size_in_bytes, std::move(locker),
                                                          MakeEntries(ir), std::move(program)));
}
|
|
|
|
|
2019-09-25 04:34:18 +02:00
|
|
|
/// Builds a compute kernel from guest memory, saves it to the transferable disk cache,
/// and wraps it in a CachedShader.
Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, ProgramCode code) {
    const std::size_t size_in_bytes = code.size() * sizeof(u64);

    // Compute kernels are validated against the Kepler compute engine, not Maxwell3D.
    auto locker = std::make_shared<ConstBufferLocker>(Tegra::Engines::ShaderType::Compute,
                                                      params.system.GPU().KeplerCompute());
    const ShaderIR ir(code, KERNEL_MAIN_OFFSET, COMPILER_SETTINGS, *locker);
    auto program = BuildShader(params.device, ShaderType::Compute, params.unique_identifier, ir);

    // Persist everything needed to rebuild this kernel on the next run.
    ShaderDiskCacheEntry entry;
    entry.type = ShaderType::Compute;
    entry.code = std::move(code);
    entry.unique_identifier = params.unique_identifier;
    entry.bound_buffer = locker->GetBoundBuffer();
    entry.keys = locker->GetKeys();
    entry.bound_samplers = locker->GetBoundSamplers();
    entry.bindless_samplers = locker->GetBindlessSamplers();
    params.disk_cache.SaveEntry(std::move(entry));

    return std::shared_ptr<CachedShader>(new CachedShader(params.host_ptr, params.cpu_addr,
                                                          size_in_bytes, std::move(locker),
                                                          MakeEntries(ir), std::move(program)));
}
|
|
|
|
|
2019-09-25 04:34:18 +02:00
|
|
|
/// Wraps an already-built precompiled shader (locker, entries and program are shared
/// with the runtime cache) in a new CachedShader instance.
Shader CachedShader::CreateFromCache(const ShaderParameters& params,
                                     const PrecompiledShader& precompiled_shader,
                                     std::size_t size_in_bytes) {
    auto* const shader =
        new CachedShader(params.host_ptr, params.cpu_addr, size_in_bytes,
                         precompiled_shader.locker, precompiled_shader.entries,
                         precompiled_shader.program);
    return std::shared_ptr<CachedShader>(shader);
}
|
|
|
|
|
2019-04-10 23:03:52 +02:00
|
|
|
// The emu window reference is kept so LoadDiskCache can create shared GL contexts for
// its worker threads; the disk cache is bound to the running system instance.
ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system,
                                     Core::Frontend::EmuWindow& emu_window, const Device& device)
    : RasterizerCache{rasterizer}, system{system}, emu_window{emu_window}, device{device},
      disk_cache{system} {}
|
2019-01-14 04:58:15 +01:00
|
|
|
|
2019-01-21 20:38:23 +01:00
|
|
|
/// Loads the transferable shader cache from disk, building every entry across several
/// worker threads (loading driver precompiled binaries where possible, compiling from
/// GLSL otherwise), and re-serializes the precompiled cache file when it changed.
void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading,
                                      const VideoCore::DiskResourceLoadCallback& callback) {
    const std::optional transferable = disk_cache.LoadTransferable();
    if (!transferable) {
        // Nothing stored on disk for this title; nothing to do.
        return;
    }

    const std::vector gl_cache = disk_cache.LoadPrecompiled();
    const auto supported_formats = GetSupportedFormats();

    // Track if precompiled cache was altered during loading to know if we have to
    // serialize the virtual precompiled cache file back to the hard drive
    bool precompiled_cache_altered = false;

    // Inform the frontend about shader build initialization
    if (callback) {
        callback(VideoCore::LoadCallbackStage::Build, 0, transferable->size());
    }

    std::mutex mutex;
    std::size_t built_shaders = 0; // It doesn't have be atomic since it's used behind a mutex
    std::atomic_bool gl_cache_failed = false;

    // Linear search of the precompiled entries by unique identifier.
    const auto find_precompiled = [&gl_cache](u64 id) {
        return std::find_if(gl_cache.begin(), gl_cache.end(),
                            [id](const auto& entry) { return entry.unique_identifier == id; });
    };

    // Builds the transferable entries in [begin, end) on the given shared GL context.
    const auto worker = [&](Core::Frontend::GraphicsContext* context, std::size_t begin,
                            std::size_t end) {
        context->MakeCurrent();
        SCOPE_EXIT({ return context->DoneCurrent(); });

        for (std::size_t i = begin; i < end; ++i) {
            if (stop_loading) {
                return;
            }
            const auto& entry = (*transferable)[i];
            const u64 unique_identifier = entry.unique_identifier;
            const auto it = find_precompiled(unique_identifier);
            const auto precompiled_entry = it != gl_cache.end() ? &*it : nullptr;

            const bool is_compute = entry.type == ShaderType::Compute;
            const u32 main_offset = is_compute ? KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET;
            auto locker = MakeLocker(entry);
            const ShaderIR ir(entry.code, main_offset, COMPILER_SETTINGS, *locker);

            std::shared_ptr<OGLProgram> program;
            if (precompiled_entry) {
                // If the shader is precompiled, attempt to load it with
                program = GeneratePrecompiledProgram(entry, *precompiled_entry, supported_formats);
                if (!program) {
                    // The driver rejected the binary; the whole precompiled cache will be
                    // invalidated after the workers finish.
                    gl_cache_failed = true;
                }
            }
            if (!program) {
                // Otherwise compile it from GLSL
                program = BuildShader(device, entry.type, unique_identifier, ir, true);
            }

            PrecompiledShader shader;
            shader.program = std::move(program);
            shader.locker = std::move(locker);
            shader.entries = MakeEntries(ir);

            // Serialize callback progress and runtime_cache insertion across workers.
            std::scoped_lock lock{mutex};
            if (callback) {
                callback(VideoCore::LoadCallbackStage::Build, ++built_shaders,
                         transferable->size());
            }
            runtime_cache.emplace(entry.unique_identifier, std::move(shader));
        }
    };

    const auto num_workers{static_cast<std::size_t>(std::thread::hardware_concurrency() + 1ULL)};
    const std::size_t bucket_size{transferable->size() / num_workers};
    std::vector<std::unique_ptr<Core::Frontend::GraphicsContext>> contexts(num_workers);
    std::vector<std::thread> threads(num_workers);
    for (std::size_t i = 0; i < num_workers; ++i) {
        // The last worker also picks up the remainder of the division.
        const bool is_last_worker = i + 1 == num_workers;
        const std::size_t start{bucket_size * i};
        const std::size_t end{is_last_worker ? transferable->size() : start + bucket_size};

        // On some platforms the shared context has to be created from the GUI thread
        contexts[i] = emu_window.CreateSharedContext();
        threads[i] = std::thread(worker, contexts[i].get(), start, end);
    }
    for (auto& thread : threads) {
        thread.join();
    }

    if (gl_cache_failed) {
        // Invalidate the precompiled cache if a shader dumped shader was rejected
        disk_cache.InvalidatePrecompiled();
        // NOTE(review): this assignment is dead — the early return below means the flag is
        // never read again; confirm whether the virtual file should be re-serialized here.
        precompiled_cache_altered = true;
        return;
    }
    if (stop_loading) {
        return;
    }

    // TODO(Rodrigo): Do state tracking for transferable shaders and do a dummy draw
    // before precompiling them

    // Dump driver binaries for every shader that was not already in the precompiled cache.
    for (std::size_t i = 0; i < transferable->size(); ++i) {
        const u64 id = (*transferable)[i].unique_identifier;
        const auto it = find_precompiled(id);
        if (it == gl_cache.end()) {
            const GLuint program = runtime_cache.at(id).program->handle;
            disk_cache.SavePrecompiled(id, program);
            precompiled_cache_altered = true;
        }
    }

    if (precompiled_cache_altered) {
        disk_cache.SaveVirtualPrecompiledFile();
    }
}
|
|
|
|
|
2020-02-26 20:13:47 +01:00
|
|
|
/// Tries to load a driver-precompiled binary into a GL program object.
/// Returns an empty pointer when the format is unsupported or the driver rejects the
/// binary; the caller then falls back to compiling from GLSL.
std::shared_ptr<OGLProgram> ShaderCacheOpenGL::GeneratePrecompiledProgram(
    const ShaderDiskCacheEntry& entry, const ShaderDiskCachePrecompiled& precompiled_entry,
    const std::unordered_set<GLenum>& supported_formats) {
    if (supported_formats.find(precompiled_entry.binary_format) == supported_formats.end()) {
        LOG_INFO(Render_OpenGL, "Precompiled cache entry with unsupported format, removing");
        return {};
    }

    auto program = std::make_shared<OGLProgram>();
    program->handle = glCreateProgram();
    glProgramParameteri(program->handle, GL_PROGRAM_SEPARABLE, GL_TRUE);
    glProgramBinary(program->handle, precompiled_entry.binary_format,
                    precompiled_entry.binary.data(),
                    static_cast<GLsizei>(precompiled_entry.binary.size()));

    // glProgramBinary reports failure through the link status.
    GLint link_status;
    glGetProgramiv(program->handle, GL_LINK_STATUS, &link_status);
    if (link_status == GL_FALSE) {
        LOG_INFO(Render_OpenGL, "Precompiled cache rejected by the driver, removing");
        return {};
    }

    return program;
}
|
2019-01-14 02:05:53 +01:00
|
|
|
|
2018-08-23 23:30:27 +02:00
|
|
|
/// Returns the cached shader for a graphics stage, building it from guest memory or the
/// disk cache loader's runtime cache when it is not resident yet.
Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
    // Fast path: reuse the previous result while the shader registers are not dirty.
    if (!system.GPU().Maxwell3D().dirty.flags[Dirty::Shaders]) {
        return last_shaders[static_cast<std::size_t>(program)];
    }

    auto& memory_manager{system.GPU().MemoryManager()};
    const GPUVAddr address{GetShaderAddress(system, program)};

    // Look up shader in the cache based on address
    const auto host_ptr{memory_manager.GetPointer(address)};
    Shader shader{TryGet(host_ptr)};
    if (shader) {
        return last_shaders[static_cast<std::size_t>(program)] = shader;
    }

    // No shader found - create a new one
    ProgramCode code{GetShaderCode(memory_manager, address, host_ptr)};
    ProgramCode code_b;
    if (program == Maxwell::ShaderProgram::VertexA) {
        // VertexA programs are hashed together with the VertexB stream.
        const GPUVAddr address_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)};
        code_b = GetShaderCode(memory_manager, address_b, memory_manager.GetPointer(address_b));
    }

    const auto unique_identifier = GetUniqueIdentifier(
        GetShaderType(program), program == Maxwell::ShaderProgram::VertexA, code, code_b);
    const auto cpu_addr{*memory_manager.GpuToCpuAddress(address)};
    const ShaderParameters params{system,   disk_cache, device,
                                  cpu_addr, host_ptr,   unique_identifier};

    // Prefer a shader prebuilt by LoadDiskCache; otherwise compile it from memory.
    const auto found = runtime_cache.find(unique_identifier);
    if (found == runtime_cache.end()) {
        shader = CachedShader::CreateStageFromMemory(params, program, std::move(code),
                                                     std::move(code_b));
    } else {
        const std::size_t size_in_bytes = code.size() * sizeof(u64);
        shader = CachedShader::CreateFromCache(params, found->second, size_in_bytes);
    }
    Register(shader);

    return last_shaders[static_cast<std::size_t>(program)] = shader;
}
|
|
|
|
|
2019-07-15 03:25:13 +02:00
|
|
|
/// Returns the cached compute kernel at the given GPU address, building it from guest
/// memory or the disk cache loader's runtime cache when it is not resident yet.
Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
    auto& memory_manager{system.GPU().MemoryManager()};
    // Look the kernel up in the rasterizer cache by its host pointer first.
    const auto host_ptr{memory_manager.GetPointer(code_addr)};
    auto kernel = TryGet(host_ptr);
    if (kernel) {
        return kernel;
    }

    // No kernel found, create a new one
    auto code{GetShaderCode(memory_manager, code_addr, host_ptr)};
    const auto unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)};
    const auto cpu_addr{*memory_manager.GpuToCpuAddress(code_addr)};
    const ShaderParameters params{system,   disk_cache, device,
                                  cpu_addr, host_ptr,   unique_identifier};

    // Prefer a kernel prebuilt by LoadDiskCache; otherwise compile it from memory.
    const auto found = runtime_cache.find(unique_identifier);
    if (found == runtime_cache.end()) {
        kernel = CachedShader::CreateKernelFromMemory(params, std::move(code));
    } else {
        const std::size_t size_in_bytes = code.size() * sizeof(u64);
        kernel = CachedShader::CreateFromCache(params, found->second, size_in_bytes);
    }

    Register(kernel);
    return kernel;
}
|
|
|
|
|
2019-04-23 23:19:28 +02:00
|
|
|
} // namespace OpenGL
|