2018-08-23 23:30:27 +02:00
|
|
|
// Copyright 2018 yuzu Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
2020-02-26 20:13:47 +01:00
|
|
|
#include <atomic>
|
|
|
|
#include <functional>
|
2019-04-06 22:59:56 +02:00
|
|
|
#include <mutex>
|
2019-09-25 04:34:18 +02:00
|
|
|
#include <optional>
|
|
|
|
#include <string>
|
2019-04-06 22:59:56 +02:00
|
|
|
#include <thread>
|
2019-09-25 04:34:18 +02:00
|
|
|
#include <unordered_set>
|
2020-02-26 20:13:47 +01:00
|
|
|
|
2019-11-13 03:39:45 +01:00
|
|
|
#include "common/alignment.h"
|
2018-08-23 23:30:27 +02:00
|
|
|
#include "common/assert.h"
|
2019-11-13 03:39:45 +01:00
|
|
|
#include "common/logging/log.h"
|
2019-04-06 22:59:56 +02:00
|
|
|
#include "common/scope_exit.h"
|
2018-08-23 23:30:27 +02:00
|
|
|
#include "core/core.h"
|
2019-04-06 22:59:56 +02:00
|
|
|
#include "core/frontend/emu_window.h"
|
2019-09-23 21:40:58 +02:00
|
|
|
#include "video_core/engines/kepler_compute.h"
|
2018-08-23 23:30:27 +02:00
|
|
|
#include "video_core/engines/maxwell_3d.h"
|
2019-11-18 22:35:21 +01:00
|
|
|
#include "video_core/engines/shader_type.h"
|
2019-04-06 05:59:54 +02:00
|
|
|
#include "video_core/memory_manager.h"
|
2020-06-03 23:07:35 +02:00
|
|
|
#include "video_core/renderer_opengl/gl_arb_decompiler.h"
|
2018-11-08 12:08:00 +01:00
|
|
|
#include "video_core/renderer_opengl/gl_rasterizer.h"
|
2018-08-23 23:30:27 +02:00
|
|
|
#include "video_core/renderer_opengl/gl_shader_cache.h"
|
2018-12-26 05:57:14 +01:00
|
|
|
#include "video_core/renderer_opengl/gl_shader_decompiler.h"
|
2019-01-14 04:58:15 +01:00
|
|
|
#include "video_core/renderer_opengl/gl_shader_disk_cache.h"
|
2019-12-29 06:03:05 +01:00
|
|
|
#include "video_core/renderer_opengl/gl_state_tracker.h"
|
2018-10-29 01:54:08 +01:00
|
|
|
#include "video_core/renderer_opengl/utils.h"
|
2020-04-24 06:44:14 +02:00
|
|
|
#include "video_core/shader/memory_util.h"
|
2020-02-29 00:53:10 +01:00
|
|
|
#include "video_core/shader/registry.h"
|
2018-12-21 02:29:15 +01:00
|
|
|
#include "video_core/shader/shader_ir.h"
|
2020-05-23 01:55:38 +02:00
|
|
|
#include "video_core/shader_cache.h"
|
2020-07-10 05:36:38 +02:00
|
|
|
#include "video_core/shader_notify.h"
|
2018-08-23 23:30:27 +02:00
|
|
|
|
|
|
|
namespace OpenGL {
|
|
|
|
|
2019-09-25 04:34:18 +02:00
|
|
|
using Tegra::Engines::ShaderType;
|
2020-04-24 06:44:14 +02:00
|
|
|
using VideoCommon::Shader::GetShaderAddress;
|
|
|
|
using VideoCommon::Shader::GetShaderCode;
|
|
|
|
using VideoCommon::Shader::GetUniqueIdentifier;
|
|
|
|
using VideoCommon::Shader::KERNEL_MAIN_OFFSET;
|
2018-12-21 02:29:15 +01:00
|
|
|
using VideoCommon::Shader::ProgramCode;
|
2020-02-29 00:53:10 +01:00
|
|
|
using VideoCommon::Shader::Registry;
|
2019-09-25 04:34:18 +02:00
|
|
|
using VideoCommon::Shader::ShaderIR;
|
2020-04-24 06:44:14 +02:00
|
|
|
using VideoCommon::Shader::STAGE_MAIN_OFFSET;
|
2019-09-25 04:34:18 +02:00
|
|
|
|
|
|
|
namespace {
|
2018-12-21 02:29:15 +01:00
|
|
|
|
2020-04-17 22:41:48 +02:00
|
|
|
constexpr VideoCommon::Shader::CompilerSettings COMPILER_SETTINGS{};
|
2019-01-14 04:58:15 +01:00
|
|
|
|
2019-01-05 05:00:06 +01:00
|
|
|
/// Gets the shader type from a Maxwell program type
|
2019-11-18 22:35:21 +01:00
|
|
|
constexpr GLenum GetGLShaderType(ShaderType shader_type) {
|
|
|
|
switch (shader_type) {
|
|
|
|
case ShaderType::Vertex:
|
2019-01-05 05:00:06 +01:00
|
|
|
return GL_VERTEX_SHADER;
|
2019-11-18 22:35:21 +01:00
|
|
|
case ShaderType::Geometry:
|
2019-01-05 05:00:06 +01:00
|
|
|
return GL_GEOMETRY_SHADER;
|
2019-11-18 22:35:21 +01:00
|
|
|
case ShaderType::Fragment:
|
2019-01-05 05:00:06 +01:00
|
|
|
return GL_FRAGMENT_SHADER;
|
2019-11-18 22:35:21 +01:00
|
|
|
case ShaderType::Compute:
|
2019-07-15 03:25:13 +02:00
|
|
|
return GL_COMPUTE_SHADER;
|
2019-01-05 05:00:06 +01:00
|
|
|
default:
|
|
|
|
return GL_NONE;
|
2018-08-23 23:30:27 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-18 22:35:21 +01:00
|
|
|
/// Short stage mnemonic ("VS", "HS", ...) used to build human-readable shader identifiers.
/// Unknown enum values map to "UNK".
constexpr const char* GetShaderTypeName(ShaderType shader_type) {
    if (shader_type == ShaderType::Vertex) {
        return "VS";
    }
    if (shader_type == ShaderType::TesselationControl) {
        return "HS";
    }
    if (shader_type == ShaderType::TesselationEval) {
        return "DS";
    }
    if (shader_type == ShaderType::Geometry) {
        return "GS";
    }
    if (shader_type == ShaderType::Fragment) {
        return "FS";
    }
    if (shader_type == ShaderType::Compute) {
        return "CS";
    }
    return "UNK";
}
|
|
|
|
|
2019-11-18 22:35:21 +01:00
|
|
|
/// Translates a Maxwell graphics program slot into the generic shader stage type.
/// VertexA and VertexB both map to the vertex stage; unknown slots yield a
/// value-initialized ShaderType.
constexpr ShaderType GetShaderType(Maxwell::ShaderProgram program_type) {
    if (program_type == Maxwell::ShaderProgram::VertexA ||
        program_type == Maxwell::ShaderProgram::VertexB) {
        return ShaderType::Vertex;
    }
    if (program_type == Maxwell::ShaderProgram::TesselationControl) {
        return ShaderType::TesselationControl;
    }
    if (program_type == Maxwell::ShaderProgram::TesselationEval) {
        return ShaderType::TesselationEval;
    }
    if (program_type == Maxwell::ShaderProgram::Geometry) {
        return ShaderType::Geometry;
    }
    if (program_type == Maxwell::ShaderProgram::Fragment) {
        return ShaderType::Fragment;
    }
    return {};
}
|
|
|
|
|
2020-05-18 03:32:49 +02:00
|
|
|
/// Maps a shader stage to the NV assembly-program target enum used by
/// glNamedProgramStringEXT/glLoadProgramNV. Unknown stages yield GL_NONE (zero).
constexpr GLenum AssemblyEnum(ShaderType shader_type) {
    switch (shader_type) {
    case ShaderType::Compute:
        return GL_COMPUTE_PROGRAM_NV;
    case ShaderType::Fragment:
        return GL_FRAGMENT_PROGRAM_NV;
    case ShaderType::Geometry:
        return GL_GEOMETRY_PROGRAM_NV;
    case ShaderType::TesselationEval:
        return GL_TESS_EVALUATION_PROGRAM_NV;
    case ShaderType::TesselationControl:
        return GL_TESS_CONTROL_PROGRAM_NV;
    case ShaderType::Vertex:
        return GL_VERTEX_PROGRAM_NV;
    }
    return {};
}
|
|
|
|
|
2020-02-26 20:13:47 +01:00
|
|
|
/// Builds a readable shader identifier such as "VS0123456789ABCDEF" for logging.
std::string MakeShaderID(u64 unique_identifier, ShaderType shader_type) {
    const char* const stage_name = GetShaderTypeName(shader_type);
    return fmt::format("{}{:016X}", stage_name, unique_identifier);
}
|
|
|
|
|
2020-02-29 00:53:10 +01:00
|
|
|
std::shared_ptr<Registry> MakeRegistry(const ShaderDiskCacheEntry& entry) {
|
2020-02-26 20:13:47 +01:00
|
|
|
const VideoCore::GuestDriverProfile guest_profile{entry.texture_handler_size};
|
2020-02-29 07:49:51 +01:00
|
|
|
const VideoCommon::Shader::SerializedRegistryInfo info{guest_profile, entry.bound_buffer,
|
|
|
|
entry.graphics_info, entry.compute_info};
|
2020-07-21 06:29:23 +02:00
|
|
|
auto registry = std::make_shared<Registry>(entry.type, info);
|
2020-02-26 20:13:47 +01:00
|
|
|
for (const auto& [address, value] : entry.keys) {
|
|
|
|
const auto [buffer, offset] = address;
|
2020-02-29 00:53:10 +01:00
|
|
|
registry->InsertKey(buffer, offset, value);
|
2019-09-26 05:23:08 +02:00
|
|
|
}
|
2020-02-26 20:13:47 +01:00
|
|
|
for (const auto& [offset, sampler] : entry.bound_samplers) {
|
2020-02-29 00:53:10 +01:00
|
|
|
registry->InsertBoundSampler(offset, sampler);
|
2019-09-26 05:23:08 +02:00
|
|
|
}
|
2020-02-26 20:13:47 +01:00
|
|
|
for (const auto& [key, sampler] : entry.bindless_samplers) {
|
2019-09-26 05:23:08 +02:00
|
|
|
const auto [buffer, offset] = key;
|
2020-02-29 00:53:10 +01:00
|
|
|
registry->InsertBindlessSampler(buffer, offset, sampler);
|
2019-09-26 05:23:08 +02:00
|
|
|
}
|
2020-02-29 00:53:10 +01:00
|
|
|
return registry;
|
2019-09-26 05:23:08 +02:00
|
|
|
}
|
|
|
|
|
2020-07-10 05:36:38 +02:00
|
|
|
std::unordered_set<GLenum> GetSupportedFormats() {
|
|
|
|
GLint num_formats;
|
|
|
|
glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &num_formats);
|
|
|
|
|
|
|
|
std::vector<GLint> formats(num_formats);
|
|
|
|
glGetIntegerv(GL_PROGRAM_BINARY_FORMATS, formats.data());
|
|
|
|
|
|
|
|
std::unordered_set<GLenum> supported_formats;
|
|
|
|
for (const GLint format : formats) {
|
|
|
|
supported_formats.insert(static_cast<GLenum>(format));
|
|
|
|
}
|
|
|
|
return supported_formats;
|
|
|
|
}
|
|
|
|
|
|
|
|
} // Anonymous namespace
|
|
|
|
|
2020-05-18 03:32:49 +02:00
|
|
|
/// Compiles a shader IR into a host GPU program.
///
/// Depending on device support, the IR is either decompiled to an NV assembly
/// (ARB-style) program or to GLSL and compiled into a separable program object.
///
/// @param device            Host device capabilities (selects assembly vs GLSL path).
/// @param shader_type       Pipeline stage being built.
/// @param unique_identifier Hash identifying this shader, used for the log ID.
/// @param ir                Decoded shader intermediate representation.
/// @param registry          Engine state snapshot the decompiler consults.
/// @param hint_retrievable  When true, hints GL to keep the binary retrievable
///                          (used when the result is saved to the precompiled cache).
/// @return Shared handle holding either the assembly program or the source program.
ProgramSharedPtr BuildShader(const Device& device, ShaderType shader_type, u64 unique_identifier,
                             const ShaderIR& ir, const Registry& registry, bool hint_retrievable) {
    const std::string shader_id = MakeShaderID(unique_identifier, shader_type);
    LOG_INFO(Render_OpenGL, "{}", shader_id);

    auto program = std::make_shared<ProgramHandle>();

    if (device.UseAssemblyShaders()) {
        // Assembly path: decompile to NV program text and upload it directly.
        const std::string arb =
            DecompileAssemblyShader(device, ir, registry, shader_type, shader_id);

        GLuint& arb_prog = program->assembly_program.handle;

        // Commented out functions signal OpenGL errors but are compatible with apitrace.
        // Use them only to capture and replay on apitrace.
#if 0
        glGenProgramsNV(1, &arb_prog);
        glLoadProgramNV(AssemblyEnum(shader_type), arb_prog, static_cast<GLsizei>(arb.size()),
                        reinterpret_cast<const GLubyte*>(arb.data()));
#else
        glGenProgramsARB(1, &arb_prog);
        glNamedProgramStringEXT(arb_prog, AssemblyEnum(shader_type), GL_PROGRAM_FORMAT_ASCII_ARB,
                                static_cast<GLsizei>(arb.size()), arb.data());
#endif
        // A non-empty error string means the driver rejected (part of) the program;
        // dump the generated assembly to aid debugging.
        const auto err = reinterpret_cast<const char*>(glGetString(GL_PROGRAM_ERROR_STRING_NV));
        if (err && *err) {
            LOG_CRITICAL(Render_OpenGL, "{}", err);
            LOG_INFO(Render_OpenGL, "\n{}", arb);
        }
    } else {
        // GLSL path: decompile, compile the stage object, then link it into a
        // separable program so it can be mixed and matched in a pipeline.
        const std::string glsl = DecompileShader(device, ir, registry, shader_type, shader_id);
        OGLShader shader;
        shader.Create(glsl.c_str(), GetGLShaderType(shader_type));

        program->source_program.Create(true, hint_retrievable, shader.handle);
    }

    return program;
}
|
|
|
|
|
2020-05-23 02:01:36 +02:00
|
|
|
// Takes ownership of the registry, entry table and program handle. `is_built`
// is false for shaders queued for asynchronous compilation, whose handle is
// filled in later by AsyncOpenGLBuilt/AsyncGLASMBuilt.
Shader::Shader(std::shared_ptr<VideoCommon::Shader::Registry> registry_, ShaderEntries entries_,
               ProgramSharedPtr program_, bool is_built)
    : registry{std::move(registry_)}, entries{std::move(entries_)}, program{std::move(program_)},
      is_built(is_built) {
    // Prefer the assembly program handle; fall back to the GLSL program when the
    // assembly path was not used (its handle is zero in that case).
    handle = program->assembly_program.handle;
    if (handle == 0) {
        handle = program->source_program.handle;
    }
    // A shader claimed to be built must have a valid handle from one of the paths.
    if (is_built) {
        ASSERT(handle != 0);
    }
}
|
2020-02-26 20:13:47 +01:00
|
|
|
|
2020-05-23 01:55:38 +02:00
|
|
|
// Out-of-line default destructor (keeps forward-declared member types out of the header).
Shader::~Shader() = default;
|
2020-02-26 20:13:47 +01:00
|
|
|
|
2020-05-23 01:55:38 +02:00
|
|
|
// Returns the host GL program handle. In debug builds, verifies the registry
// still matches current engine state before the handle is used for a draw.
GLuint Shader::GetHandle() const {
    DEBUG_ASSERT(registry->IsConsistent());
    return handle;
}
|
2019-05-31 02:56:37 +02:00
|
|
|
|
2020-07-10 05:36:38 +02:00
|
|
|
// Whether the host program finished building (false while an async build is pending).
bool Shader::IsBuilt() const {
    return is_built;
}
|
|
|
|
|
|
|
|
// Installs an asynchronously-compiled GLSL program and marks the shader usable.
void Shader::AsyncOpenGLBuilt(OGLProgram new_program) {
    auto& target = program->source_program;
    target = std::move(new_program);
    handle = target.handle;
    is_built = true;
}
|
|
|
|
|
|
|
|
// Installs an asynchronously-compiled assembly (GLASM) program and marks the shader usable.
void Shader::AsyncGLASMBuilt(OGLAssemblyProgram new_program) {
    auto& target = program->assembly_program;
    target = std::move(new_program);
    handle = target.handle;
    is_built = true;
}
|
|
|
|
|
|
|
|
/// Creates a graphics-stage shader from guest memory.
///
/// Builds synchronously when asynchronous shaders are disabled (or not applicable),
/// saving the result to the disk cache; otherwise queues the compile on the async
/// worker and returns a not-yet-built Shader placeholder.
///
/// @param params        Common construction parameters (system, disk cache, device, ...).
/// @param program_type  Maxwell program slot (VertexA/B, Geometry, ...).
/// @param code          Guest shader code (consumed).
/// @param code_b        VertexA companion code, empty otherwise (consumed).
/// @param async_shaders Async compile queue.
/// @param cpu_addr      CPU address used to match the async result back to this shader.
std::unique_ptr<Shader> Shader::CreateStageFromMemory(
    const ShaderParameters& params, Maxwell::ShaderProgram program_type, ProgramCode code,
    ProgramCode code_b, VideoCommon::Shader::AsyncShaders& async_shaders, VAddr cpu_addr) {
    const auto shader_type = GetShaderType(program_type);

    auto& gpu = params.system.GPU();
    // NOTE: "MarkSharderBuilding" is the (misspelled) name of the external
    // ShaderNotify API; it cannot be renamed from this file.
    gpu.ShaderNotify().MarkSharderBuilding();

    auto registry = std::make_shared<Registry>(shader_type, gpu.Maxwell3D());
    if (!async_shaders.IsShaderAsync(params.system.GPU()) ||
        !params.device.UseAsynchronousShaders()) {
        // Synchronous path: decode, build and persist to the disk cache now.
        const ShaderIR ir(code, STAGE_MAIN_OFFSET, COMPILER_SETTINGS, *registry);
        // TODO(Rodrigo): Handle VertexA shaders
        // std::optional<ShaderIR> ir_b;
        // if (!code_b.empty()) {
        //     ir_b.emplace(code_b, STAGE_MAIN_OFFSET);
        // }
        auto program =
            BuildShader(params.device, shader_type, params.unique_identifier, ir, *registry);
        // Serialize everything needed to rebuild this shader on the next boot.
        ShaderDiskCacheEntry entry;
        entry.type = shader_type;
        entry.code = std::move(code);
        entry.code_b = std::move(code_b);
        entry.unique_identifier = params.unique_identifier;
        entry.bound_buffer = registry->GetBoundBuffer();
        entry.graphics_info = registry->GetGraphicsInfo();
        entry.keys = registry->GetKeys();
        entry.bound_samplers = registry->GetBoundSamplers();
        entry.bindless_samplers = registry->GetBindlessSamplers();
        params.disk_cache.SaveEntry(std::move(entry));

        gpu.ShaderNotify().MarkShaderComplete();

        // is_built = true: the program handle is valid immediately.
        return std::unique_ptr<Shader>(new Shader(std::move(registry),
                                                  MakeEntries(params.device, ir, shader_type),
                                                  std::move(program), true));
    } else {
        // Async path: the IR is still decoded here because the resource entries
        // are required before the compiled program exists.
        // Required for entries
        const ShaderIR ir(code, STAGE_MAIN_OFFSET, COMPILER_SETTINGS, *registry);
        auto entries = MakeEntries(params.device, ir, shader_type);

        async_shaders.QueueOpenGLShader(params.device, shader_type, params.unique_identifier,
                                        std::move(code), std::move(code_b), STAGE_MAIN_OFFSET,
                                        COMPILER_SETTINGS, *registry, cpu_addr);

        // Empty placeholder program; filled in by AsyncOpenGLBuilt/AsyncGLASMBuilt
        // when the worker finishes (is_built = false until then).
        auto program = std::make_shared<ProgramHandle>();
        return std::unique_ptr<Shader>(
            new Shader(std::move(registry), std::move(entries), std::move(program), false));
    }
}
|
|
|
|
|
2020-05-23 01:55:38 +02:00
|
|
|
/// Creates a compute kernel from guest memory, always building synchronously,
/// and persists the result to the transferable disk cache.
///
/// @param params Common construction parameters (system, disk cache, device, ...).
/// @param code   Guest compute shader code (consumed).
std::unique_ptr<Shader> Shader::CreateKernelFromMemory(const ShaderParameters& params,
                                                       ProgramCode code) {
    auto& gpu = params.system.GPU();
    // NOTE: "MarkSharderBuilding" is the (misspelled) name of the external
    // ShaderNotify API; it cannot be renamed from this file.
    gpu.ShaderNotify().MarkSharderBuilding();

    // Compute kernels take their state from the Kepler compute engine instead of Maxwell3D.
    auto& engine = gpu.KeplerCompute();
    auto registry = std::make_shared<Registry>(ShaderType::Compute, engine);
    const ShaderIR ir(code, KERNEL_MAIN_OFFSET, COMPILER_SETTINGS, *registry);
    const u64 uid = params.unique_identifier;
    auto program = BuildShader(params.device, ShaderType::Compute, uid, ir, *registry);

    // Serialize everything needed to rebuild this kernel on the next boot.
    ShaderDiskCacheEntry entry;
    entry.type = ShaderType::Compute;
    entry.code = std::move(code);
    entry.unique_identifier = uid;
    entry.bound_buffer = registry->GetBoundBuffer();
    entry.compute_info = registry->GetComputeInfo();
    entry.keys = registry->GetKeys();
    entry.bound_samplers = registry->GetBoundSamplers();
    entry.bindless_samplers = registry->GetBindlessSamplers();
    params.disk_cache.SaveEntry(std::move(entry));

    gpu.ShaderNotify().MarkShaderComplete();

    return std::unique_ptr<Shader>(new Shader(std::move(registry),
                                              MakeEntries(params.device, ir, ShaderType::Compute),
                                              std::move(program)));
}
|
|
|
|
|
2020-05-23 01:55:38 +02:00
|
|
|
std::unique_ptr<Shader> Shader::CreateFromCache(const ShaderParameters& params,
|
|
|
|
const PrecompiledShader& precompiled_shader) {
|
|
|
|
return std::unique_ptr<Shader>(new Shader(
|
|
|
|
precompiled_shader.registry, precompiled_shader.entries, precompiled_shader.program));
|
2019-01-14 04:58:15 +01:00
|
|
|
}
|
|
|
|
|
2019-04-10 23:03:52 +02:00
|
|
|
// All references are stored for the cache's lifetime; the rasterizer is handed
// to the generic ShaderCache base, and the disk cache is bound to the system.
ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system,
                                     Core::Frontend::EmuWindow& emu_window, const Device& device)
    : VideoCommon::ShaderCache<Shader>{rasterizer}, system{system},
      emu_window{emu_window}, device{device}, disk_cache{system} {}
|
|
|
|
|
|
|
|
// Out-of-line default destructor (keeps forward-declared member types out of the header).
ShaderCacheOpenGL::~ShaderCacheOpenGL() = default;
|
2019-01-14 04:58:15 +01:00
|
|
|
|
2019-01-21 20:38:23 +01:00
|
|
|
/// Loads the transferable disk cache and compiles every entry into the runtime
/// cache, fanning the work out over hardware_concurrency()+1 worker threads, each
/// with its own shared GL context. Afterwards, newly built GLSL programs are saved
/// back into the precompiled cache (skipped entirely for assembly shaders).
///
/// @param stop_loading Set externally to abort loading early (checked per shader).
/// @param callback     Optional progress callback for the frontend loading screen.
void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading,
                                      const VideoCore::DiskResourceLoadCallback& callback) {
    const std::optional transferable = disk_cache.LoadTransferable();
    if (!transferable) {
        return;
    }

    std::vector<ShaderDiskCachePrecompiled> gl_cache;
    if (!device.UseAssemblyShaders()) {
        // Only load the precompiled cache when we are not using assembly shaders
        gl_cache = disk_cache.LoadPrecompiled();
    }
    const auto supported_formats = GetSupportedFormats();

    // Track if the precompiled cache was altered during loading to know if we have to
    // serialize the virtual precompiled cache file back to the hard drive
    bool precompiled_cache_altered = false;

    // Inform the frontend about shader build initialization
    if (callback) {
        callback(VideoCore::LoadCallbackStage::Build, 0, transferable->size());
    }

    std::mutex mutex;
    std::size_t built_shaders = 0; // It doesn't have to be atomic since it's used behind a mutex
    std::atomic_bool gl_cache_failed = false;

    // Linear scan of the precompiled entries by unique identifier.
    const auto find_precompiled = [&gl_cache](u64 id) {
        return std::find_if(gl_cache.begin(), gl_cache.end(),
                            [id](const auto& entry) { return entry.unique_identifier == id; });
    };

    // Worker body: builds entries [begin, end) of the transferable cache on the
    // given shared GL context. The mutex guards the progress counter and
    // runtime_cache; everything before the lock is thread-local work.
    const auto worker = [&](Core::Frontend::GraphicsContext* context, std::size_t begin,
                            std::size_t end) {
        const auto scope = context->Acquire();

        for (std::size_t i = begin; i < end; ++i) {
            if (stop_loading) {
                return;
            }
            const auto& entry = (*transferable)[i];
            const u64 uid = entry.unique_identifier;
            const auto it = find_precompiled(uid);
            const auto precompiled_entry = it != gl_cache.end() ? &*it : nullptr;

            const bool is_compute = entry.type == ShaderType::Compute;
            const u32 main_offset = is_compute ? KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET;
            auto registry = MakeRegistry(entry);
            const ShaderIR ir(entry.code, main_offset, COMPILER_SETTINGS, *registry);

            ProgramSharedPtr program;
            if (precompiled_entry) {
                // If the shader is precompiled, attempt to load its driver binary
                program = GeneratePrecompiledProgram(entry, *precompiled_entry, supported_formats);
                if (!program) {
                    gl_cache_failed = true;
                }
            }
            if (!program) {
                // Otherwise compile it from GLSL (retrievable so it can be re-dumped)
                program = BuildShader(device, entry.type, uid, ir, *registry, true);
            }

            PrecompiledShader shader;
            shader.program = std::move(program);
            shader.registry = std::move(registry);
            shader.entries = MakeEntries(device, ir, entry.type);

            // Held until the end of the iteration; protects built_shaders and runtime_cache.
            std::scoped_lock lock{mutex};
            if (callback) {
                callback(VideoCore::LoadCallbackStage::Build, ++built_shaders,
                         transferable->size());
            }
            runtime_cache.emplace(entry.unique_identifier, std::move(shader));
        }
    };

    const auto num_workers{static_cast<std::size_t>(std::thread::hardware_concurrency() + 1ULL)};
    const std::size_t bucket_size{transferable->size() / num_workers};
    std::vector<std::unique_ptr<Core::Frontend::GraphicsContext>> contexts(num_workers);
    std::vector<std::thread> threads(num_workers);
    for (std::size_t i = 0; i < num_workers; ++i) {
        const bool is_last_worker = i + 1 == num_workers;
        // The last worker also takes the remainder of the division.
        const std::size_t start{bucket_size * i};
        const std::size_t end{is_last_worker ? transferable->size() : start + bucket_size};

        // On some platforms the shared context has to be created from the GUI thread
        contexts[i] = emu_window.CreateSharedContext();
        threads[i] = std::thread(worker, contexts[i].get(), start, end);
    }
    for (auto& thread : threads) {
        thread.join();
    }

    if (gl_cache_failed) {
        // Invalidate the precompiled cache if a dumped shader was rejected
        disk_cache.InvalidatePrecompiled();
        // NOTE(review): this flag is set but the early return below skips
        // SaveVirtualPrecompiledFile(); presumably InvalidatePrecompiled handles
        // the on-disk state itself — confirm.
        precompiled_cache_altered = true;
        return;
    }
    if (stop_loading) {
        return;
    }

    if (device.UseAssemblyShaders()) {
        // Don't store precompiled binaries for assembly shaders.
        return;
    }

    // TODO(Rodrigo): Do state tracking for transferable shaders and do a dummy draw
    // before precompiling them

    // Dump driver binaries for every shader that was not already precompiled.
    for (std::size_t i = 0; i < transferable->size(); ++i) {
        const u64 id = (*transferable)[i].unique_identifier;
        const auto it = find_precompiled(id);
        if (it == gl_cache.end()) {
            const GLuint program = runtime_cache.at(id).program->source_program.handle;
            disk_cache.SavePrecompiled(id, program);
            precompiled_cache_altered = true;
        }
    }

    if (precompiled_cache_altered) {
        disk_cache.SaveVirtualPrecompiledFile();
    }
}
|
|
|
|
|
2020-05-18 03:32:49 +02:00
|
|
|
/// Attempts to restore a program object from a precompiled driver binary.
///
/// Returns an empty pointer when the binary format is no longer supported or the
/// driver rejects the binary (e.g. after a driver update), signalling the caller
/// to recompile from source and drop the stale precompiled entry.
///
/// @param entry             Transferable entry (currently unused here — kept for
///                          interface symmetry; presumably for future validation).
/// @param precompiled_entry Binary blob plus its driver-specific format token.
/// @param supported_formats Formats accepted by the current driver.
ProgramSharedPtr ShaderCacheOpenGL::GeneratePrecompiledProgram(
    const ShaderDiskCacheEntry& entry, const ShaderDiskCachePrecompiled& precompiled_entry,
    const std::unordered_set<GLenum>& supported_formats) {
    if (supported_formats.find(precompiled_entry.binary_format) == supported_formats.end()) {
        LOG_INFO(Render_OpenGL, "Precompiled cache entry with unsupported format, removing");
        return {};
    }

    auto program = std::make_shared<ProgramHandle>();
    GLuint& handle = program->source_program.handle;
    handle = glCreateProgram();
    // Separable so the restored program can be bound per-stage in a pipeline.
    glProgramParameteri(handle, GL_PROGRAM_SEPARABLE, GL_TRUE);
    glProgramBinary(handle, precompiled_entry.binary_format, precompiled_entry.binary.data(),
                    static_cast<GLsizei>(precompiled_entry.binary.size()));

    // glProgramBinary reports rejection through the link status.
    GLint link_status;
    glGetProgramiv(handle, GL_LINK_STATUS, &link_status);
    if (link_status == GL_FALSE) {
        LOG_INFO(Render_OpenGL, "Precompiled cache rejected by the driver, removing");
        return {};
    }

    return program;
}
|
2019-01-14 02:05:53 +01:00
|
|
|
|
2020-07-10 05:36:38 +02:00
|
|
|
/// Returns the shader bound to the given graphics stage, creating it on demand.
///
/// Fast path: when the shader registers are clean and the cached shader is fully
/// built, the previous lookup is reused. Otherwise completed async builds are
/// drained, then the shader is looked up by CPU address, by unique identifier in
/// the runtime cache, and finally created from guest memory.
Shader* ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program,
                                           VideoCommon::Shader::AsyncShaders& async_shaders) {
    if (!system.GPU().Maxwell3D().dirty.flags[Dirty::Shaders]) {
        auto* last_shader = last_shaders[static_cast<std::size_t>(program)];
        // A still-building async shader must fall through so it can be finalized below.
        if (last_shader->IsBuilt()) {
            return last_shader;
        }
    }

    auto& memory_manager{system.GPU().MemoryManager()};
    const GPUVAddr address{GetShaderAddress(system, program)};

    // Drain finished asynchronous builds: install the compiled program into its
    // Shader object and persist the entry to the disk cache.
    if (device.UseAsynchronousShaders() && async_shaders.HasCompletedWork()) {
        auto completed_work = async_shaders.GetCompletedWork();
        for (auto& work : completed_work) {
            Shader* shader = TryGet(work.cpu_address);
            auto& gpu = system.GPU();
            gpu.ShaderNotify().MarkShaderComplete();
            // The shader may have been unregistered while it was compiling.
            if (shader == nullptr) {
                continue;
            }
            using namespace VideoCommon::Shader;
            if (work.backend == AsyncShaders::Backend::OpenGL) {
                shader->AsyncOpenGLBuilt(std::move(work.program.opengl));
            } else if (work.backend == AsyncShaders::Backend::GLASM) {
                shader->AsyncGLASMBuilt(std::move(work.program.glasm));
            }

            // Disk-cache serialization was deferred until the build finished.
            ShaderDiskCacheEntry entry;
            entry.type = work.shader_type;
            entry.code = std::move(work.code);
            entry.code_b = std::move(work.code_b);
            entry.unique_identifier = work.uid;

            auto& registry = shader->GetRegistry();

            entry.bound_buffer = registry.GetBoundBuffer();
            entry.graphics_info = registry.GetGraphicsInfo();
            entry.keys = registry.GetKeys();
            entry.bound_samplers = registry.GetBoundSamplers();
            entry.bindless_samplers = registry.GetBindlessSamplers();
            disk_cache.SaveEntry(std::move(entry));
        }
    }

    // Look up shader in the cache based on address
    const auto cpu_addr{memory_manager.GpuToCpuAddress(address)};
    if (Shader* const shader{cpu_addr ? TryGet(*cpu_addr) : null_shader.get()}) {
        return last_shaders[static_cast<std::size_t>(program)] = shader;
    }

    const auto host_ptr{memory_manager.GetPointer(address)};

    // No shader found - create a new one
    ProgramCode code{GetShaderCode(memory_manager, address, host_ptr, false)};
    ProgramCode code_b;
    if (program == Maxwell::ShaderProgram::VertexA) {
        // VertexA runs together with VertexB; fetch the companion program too.
        const GPUVAddr address_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)};
        const u8* host_ptr_b = memory_manager.GetPointer(address_b);
        code_b = GetShaderCode(memory_manager, address_b, host_ptr_b, false);
    }
    // Size in bytes: ProgramCode stores u64 words.
    const std::size_t code_size = code.size() * sizeof(u64);

    const u64 unique_identifier = GetUniqueIdentifier(
        GetShaderType(program), program == Maxwell::ShaderProgram::VertexA, code, code_b);

    // NOTE(review): *cpu_addr is dereferenced here even though the null_shader
    // branch below implies cpu_addr can be empty — confirm this path is unreachable
    // with an unmapped address, or guard it.
    const ShaderParameters params{system, disk_cache, device,
                                  *cpu_addr, host_ptr, unique_identifier};

    std::unique_ptr<Shader> shader;
    const auto found = runtime_cache.find(unique_identifier);
    if (found == runtime_cache.end()) {
        shader = Shader::CreateStageFromMemory(params, program, std::move(code), std::move(code_b),
                                               async_shaders, cpu_addr.value_or(0));
    } else {
        shader = Shader::CreateFromCache(params, found->second);
    }

    Shader* const result = shader.get();
    if (cpu_addr) {
        Register(std::move(shader), *cpu_addr, code_size);
    } else {
        // Unmapped address: keep the shader alive outside the address-based cache.
        null_shader = std::move(shader);
    }

    return last_shaders[static_cast<std::size_t>(program)] = result;
}
|
|
|
|
|
2020-05-23 01:55:38 +02:00
|
|
|
/// Returns the compute kernel at the given GPU address, creating it on demand.
/// Lookup order: address-based cache, runtime cache by unique identifier, then
/// a fresh build from guest memory.
Shader* ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
    auto& memory_manager{system.GPU().MemoryManager()};
    const auto cpu_addr{memory_manager.GpuToCpuAddress(code_addr)};

    if (Shader* const kernel = cpu_addr ? TryGet(*cpu_addr) : null_kernel.get()) {
        return kernel;
    }

    const auto host_ptr{memory_manager.GetPointer(code_addr)};
    // No kernel found, create a new one
    ProgramCode code{GetShaderCode(memory_manager, code_addr, host_ptr, true)};
    // Size in bytes: ProgramCode stores u64 words.
    const std::size_t code_size{code.size() * sizeof(u64)};
    const u64 unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)};

    // NOTE(review): *cpu_addr is dereferenced here even though the null_kernel
    // branch above implies cpu_addr can be empty — confirm or guard.
    const ShaderParameters params{system, disk_cache, device,
                                  *cpu_addr, host_ptr, unique_identifier};

    std::unique_ptr<Shader> kernel;
    const auto found = runtime_cache.find(unique_identifier);
    if (found == runtime_cache.end()) {
        kernel = Shader::CreateKernelFromMemory(params, std::move(code));
    } else {
        kernel = Shader::CreateFromCache(params, found->second);
    }

    Shader* const result = kernel.get();
    if (cpu_addr) {
        Register(std::move(kernel), *cpu_addr, code_size);
    } else {
        // Unmapped address: keep the kernel alive outside the address-based cache.
        null_kernel = std::move(kernel);
    }
    return result;
}
|
|
|
|
|
2019-04-23 23:19:28 +02:00
|
|
|
} // namespace OpenGL
|