1
0
Fork 0
forked from suyu/suyu

async_shaders: Mark getters as const member functions

While we're at it, we can also mark them as nodiscard.
This commit is contained in:
Lioncash 2020-08-24 01:15:48 -04:00
parent b72d2069ba
commit bafef3d1c9
2 changed files with 15 additions and 17 deletions

View file

@@ -73,11 +73,11 @@ void AsyncShaders::KillWorkers() {
worker_threads.clear(); worker_threads.clear();
} }
bool AsyncShaders::HasWorkQueued() { bool AsyncShaders::HasWorkQueued() const {
return !pending_queue.empty(); return !pending_queue.empty();
} }
bool AsyncShaders::HasCompletedWork() { bool AsyncShaders::HasCompletedWork() const {
std::shared_lock lock{completed_mutex}; std::shared_lock lock{completed_mutex};
return !finished_work.empty(); return !finished_work.empty();
} }
@@ -102,7 +102,7 @@ bool AsyncShaders::IsShaderAsync(const Tegra::GPU& gpu) const {
} }
std::vector<AsyncShaders::Result> AsyncShaders::GetCompletedWork() { std::vector<AsyncShaders::Result> AsyncShaders::GetCompletedWork() {
std::vector<AsyncShaders::Result> results; std::vector<Result> results;
{ {
std::unique_lock lock{completed_mutex}; std::unique_lock lock{completed_mutex};
results.assign(std::make_move_iterator(finished_work.begin()), results.assign(std::make_move_iterator(finished_work.begin()),

View file

@@ -5,11 +5,10 @@
#pragma once #pragma once
#include <condition_variable> #include <condition_variable>
#include <deque>
#include <memory> #include <memory>
#include <shared_mutex> #include <shared_mutex>
#include <thread> #include <thread>
#include "common/bit_field.h"
#include "common/common_types.h" #include "common/common_types.h"
#include "video_core/renderer_opengl/gl_device.h" #include "video_core/renderer_opengl/gl_device.h"
#include "video_core/renderer_opengl/gl_resource_manager.h" #include "video_core/renderer_opengl/gl_resource_manager.h"
@@ -17,7 +16,6 @@
#include "video_core/renderer_vulkan/vk_device.h" #include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h" #include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
namespace Core::Frontend { namespace Core::Frontend {
class EmuWindow; class EmuWindow;
@@ -70,20 +68,20 @@ public:
void KillWorkers(); void KillWorkers();
/// Check to see if any shaders have actually been compiled /// Check to see if any shaders have actually been compiled
bool HasCompletedWork(); [[nodiscard]] bool HasCompletedWork() const;
/// Deduce if a shader can be built on another thread or MUST be built in sync. We cannot build /// Deduce if a shader can be built on another thread or MUST be built in sync. We cannot build
/// every shader async as some shaders are only built and executed once. We try to "guess" which /// every shader async as some shaders are only built and executed once. We try to "guess" which
/// shader would be used only once /// shader would be used only once
bool IsShaderAsync(const Tegra::GPU& gpu) const; [[nodiscard]] bool IsShaderAsync(const Tegra::GPU& gpu) const;
/// Pulls completed compiled shaders /// Pulls completed compiled shaders
std::vector<Result> GetCompletedWork(); [[nodiscard]] std::vector<Result> GetCompletedWork();
void QueueOpenGLShader(const OpenGL::Device& device, Tegra::Engines::ShaderType shader_type, void QueueOpenGLShader(const OpenGL::Device& device, Tegra::Engines::ShaderType shader_type,
u64 uid, std::vector<u64> code, std::vector<u64> code_b, u32 main_offset, u64 uid, std::vector<u64> code, std::vector<u64> code_b, u32 main_offset,
VideoCommon::Shader::CompilerSettings compiler_settings, CompilerSettings compiler_settings, const Registry& registry,
const VideoCommon::Shader::Registry& registry, VAddr cpu_addr); VAddr cpu_addr);
void QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, const Vulkan::VKDevice& device, void QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, const Vulkan::VKDevice& device,
Vulkan::VKScheduler& scheduler, Vulkan::VKScheduler& scheduler,
@@ -97,7 +95,7 @@ private:
void ShaderCompilerThread(Core::Frontend::GraphicsContext* context); void ShaderCompilerThread(Core::Frontend::GraphicsContext* context);
/// Check our worker queue to see if we have any work queued already /// Check our worker queue to see if we have any work queued already
bool HasWorkQueued(); [[nodiscard]] bool HasWorkQueued() const;
struct WorkerParams { struct WorkerParams {
Backend backend; Backend backend;
@@ -108,8 +106,8 @@ private:
std::vector<u64> code; std::vector<u64> code;
std::vector<u64> code_b; std::vector<u64> code_b;
u32 main_offset; u32 main_offset;
VideoCommon::Shader::CompilerSettings compiler_settings; CompilerSettings compiler_settings;
std::optional<VideoCommon::Shader::Registry> registry; std::optional<Registry> registry;
VAddr cpu_address; VAddr cpu_address;
// For Vulkan // For Vulkan
@@ -125,13 +123,13 @@ private:
}; };
std::condition_variable cv; std::condition_variable cv;
std::mutex queue_mutex; mutable std::mutex queue_mutex;
std::shared_mutex completed_mutex; mutable std::shared_mutex completed_mutex;
std::atomic<bool> is_thread_exiting{}; std::atomic<bool> is_thread_exiting{};
std::vector<std::unique_ptr<Core::Frontend::GraphicsContext>> context_list; std::vector<std::unique_ptr<Core::Frontend::GraphicsContext>> context_list;
std::vector<std::thread> worker_threads; std::vector<std::thread> worker_threads;
std::queue<WorkerParams> pending_queue; std::queue<WorkerParams> pending_queue;
std::vector<AsyncShaders::Result> finished_work; std::vector<Result> finished_work;
Core::Frontend::EmuWindow& emu_window; Core::Frontend::EmuWindow& emu_window;
}; };