
Use conditional var

David Marcec 2020-07-16 18:38:35 +10:00
parent 2ba195aa0d
commit f48187449e
2 changed files with 15 additions and 9 deletions
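
This change replaces the polling loop in AsyncShaders::ShaderCompilerThread with a std::condition_variable: worker threads now sleep until work is queued or shutdown is requested, instead of repeatedly re-checking the queue. Two files change: the AsyncShaders implementation and its header.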

Changed file 1 of 2: AsyncShaders implementation (.cpp)

@@ -59,7 +59,6 @@ void AsyncShaders::KillWorkers() {
 }
 
 bool AsyncShaders::HasWorkQueued() {
-    std::shared_lock lock(queue_mutex);
     return !pending_queue.empty();
 }
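
Note that HasWorkQueued() no longer takes a lock of its own: after this commit it is called from the cv.wait predicate below, which always runs with queue_mutex already held, and locking a std::mutex again from the same thread is undefined behavior.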
@@ -118,26 +117,31 @@ void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device,
                           cpu_addr};
     std::unique_lock lock(queue_mutex);
     pending_queue.push_back(std::move(params));
+    cv.notify_one();
 }
 
 void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context) {
     using namespace std::chrono_literals;
     while (!is_thread_exiting.load(std::memory_order_relaxed)) {
+        std::unique_lock<std::mutex> lock(queue_mutex);
+        cv.wait(lock, [&] { return HasWorkQueued() || is_thread_exiting; });
+        if (is_thread_exiting) {
+            return;
+        }
+
         // Partial lock to allow all threads to read at the same time
         if (!HasWorkQueued()) {
             continue;
         }
-        // Complete lock for pulling workload
-        queue_mutex.lock();
         // Another thread beat us, just unlock and wait for the next load
         if (pending_queue.empty()) {
-            queue_mutex.unlock();
             continue;
         }
         // Pull work from queue
         WorkerParams work = std::move(pending_queue.front());
         pending_queue.pop_front();
-        queue_mutex.unlock();
+        lock.unlock();
 
         if (work.backend == AsyncShaders::Backend::OpenGL ||
             work.backend == AsyncShaders::Backend::GLASM) {
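
Taken in isolation, the worker-loop pattern this commit adopts looks like the following. This is a minimal, self-contained sketch, not code from the repository; WorkQueue, Push, and Stop are illustrative names:

// Minimal sketch of the condition-variable worker pattern (illustrative names).
#include <chrono>
#include <condition_variable>
#include <deque>
#include <iostream>
#include <mutex>
#include <thread>

class WorkQueue {
public:
    void Push(int item) {
        {
            std::unique_lock lock(mutex);
            queue.push_back(item);
        }
        cv.notify_one(); // wake one sleeping worker
    }

    void Stop() {
        {
            std::unique_lock lock(mutex);
            exiting = true;
        }
        cv.notify_all(); // wake every worker so it can observe `exiting`
    }

    void WorkerLoop() {
        while (true) {
            std::unique_lock lock(mutex);
            // Sleeps until the predicate is true. The predicate always runs
            // with `mutex` held, so it must not try to lock again.
            cv.wait(lock, [&] { return !queue.empty() || exiting; });
            if (exiting) {
                return; // as in the commit, shutdown wins over pending work
            }
            const int work = queue.front();
            queue.pop_front();
            lock.unlock(); // drop the lock before doing the slow work itself
            std::cout << "processing item " << work << '\n';
        }
    }

private:
    std::mutex mutex;
    std::condition_variable cv;
    std::deque<int> queue;
    bool exiting = false;
};

int main() {
    WorkQueue q;
    std::thread worker(&WorkQueue::WorkerLoop, &q);
    for (int i = 0; i < 3; ++i) {
        q.Push(i);
    }
    // Give the worker a moment to drain the queue; Stop() takes effect even
    // if items are still pending, matching the commit's exit-first check.
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    q.Stop();
    worker.join();
}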

Changed file 2 of 2: AsyncShaders header (.h)

@@ -4,6 +4,7 @@
 #pragma once
 
+#include <condition_variable>
 #include <deque>
 #include <memory>
 #include <shared_mutex>
@@ -59,9 +60,6 @@ public:
     // Force end all threads
     void KillWorkers();
 
-    /// Check our worker queue to see if we have any work queued already
-    bool HasWorkQueued();
-
     /// Check to see if any shaders have actually been compiled
     bool HasCompletedWork();
@@ -81,6 +79,9 @@ public:
 private:
     void ShaderCompilerThread(Core::Frontend::GraphicsContext* context);
 
+    /// Check our worker queue to see if we have any work queued already
+    bool HasWorkQueued();
+
     struct WorkerParams {
         AsyncShaders::Backend backend;
         OpenGL::Device device;
@@ -94,7 +95,8 @@ private:
         VAddr cpu_address;
     };
 
-    std::shared_mutex queue_mutex;
+    std::condition_variable cv;
+    std::mutex queue_mutex;
     std::shared_mutex completed_mutex;
     std::atomic<bool> is_thread_exiting{};
     std::vector<std::unique_ptr<Core::Frontend::GraphicsContext>> context_list;
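
Note the type change on queue_mutex: std::condition_variable can only wait on a std::unique_lock<std::mutex>, so the queue lock drops from std::shared_mutex to a plain std::mutex (waiting on a shared mutex would require std::condition_variable_any). The shared reader path removed from HasWorkQueued() above goes away for the same reason.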