forked from suyu/suyu
move thread 1/4 count computation into allocate workers method
parent 31a76410e8
commit 1b829fbd7a
4 changed files with 14 additions and 23 deletions
@@ -177,15 +177,7 @@ RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWind
     }
 
     if (device.UseAsynchronousShaders()) {
-        // Max worker threads we should allow
-        constexpr u32 MAX_THREADS = 4;
-        // Deduce how many threads we can use
-        const u32 threads_used = std::thread::hardware_concurrency() / 4;
-        // Always allow at least 1 thread regardless of our settings
-        const auto max_worker_count = std::max(1U, threads_used);
-        // Don't use more than MAX_THREADS
-        const auto worker_count = std::min(max_worker_count, MAX_THREADS);
-        async_shaders.AllocateWorkers(worker_count);
+        async_shaders.AllocateWorkers();
     }
 }
@@ -405,17 +405,7 @@ RasterizerVulkan::RasterizerVulkan(Core::System& system, Core::Frontend::EmuWind
       wfi_event{device.GetLogical().CreateNewEvent()}, async_shaders{renderer} {
     scheduler.SetQueryCache(query_cache);
     if (device.UseAsynchronousShaders()) {
-        // The following is subject to move into the allocate workers method, to be api agnostic
-
-        // Max worker threads we should allow
-        constexpr u32 MAX_THREADS = 4;
-        // Deduce how many threads we can use
-        const auto threads_used = std::thread::hardware_concurrency() / 4;
-        // Always allow at least 1 thread regardless of our settings
-        const auto max_worker_count = std::max(1U, threads_used);
-        // Don't use more than MAX_THREADS
-        const auto worker_count = std::min(max_worker_count, MAX_THREADS);
-        async_shaders.AllocateWorkers(worker_count);
+        async_shaders.AllocateWorkers();
     }
 }
@@ -19,9 +19,18 @@ AsyncShaders::~AsyncShaders() {
     KillWorkers();
 }
 
-void AsyncShaders::AllocateWorkers(std::size_t num_workers) {
+void AsyncShaders::AllocateWorkers() {
+    // Max worker threads we should allow
+    constexpr u32 MAX_THREADS = 4;
+    // Deduce how many threads we can use
+    const u32 threads_used = std::thread::hardware_concurrency() / 4;
+    // Always allow at least 1 thread regardless of our settings
+    const auto max_worker_count = std::max(1U, threads_used);
+    // Don't use more than MAX_THREADS
+    const auto num_workers = std::min(max_worker_count, MAX_THREADS);
+
     // If we're already have workers queued or don't want to queue workers, ignore
-    if (num_workers == worker_threads.size() || num_workers == 0) {
+    if (num_workers == worker_threads.size()) {
         return;
     }
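For reference, the heuristic that now lives in one place inside AllocateWorkers() takes a quarter of the hardware threads reported by std::thread::hardware_concurrency(), never drops below 1 worker, and never exceeds 4. The snippet below is a minimal standalone sketch of that arithmetic only; the helper name ComputeWorkerCount and the use of std::uint32_t in place of the project's u32 alias are illustrative, not code from the repository.

// Standalone sketch of the worker-count heuristic; std::uint32_t stands in for yuzu's u32 alias.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <thread>

std::uint32_t ComputeWorkerCount() {
    // Max worker threads we should allow
    constexpr std::uint32_t MAX_THREADS = 4;
    // Deduce how many threads we can use: a quarter of the hardware threads
    const std::uint32_t threads_used = std::thread::hardware_concurrency() / 4;
    // Always allow at least 1 thread, even when hardware_concurrency() reports 0 or a small value
    const std::uint32_t max_worker_count = std::max<std::uint32_t>(1, threads_used);
    // Never use more than MAX_THREADS
    return std::min(max_worker_count, MAX_THREADS);
}

int main() {
    // e.g. 16 hardware threads -> 4 workers, 8 -> 2, 4 or fewer -> 1
    std::cout << "Shader compile workers: " << ComputeWorkerCount() << '\n';
}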
@@ -62,7 +62,7 @@ public:
     ~AsyncShaders();
 
     /// Start up shader worker threads
-    void AllocateWorkers(std::size_t num_workers);
+    void AllocateWorkers();
 
     /// Clear the shader queue and kill all worker threads
     void FreeWorkers();
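With the count parameter gone from the header, backends no longer decide how many shader compile workers are appropriate; they simply call AllocateWorkers() and the pool sizes itself. Below is a hypothetical, self-contained sketch of that pattern; the class name ShaderWorkerPool, its members, and the idle work loop are illustrative stand-ins, not yuzu's actual AsyncShaders implementation.

// Hypothetical sketch of a self-sizing worker pool; not yuzu's AsyncShaders.
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <thread>
#include <vector>

class ShaderWorkerPool {
public:
    ~ShaderWorkerPool() {
        KillWorkers();
    }

    /// Start up worker threads; the pool picks the count itself (no caller-supplied value).
    void AllocateWorkers() {
        constexpr std::uint32_t MAX_THREADS = 4;
        const std::uint32_t threads_used = std::thread::hardware_concurrency() / 4;
        const std::uint32_t num_workers =
            std::min(std::max<std::uint32_t>(1, threads_used), MAX_THREADS);
        // Already running with this many workers: nothing to do.
        if (num_workers == worker_threads.size()) {
            return;
        }
        running = true;
        for (std::uint32_t i = 0; i < num_workers; ++i) {
            worker_threads.emplace_back([this] {
                while (running) {
                    // A real pool would pull queued shader builds here and compile them.
                    std::this_thread::yield();
                }
            });
        }
    }

    /// Stop and join all worker threads.
    void KillWorkers() {
        running = false;
        for (auto& thread : worker_threads) {
            if (thread.joinable()) {
                thread.join();
            }
        }
        worker_threads.clear();
    }

private:
    std::atomic_bool running{false};
    std::vector<std::thread> worker_threads;
};

int main() {
    ShaderWorkerPool pool;
    pool.AllocateWorkers(); // spawns 1-4 workers depending on the host CPU
    pool.KillWorkers();
}

The call sites in the OpenGL and Vulkan rasterizers above reduce to the same shape: check the device flag, then call AllocateWorkers() with no argument.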