vk_scheduler: Use std::jthread
parent 877cd60b00
commit 84f7e7e91c
2 changed files with 9 additions and 17 deletions
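For readers unfamiliar with the C++20 facilities this commit switches to, here is a minimal, self-contained sketch of the pattern: a std::jthread whose destructor requests stop and joins, paired with std::condition_variable_any's stop_token-aware wait, replacing the std::thread / atomic quit flag / manual join combination. The Worker class, its int queue, and every name below are illustrative only and are not taken from the yuzu/suyu sources.

// Sketch only (not yuzu code). Requires C++20: <thread> for std::jthread,
// <stop_token> for std::stop_token.
#include <condition_variable>
#include <cstdio>
#include <deque>
#include <mutex>
#include <stop_token>
#include <thread>

class Worker {
public:
    Worker() {
        // std::jthread passes its stop_token as the first argument to the callable.
        thread = std::jthread([this](std::stop_token token) { Loop(token); });
    }
    // No user-declared destructor: ~jthread() calls request_stop() and then join().

    void Push(int item) {
        {
            std::lock_guard lock{mutex};
            queue.push_back(item);
        }
        cv.notify_one();
    }

private:
    void Loop(std::stop_token token) {
        while (!token.stop_requested()) {
            std::unique_lock lock{mutex};
            // The stop_token overload also wakes when a stop is requested and
            // then returns the predicate's value, so no quit flag has to
            // appear in the predicate.
            if (!cv.wait(lock, token, [this] { return !queue.empty(); })) {
                continue; // stop requested; the while condition ends the loop
            }
            const int item = queue.front();
            queue.pop_front();
            lock.unlock();
            std::printf("processed %d\n", item);
        }
    }

    std::mutex mutex;
    std::condition_variable_any cv; // plain condition_variable lacks the stop_token overload
    std::deque<int> queue;
    std::jthread thread; // declared last: destroyed (stopped and joined) before mutex and cv
};

int main() {
    Worker worker;
    worker.Push(1);
    worker.Push(2);
    // ~Worker: the jthread member requests stop, the blocked wait wakes, the thread joins.
}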
vk_scheduler.cpp

@@ -43,17 +43,10 @@ VKScheduler::VKScheduler(const Device& device_, StateTracker& state_tracker_)
       command_pool{std::make_unique<CommandPool>(*master_semaphore, device)} {
     AcquireNewChunk();
     AllocateWorkerCommandBuffer();
-    worker_thread = std::thread(&VKScheduler::WorkerThread, this);
+    worker_thread = std::jthread([this](std::stop_token token) { WorkerThread(token); });
 }
 
-VKScheduler::~VKScheduler() {
-    {
-        std::lock_guard lock{work_mutex};
-        quit = true;
-    }
-    work_cv.notify_all();
-    worker_thread.join();
-}
+VKScheduler::~VKScheduler() = default;
 
 void VKScheduler::Flush(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
     SubmitExecution(signal_semaphore, wait_semaphore);
@@ -135,7 +128,7 @@ bool VKScheduler::UpdateGraphicsPipeline(GraphicsPipeline* pipeline) {
     return true;
 }
 
-void VKScheduler::WorkerThread() {
+void VKScheduler::WorkerThread(std::stop_token stop_token) {
     Common::SetCurrentThreadName("yuzu:VulkanWorker");
     do {
         if (work_queue.empty()) {
@@ -144,8 +137,8 @@ void VKScheduler::WorkerThread() {
         std::unique_ptr<CommandChunk> work;
         {
             std::unique_lock lock{work_mutex};
-            work_cv.wait(lock, [this] { return !work_queue.empty() || quit; });
-            if (quit) {
+            work_cv.wait(lock, stop_token, [this] { return !work_queue.empty(); });
+            if (stop_token.stop_requested()) {
                 continue;
             }
             work = std::move(work_queue.front());
@@ -158,7 +151,7 @@ void VKScheduler::WorkerThread() {
         }
         std::lock_guard reserve_lock{reserve_mutex};
         chunk_reserve.push_back(std::move(work));
-    } while (!quit);
+    } while (!stop_token.stop_requested());
 }
 
 void VKScheduler::AllocateWorkerCommandBuffer() {
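The three-argument wait the worker loop now uses is the C++20 overload of std::condition_variable_any::wait that takes a std::stop_token. Roughly, its effect is the following (a conceptual sketch of the specified behaviour, not any particular standard library's implementation):

// Conceptual sketch of condition_variable_any::wait(lock, stoken, pred):
template <class Lock, class Predicate>
bool wait(Lock& lock, std::stop_token stoken, Predicate pred) {
    while (!stoken.stop_requested()) {
        if (pred())
            return true;
        wait(lock); // woken by notify_one/notify_all or by a stop request
    }
    return pred(); // stop requested: report whether the predicate already holds
}

A stop_callback registered on the token notifies the condition variable, so the request_stop() issued when the scheduler is destroyed wakes the worker even though no work was queued. The worker ignores the return value and simply re-checks stop_token.stop_requested() after waking, which covers the same case.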
vk_scheduler.h

@@ -187,7 +187,7 @@ private:
         GraphicsPipeline* graphics_pipeline = nullptr;
     };
 
-    void WorkerThread();
+    void WorkerThread(std::stop_token stop_token);
 
     void AllocateWorkerCommandBuffer();
 
@@ -212,7 +212,7 @@ private:
     vk::CommandBuffer current_cmdbuf;
 
     std::unique_ptr<CommandChunk> chunk;
-    std::thread worker_thread;
+    std::jthread worker_thread;
 
     State state;
 
@@ -224,9 +224,8 @@ private:
     std::vector<std::unique_ptr<CommandChunk>> chunk_reserve;
     std::mutex reserve_mutex;
     std::mutex work_mutex;
-    std::condition_variable work_cv;
+    std::condition_variable_any work_cv;
     std::condition_variable wait_cv;
-    std::atomic_bool quit{};
 };
 
 } // namespace Vulkan
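The header changes mirror this: plain std::condition_variable has no stop_token-aware wait, so work_cv becomes std::condition_variable_any, and the std::atomic_bool quit flag disappears because the stop state now lives in the jthread. The defaulted destructor is what triggers the stop and the join; conceptually, std::jthread's destructor behaves as sketched below (again a sketch of the specified behaviour, not an implementation):

// Conceptual sketch of ~jthread():
~jthread() {
    if (joinable()) {
        request_stop(); // wakes any wait(lock, stop_token, pred) blocked on this token
        join();         // then waits for WorkerThread to return
    }
}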