
Merge pull request #2128 from FearlessTobi/port-4197

Port citra-emu/citra#4197: "threadsafe_queue: Add PopWait and use it where possible"
bunnei, 2019-02-16 15:34:49 -05:00 (merged via GitHub)
commit cd7e1183e2
3 changed files with 26 additions and 12 deletions

In the logging backend:

```diff
@@ -40,9 +40,7 @@ public:
     const Impl& operator=(Impl const&) = delete;
 
     void PushEntry(Entry e) {
-        std::lock_guard<std::mutex> lock(message_mutex);
         message_queue.Push(std::move(e));
-        message_cv.notify_one();
     }
 
     void AddBackend(std::unique_ptr<Backend> backend) {
@@ -86,15 +84,13 @@ private:
                 }
             };
             while (true) {
-                {
-                    std::unique_lock<std::mutex> lock(message_mutex);
-                    message_cv.wait(lock, [&] { return !running || message_queue.Pop(entry); });
-                }
-                if (!running) {
+                entry = message_queue.PopWait();
+                if (entry.final_entry) {
                     break;
                 }
                 write_logs(entry);
             }
+
             // Drain the logging queue. Only writes out up to MAX_LOGS_TO_WRITE to prevent a case
             // where a system is repeatedly spamming logs even on close.
             const int MAX_LOGS_TO_WRITE = filter.IsDebug() ? INT_MAX : 100;
@@ -106,14 +102,13 @@ private:
     }
 
     ~Impl() {
-        running = false;
-        message_cv.notify_one();
+        Entry entry;
+        entry.final_entry = true;
+        message_queue.Push(entry);
         backend_thread.join();
     }
 
-    std::atomic_bool running{true};
-    std::mutex message_mutex, writing_mutex;
-    std::condition_variable message_cv;
+    std::mutex writing_mutex;
     std::thread backend_thread;
     std::vector<std::unique_ptr<Backend>> backends;
     Common::MPSCQueue<Log::Entry> message_queue;
```
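
The destructor rewrite above is the subtle half of this change: instead of flipping a `running` flag and notifying a condition variable the consumer might not be waiting on yet, shutdown now travels in-band as a sentinel `Entry` whose `final_entry` flag tells the backend thread to leave its loop. Because the wakeup and the stop condition arrive through the same queue, they cannot be observed out of order, and the `running`, `message_mutex`, and `message_cv` members become dead weight. Below is a minimal sketch of the same sentinel-shutdown pattern; `Message` and `BlockingQueue` are hypothetical stand-ins, not the logger's real types:

```cpp
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <string>
#include <thread>

// Hypothetical stand-ins for Log::Entry and Common::MPSCQueue<Log::Entry>.
struct Message {
    std::string text;
    bool final_entry = false; // mirrors the new Entry::final_entry flag
};

class BlockingQueue {
public:
    void Push(Message m) {
        {
            std::lock_guard<std::mutex> lock(mutex);
            queue.push(std::move(m));
        }
        cv.notify_one();
    }

    Message PopWait() {
        std::unique_lock<std::mutex> lock(mutex);
        cv.wait(lock, [this] { return !queue.empty(); });
        Message m = std::move(queue.front());
        queue.pop();
        return m;
    }

private:
    std::queue<Message> queue;
    std::mutex mutex;
    std::condition_variable cv;
};

int main() {
    BlockingQueue queue;
    std::thread consumer([&] {
        while (true) {
            Message m = queue.PopWait();
            if (m.final_entry) {
                break; // shutdown arrives in-band; no separate flag to race against
            }
            std::cout << m.text << '\n';
        }
    });

    queue.Push({"hello", false});
    queue.Push({"world", false});
    queue.Push({"", true}); // the sentinel both wakes and stops the consumer
    consumer.join();
}
```

Unlike the patched queue, this sketch also takes the lock on the producer side, which is the textbook way to make the notify race-free; see the note under the queue diff below.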

In the log Entry type:

```diff
@@ -27,6 +27,7 @@ struct Entry {
     unsigned int line_num;
     std::string function;
     std::string message;
+    bool final_entry = false;
 
     Entry() = default;
     Entry(Entry&& o) = default;
```
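
The flag defaults to false, so every existing `Entry` keeps its meaning unchanged and only shutdown code opts in; the move constructor stays defaulted, so pushing a sentinel costs no more than any other entry. A hypothetical convenience helper, just to make the intended use explicit (the destructor above sets the flag inline; this helper is not part of the commit):

```cpp
// Hypothetical helper, not part of the commit; Log::Entry has more
// fields than the hunk above shows, all of which stay defaulted here.
Log::Entry MakeShutdownSentinel() {
    Log::Entry entry;
    entry.final_entry = true; // marks this entry as the backend thread's stop signal
    return entry;
}
```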

In the thread-safe queue header:

```diff
@@ -8,6 +8,7 @@
 // single reader, single writer queue
 
 #include <atomic>
+#include <condition_variable>
 #include <cstddef>
 #include <mutex>
 #include <utility>
@@ -45,6 +46,7 @@ public:
         ElementPtr* new_ptr = new ElementPtr();
         write_ptr->next.store(new_ptr, std::memory_order_release);
         write_ptr = new_ptr;
+        cv.notify_one();
 
         ++size;
     }
@@ -74,6 +76,16 @@ public:
         return true;
     }
 
+    T PopWait() {
+        if (Empty()) {
+            std::unique_lock<std::mutex> lock(cv_mutex);
+            cv.wait(lock, [this]() { return !Empty(); });
+        }
+        T t;
+        Pop(t);
+        return t;
+    }
+
     // not thread-safe
     void Clear() {
         size.store(0);
@@ -101,6 +113,8 @@ private:
     ElementPtr* write_ptr;
     ElementPtr* read_ptr;
     std::atomic_size_t size{0};
+    std::mutex cv_mutex;
+    std::condition_variable cv;
 };
 
 // a simple thread-safe,
@@ -135,6 +149,10 @@ public:
         return spsc_queue.Pop(t);
     }
 
+    T PopWait() {
+        return spsc_queue.PopWait();
+    }
+
     // not thread-safe
     void Clear() {
         spsc_queue.Clear();
```
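
`PopWait` is deliberately cheap on the hot path: `Empty()` is an atomic size check, so `cv_mutex` is only touched when the queue looks empty, and the single-consumer invariant guarantees that once the wait predicate holds, the following `Pop(t)` must succeed, since no other reader can drain the element first. One caveat worth flagging: `Push` calls `cv.notify_one()` without holding `cv_mutex`, so a notify that fires between the consumer's predicate check and its block can be missed until the next push arrives; a logger tolerates that, but stricter users would lock around the notify. A rough usage sketch follows, assuming yuzu's `common/threadsafe_queue.h` include path and the `Common::SPSCQueue` patched above (the sentinel value is purely illustrative):

```cpp
#include <iostream>
#include <thread>

#include "common/threadsafe_queue.h" // assumed include path for the header above

int main() {
    Common::SPSCQueue<int> queue;

    // Single consumer: blocks in PopWait instead of polling Pop in a loop.
    std::thread consumer([&] {
        while (true) {
            const int value = queue.PopWait();
            if (value < 0) { // illustrative sentinel, by analogy with Entry::final_entry
                break;
            }
            std::cout << value << '\n';
        }
    });

    // Single producer: every Push publishes the element, then notifies.
    for (int i = 0; i < 10; ++i) {
        queue.Push(i);
    }
    queue.Push(-1); // wake the consumer and tell it to stop
    consumer.join();
}
```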