1
0
Fork 0
forked from suyu/suyu

Merge pull request #2905 from danzel/fix-2902

Use recursive_mutex instead of mutex to fix #2902
This commit is contained in:
Sebastian Valle 2017-08-29 09:35:56 -05:00 committed by GitHub
commit acbd46366c
4 changed files with 5 additions and 5 deletions

View file

@@ -7,5 +7,5 @@
 #include <core/hle/lock.h>
 namespace HLE {
-std::mutex g_hle_lock;
+std::recursive_mutex g_hle_lock;
 }

View file

@@ -14,5 +14,5 @@ namespace HLE {
 * to the emulated memory is not protected by this mutex, and should be avoided in any threads other
 * than the CPU thread.
 */
-extern std::mutex g_hle_lock;
+extern std::recursive_mutex g_hle_lock;
 } // namespace HLE

View file

@@ -1334,7 +1334,7 @@ void CallSVC(u32 immediate) {
 MICROPROFILE_SCOPE(Kernel_SVC);
 // Lock the global kernel mutex when we enter the kernel HLE.
-std::lock_guard<std::mutex> lock(HLE::g_hle_lock);
+std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
 const FunctionDef* info = GetSVCInfo(immediate);
 if (info) {

View file

@@ -183,7 +183,7 @@ T Read(const VAddr vaddr) {
 }
 // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
-std::lock_guard<std::mutex> lock(HLE::g_hle_lock);
+std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
 PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
 switch (type) {
@@ -224,7 +224,7 @@ void Write(const VAddr vaddr, const T data) {
 }
 // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
-std::lock_guard<std::mutex> lock(HLE::g_hle_lock);
+std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
 PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
 switch (type) {