diff --git a/src/dynarmic/CMakeLists.txt b/src/dynarmic/CMakeLists.txt
index a81fb0bc..9b7d7cb1 100644
--- a/src/dynarmic/CMakeLists.txt
+++ b/src/dynarmic/CMakeLists.txt
@@ -390,11 +390,14 @@ elseif(ARCHITECTURE STREQUAL "arm64")
         backend/arm64/emit_arm64_vector_floating_point.cpp
         backend/arm64/emit_arm64_vector_saturation.cpp
         backend/arm64/emit_context.h
+        backend/arm64/exclusive_monitor.cpp
         backend/arm64/fpsr_manager.cpp
         backend/arm64/fpsr_manager.h
         backend/arm64/reg_alloc.cpp
         backend/arm64/reg_alloc.h
         backend/arm64/stack_layout.h
+        common/spin_lock_arm64.cpp
+        common/spin_lock_arm64.h
     )
 
     if ("A32" IN_LIST DYNARMIC_FRONTENDS)
diff --git a/src/dynarmic/backend/arm64/a32_address_space.cpp b/src/dynarmic/backend/arm64/a32_address_space.cpp
index 2ea167ac..a8f89a83 100644
--- a/src/dynarmic/backend/arm64/a32_address_space.cpp
+++ b/src/dynarmic/backend/arm64/a32_address_space.cpp
@@ -10,9 +10,11 @@
 #include "dynarmic/backend/arm64/devirtualize.h"
 #include "dynarmic/backend/arm64/emit_arm64.h"
 #include "dynarmic/backend/arm64/stack_layout.h"
+#include "dynarmic/common/cast_util.h"
 #include "dynarmic/common/fp/fpcr.h"
 #include "dynarmic/frontend/A32/a32_location_descriptor.h"
 #include "dynarmic/frontend/A32/translate/a32_translate.h"
+#include "dynarmic/interface/exclusive_monitor.h"
 #include "dynarmic/ir/opt/passes.h"
 
 namespace Dynarmic::Backend::Arm64 {
@@ -39,6 +41,61 @@ static void* EmitCallTrampoline(oaknut::CodeGenerator& code, T* this_) {
     return target;
 }
 
+template<auto callback, typename T>
+static void* EmitExclusiveReadCallTrampoline(oaknut::CodeGenerator& code, const A32::UserConfig& conf) {
+    using namespace oaknut::util;
+
+    oaknut::Label l_addr, l_this;
+
+    auto fn = [](const A32::UserConfig& conf, A32::VAddr vaddr) -> T {
+        return conf.global_monitor->ReadAndMark<T>(conf.processor_id, vaddr, [&]() -> T {
+            return (conf.callbacks->*callback)(vaddr);
+        });
+    };
+
+    void* target = code.ptr<void*>();
+    code.LDR(X0, l_this);
+    code.LDR(Xscratch0, l_addr);
+    code.BR(Xscratch0);
+
+    code.align(8);
+    code.l(l_this);
+    code.dx(mcl::bit_cast<u64>(&conf));
+    code.l(l_addr);
+    code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
+
+    return target;
+}
+
+template<auto callback, typename T>
+static void* EmitExclusiveWriteCallTrampoline(oaknut::CodeGenerator& code, const A32::UserConfig& conf) {
+    using namespace oaknut::util;
+
+    oaknut::Label l_addr, l_this;
+
+    auto fn = [](const A32::UserConfig& conf, A32::VAddr vaddr, T value) -> u32 {
+        return conf.global_monitor->DoExclusiveOperation<T>(conf.processor_id, vaddr,
+                                                            [&](T expected) -> bool {
+                                                                return (conf.callbacks->*callback)(vaddr, value, expected);
+                                                            })
+                 ? 0
+                 : 1;
+    };
+
+    void* target = code.ptr<void*>();
+    code.LDR(X0, l_this);
+    code.LDR(Xscratch0, l_addr);
+    code.BR(Xscratch0);
+
+    code.align(8);
+    code.l(l_this);
+    code.dx(mcl::bit_cast<u64>(&conf));
+    code.l(l_addr);
+    code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
+
+    return target;
+}
+
 A32AddressSpace::A32AddressSpace(const A32::UserConfig& conf)
         : conf(conf)
         , mem(conf.code_cache_size)
@@ -121,11 +178,23 @@ void A32AddressSpace::EmitPrelude() {
     prelude_info.read_memory_8 = EmitCallTrampoline<&A32::UserCallbacks::MemoryRead8>(code, conf.callbacks);
     prelude_info.read_memory_16 = EmitCallTrampoline<&A32::UserCallbacks::MemoryRead16>(code, conf.callbacks);
     prelude_info.read_memory_32 = EmitCallTrampoline<&A32::UserCallbacks::MemoryRead32>(code, conf.callbacks);
     prelude_info.read_memory_64 = EmitCallTrampoline<&A32::UserCallbacks::MemoryRead64>(code, conf.callbacks);
+    prelude_info.exclusive_read_memory_8 = EmitExclusiveReadCallTrampoline<&A32::UserCallbacks::MemoryRead8, u8>(code, conf);
+    prelude_info.exclusive_read_memory_16 = EmitExclusiveReadCallTrampoline<&A32::UserCallbacks::MemoryRead16, u16>(code, conf);
+    prelude_info.exclusive_read_memory_32 = EmitExclusiveReadCallTrampoline<&A32::UserCallbacks::MemoryRead32, u32>(code, conf);
+    prelude_info.exclusive_read_memory_64 = EmitExclusiveReadCallTrampoline<&A32::UserCallbacks::MemoryRead64, u64>(code, conf);
     prelude_info.write_memory_8 = EmitCallTrampoline<&A32::UserCallbacks::MemoryWrite8>(code, conf.callbacks);
     prelude_info.write_memory_16 = EmitCallTrampoline<&A32::UserCallbacks::MemoryWrite16>(code, conf.callbacks);
     prelude_info.write_memory_32 = EmitCallTrampoline<&A32::UserCallbacks::MemoryWrite32>(code, conf.callbacks);
     prelude_info.write_memory_64 = EmitCallTrampoline<&A32::UserCallbacks::MemoryWrite64>(code, conf.callbacks);
+    prelude_info.exclusive_write_memory_8 = EmitExclusiveWriteCallTrampoline<&A32::UserCallbacks::MemoryWriteExclusive8, u8>(code, conf);
+    prelude_info.exclusive_write_memory_16 = EmitExclusiveWriteCallTrampoline<&A32::UserCallbacks::MemoryWriteExclusive16, u16>(code, conf);
+    prelude_info.exclusive_write_memory_32 = EmitExclusiveWriteCallTrampoline<&A32::UserCallbacks::MemoryWriteExclusive32, u32>(code, conf);
+    prelude_info.exclusive_write_memory_64 = EmitExclusiveWriteCallTrampoline<&A32::UserCallbacks::MemoryWriteExclusive64, u64>(code, conf);
+    prelude_info.call_svc = EmitCallTrampoline<&A32::UserCallbacks::CallSVC>(code, conf.callbacks);
+    prelude_info.exception_raised = EmitCallTrampoline<&A32::UserCallbacks::ExceptionRaised>(code, conf.callbacks);
     prelude_info.isb_raised = EmitCallTrampoline<&A32::UserCallbacks::InstructionSynchronizationBarrierRaised>(code, conf.callbacks);
+    prelude_info.add_ticks = EmitCallTrampoline<&A32::UserCallbacks::AddTicks>(code, conf.callbacks);
+    prelude_info.get_ticks_remaining = EmitCallTrampoline<&A32::UserCallbacks::GetTicksRemaining>(code, conf.callbacks);
 
     prelude_info.end_of_prelude = code.ptr<u32*>();
 
@@ -185,6 +254,18 @@ void A32AddressSpace::Link(EmittedBlockInfo& block_info) {
        case LinkTarget::ReadMemory64:
            c.BL(prelude_info.read_memory_64);
            break;
+       case LinkTarget::ExclusiveReadMemory8:
+           c.BL(prelude_info.exclusive_read_memory_8);
+           break;
+       case LinkTarget::ExclusiveReadMemory16:
+           c.BL(prelude_info.exclusive_read_memory_16);
+           break;
+       case LinkTarget::ExclusiveReadMemory32:
+           c.BL(prelude_info.exclusive_read_memory_32);
+           break;
+       case LinkTarget::ExclusiveReadMemory64:
+           c.BL(prelude_info.exclusive_read_memory_64);
+           break;
        case LinkTarget::WriteMemory8:
            c.BL(prelude_info.write_memory_8);
            break;
@@ -197,9 +278,33 @@ void A32AddressSpace::Link(EmittedBlockInfo& block_info) {
        case LinkTarget::WriteMemory64:
            c.BL(prelude_info.write_memory_64);
            break;
+       case LinkTarget::ExclusiveWriteMemory8:
+           c.BL(prelude_info.exclusive_write_memory_8);
+           break;
+       case LinkTarget::ExclusiveWriteMemory16:
+           c.BL(prelude_info.exclusive_write_memory_16);
+           break;
+       case LinkTarget::ExclusiveWriteMemory32:
+           c.BL(prelude_info.exclusive_write_memory_32);
+           break;
+       case LinkTarget::ExclusiveWriteMemory64:
+           c.BL(prelude_info.exclusive_write_memory_64);
+           break;
+       case LinkTarget::CallSVC:
+           c.BL(prelude_info.call_svc);
+           break;
+       case LinkTarget::ExceptionRaised:
+           c.BL(prelude_info.exception_raised);
+           break;
        case LinkTarget::InstructionSynchronizationBarrierRaised:
            c.BL(prelude_info.isb_raised);
            break;
+       case LinkTarget::AddTicks:
+           c.BL(prelude_info.add_ticks);
+           break;
+       case LinkTarget::GetTicksRemaining:
+           c.BL(prelude_info.get_ticks_remaining);
+           break;
        default:
            ASSERT_FALSE("Invalid relocation target");
        }
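
Note: the trampolines above exist because JIT-ed code can only branch to a plain
address; it cannot call a C++ member function or a capturing lambda directly.
Each trampoline therefore loads a constant context pointer into X0 from an
inline literal pool and tail-branches to a free function. For the exclusive
variants, that free function is a captureless lambda routing through the
global monitor. A sketch of the trick (illustrative only; `Ctx` stands in for
A32::UserConfig):

#include <cstdint>

struct Ctx;  // stands in for A32::UserConfig
using ReadFn = std::uint8_t (*)(const Ctx&, std::uint32_t);

// A captureless lambda decays to a plain function pointer, which is what
// lets Common::FptrCast produce an address the JIT can embed and branch to:
ReadFn fn = [](const Ctx& conf, std::uint32_t vaddr) -> std::uint8_t {
    // real body: conf.global_monitor->ReadAndMark<u8>(conf.processor_id, vaddr, ...)
    return 0;
};
// The emitted trampoline is then, morally:
//     LDR X0, l_this        ; X0 <- &conf (from the literal pool after the code)
//     LDR Xscratch0, l_addr ; Xscratch0 <- address of fn
//     BR  Xscratch0         ; tail-branch; the JIT block's BL returns past it
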
diff --git a/src/dynarmic/backend/arm64/a32_address_space.h b/src/dynarmic/backend/arm64/a32_address_space.h
index 4243290f..b4c276f4 100644
--- a/src/dynarmic/backend/arm64/a32_address_space.h
+++ b/src/dynarmic/backend/arm64/a32_address_space.h
@@ -54,17 +54,30 @@ private:
         using RunCodeFuncType = HaltReason (*)(CodePtr entry_point, A32JitState* context, volatile u32* halt_reason);
 
         RunCodeFuncType run_code;
+        RunCodeFuncType step_code;
         void* return_from_run_code;
 
         void* read_memory_8;
         void* read_memory_16;
         void* read_memory_32;
         void* read_memory_64;
+        void* exclusive_read_memory_8;
+        void* exclusive_read_memory_16;
+        void* exclusive_read_memory_32;
+        void* exclusive_read_memory_64;
         void* write_memory_8;
         void* write_memory_16;
         void* write_memory_32;
         void* write_memory_64;
+        void* exclusive_write_memory_8;
+        void* exclusive_write_memory_16;
+        void* exclusive_write_memory_32;
+        void* exclusive_write_memory_64;
+        void* call_svc;
+        void* exception_raised;
         void* isb_raised;
+        void* add_ticks;
+        void* get_ticks_remaining;
 
     } prelude_info;
 };
diff --git a/src/dynarmic/backend/arm64/emit_arm64.h b/src/dynarmic/backend/arm64/emit_arm64.h
index 0665ae87..db8c544b 100644
--- a/src/dynarmic/backend/arm64/emit_arm64.h
+++ b/src/dynarmic/backend/arm64/emit_arm64.h
@@ -44,10 +44,18 @@ enum class LinkTarget {
     ReadMemory16,
     ReadMemory32,
     ReadMemory64,
+    ExclusiveReadMemory8,
+    ExclusiveReadMemory16,
+    ExclusiveReadMemory32,
+    ExclusiveReadMemory64,
     WriteMemory8,
     WriteMemory16,
     WriteMemory32,
     WriteMemory64,
+    ExclusiveWriteMemory8,
+    ExclusiveWriteMemory16,
+    ExclusiveWriteMemory32,
+    ExclusiveWriteMemory64,
     CallSVC,
     ExceptionRaised,
     InstructionSynchronizationBarrierRaised,
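
Note: each LinkTarget names one prelude routine. During block emission,
EmitRelocation records a patch site plus the desired target, and Link() later
rewrites each site into a `BL prelude_info.X`. A hedged sketch of that
plumbing (field names per my reading of emit_arm64.h; treat as illustrative):

#include <cstddef>
#include <vector>

enum class LinkTarget;  // as extended in this patch

struct Relocation {
    std::ptrdiff_t code_offset;  // site of the placeholder branch in the block
    LinkTarget target;           // which prelude trampoline it should call
};

std::vector<Relocation> relocations;  // carried per block in EmittedBlockInfo
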
diff --git a/src/dynarmic/backend/arm64/emit_arm64_a32_memory.cpp b/src/dynarmic/backend/arm64/emit_arm64_a32_memory.cpp
index 83700e0f..e0bf558c 100644
--- a/src/dynarmic/backend/arm64/emit_arm64_a32_memory.cpp
+++ b/src/dynarmic/backend/arm64/emit_arm64_a32_memory.cpp
@@ -10,6 +10,7 @@
 #include "dynarmic/backend/arm64/emit_arm64.h"
 #include "dynarmic/backend/arm64/emit_context.h"
 #include "dynarmic/backend/arm64/reg_alloc.h"
+#include "dynarmic/ir/acc_type.h"
 #include "dynarmic/ir/basic_block.h"
 #include "dynarmic/ir/microinstruction.h"
 #include "dynarmic/ir/opcodes.h"
@@ -18,6 +19,68 @@ namespace Dynarmic::Backend::Arm64 {
 
 using namespace oaknut::util;
 
+static bool IsOrdered(IR::AccType acctype) {
+    return acctype == IR::AccType::ORDERED || acctype == IR::AccType::ORDEREDRW || acctype == IR::AccType::LIMITEDORDERED;
+}
+
+static void EmitReadMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    ctx.reg_alloc.PrepareForCall(inst, {}, args[1]);
+    const bool ordered = IsOrdered(args[2].GetImmediateAccType());
+
+    EmitRelocation(code, ctx, fn);
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+}
+
+static void EmitExclusiveReadMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    ctx.reg_alloc.PrepareForCall(inst, {}, args[1]);
+    const bool ordered = IsOrdered(args[2].GetImmediateAccType());
+
+    code.MOV(Wscratch0, 1);
+    code.STRB(Wscratch0, Xstate, offsetof(A32JitState, exclusive_state));
+    EmitRelocation(code, ctx, fn);
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+}
+
+static void EmitWriteMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    ctx.reg_alloc.PrepareForCall(inst, {}, args[1], args[2]);
+    const bool ordered = IsOrdered(args[3].GetImmediateAccType());
+
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+    EmitRelocation(code, ctx, fn);
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+}
+
+static void EmitExclusiveWriteMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    ctx.reg_alloc.PrepareForCall(inst, {}, args[1], args[2]);
+    const bool ordered = IsOrdered(args[3].GetImmediateAccType());
+
+    oaknut::Label end;
+
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+    code.LDRB(Wscratch0, Xstate, offsetof(A32JitState, exclusive_state));
+    code.CBZ(Wscratch0, end);
+    code.STRB(WZR, Xstate, offsetof(A32JitState, exclusive_state));
+    EmitRelocation(code, ctx, fn);
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+    code.l(end);
+}
+
 template<>
 void EmitIR<IR::Opcode::A32ClearExclusive>(oaknut::CodeGenerator& code, EmitContext&, IR::Inst*) {
     code.STR(WZR, Xstate, offsetof(A32JitState, exclusive_state));
@@ -25,138 +88,82 @@ void EmitIR<IR::Opcode::A32ClearExclusive>(oaknut::CodeGenerator& code, EmitCont
 
 template<>
 void EmitIR<IR::Opcode::A32ReadMemory8>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-
-    ctx.reg_alloc.PrepareForCall(inst, {}, args[1]);
-
-    EmitRelocation(code, ctx, LinkTarget::ReadMemory8);
+    EmitReadMemory(code, ctx, inst, LinkTarget::ReadMemory8);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32ReadMemory16>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-
-    ctx.reg_alloc.PrepareForCall(inst, {}, args[1]);
-
-    EmitRelocation(code, ctx, LinkTarget::ReadMemory16);
+    EmitReadMemory(code, ctx, inst, LinkTarget::ReadMemory16);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32ReadMemory32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-
-    ctx.reg_alloc.PrepareForCall(inst, {}, args[1]);
-
-    EmitRelocation(code, ctx, LinkTarget::ReadMemory32);
+    EmitReadMemory(code, ctx, inst, LinkTarget::ReadMemory32);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32ReadMemory64>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-
-    ctx.reg_alloc.PrepareForCall(inst, {}, args[1]);
-
-    EmitRelocation(code, ctx, LinkTarget::ReadMemory64);
+    EmitReadMemory(code, ctx, inst, LinkTarget::ReadMemory64);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32ExclusiveReadMemory8>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    (void)code;
-    (void)ctx;
-    (void)inst;
-    ASSERT_FALSE("Unimplemented");
+    EmitExclusiveReadMemory(code, ctx, inst, LinkTarget::ExclusiveReadMemory8);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32ExclusiveReadMemory16>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    (void)code;
-    (void)ctx;
-    (void)inst;
-    ASSERT_FALSE("Unimplemented");
+    EmitExclusiveReadMemory(code, ctx, inst, LinkTarget::ExclusiveReadMemory16);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32ExclusiveReadMemory32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    (void)code;
-    (void)ctx;
-    (void)inst;
-    ASSERT_FALSE("Unimplemented");
+    EmitExclusiveReadMemory(code, ctx, inst, LinkTarget::ExclusiveReadMemory32);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32ExclusiveReadMemory64>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    (void)code;
-    (void)ctx;
-    (void)inst;
-    ASSERT_FALSE("Unimplemented");
+    EmitExclusiveReadMemory(code, ctx, inst, LinkTarget::ExclusiveReadMemory64);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32WriteMemory8>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-
-    ctx.reg_alloc.PrepareForCall(nullptr, {}, args[1], args[2]);
-
-    EmitRelocation(code, ctx, LinkTarget::WriteMemory8);
+    EmitWriteMemory(code, ctx, inst, LinkTarget::WriteMemory8);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32WriteMemory16>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-
-    ctx.reg_alloc.PrepareForCall(nullptr, {}, args[1], args[2]);
-
-    EmitRelocation(code, ctx, LinkTarget::WriteMemory16);
+    EmitWriteMemory(code, ctx, inst, LinkTarget::WriteMemory16);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32WriteMemory32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-
-    ctx.reg_alloc.PrepareForCall(nullptr, {}, args[1], args[2]);
-
-    EmitRelocation(code, ctx, LinkTarget::WriteMemory32);
+    EmitWriteMemory(code, ctx, inst, LinkTarget::WriteMemory32);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32WriteMemory64>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-
-    ctx.reg_alloc.PrepareForCall(nullptr, {}, args[1], args[2]);
-
-    EmitRelocation(code, ctx, LinkTarget::WriteMemory64);
+    EmitWriteMemory(code, ctx, inst, LinkTarget::WriteMemory64);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32ExclusiveWriteMemory8>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    (void)code;
-    (void)ctx;
-    (void)inst;
-    ASSERT_FALSE("Unimplemented");
+    EmitExclusiveWriteMemory(code, ctx, inst, LinkTarget::ExclusiveWriteMemory8);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32ExclusiveWriteMemory16>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    (void)code;
-    (void)ctx;
-    (void)inst;
-    ASSERT_FALSE("Unimplemented");
+    EmitExclusiveWriteMemory(code, ctx, inst, LinkTarget::ExclusiveWriteMemory16);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32ExclusiveWriteMemory32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    (void)code;
-    (void)ctx;
-    (void)inst;
-    ASSERT_FALSE("Unimplemented");
+    EmitExclusiveWriteMemory(code, ctx, inst, LinkTarget::ExclusiveWriteMemory32);
 }
 
 template<>
 void EmitIR<IR::Opcode::A32ExclusiveWriteMemory64>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
-    (void)code;
-    (void)ctx;
-    (void)inst;
-    ASSERT_FALSE("Unimplemented");
+    EmitExclusiveWriteMemory(code, ctx, inst, LinkTarget::ExclusiveWriteMemory64);
 }
 
 } // namespace Dynarmic::Backend::Arm64
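
Note: the helpers above encode the memory-ordering and local-monitor logic in
one place. Ordered loads get a trailing DMB ISH (acquire-like), ordered stores
get a DMB ISH on both sides, and the byte-sized `exclusive_state` flag in the
JIT state models the local exclusive monitor: exclusive reads arm it,
exclusive writes only call out to the global monitor if it is still armed,
disarming it first. A self-contained pseudo-C++ model of the exclusive-store
sequence (illustrative only; `ExclWriteFn` stands in for the
exclusive_write_memory_N trampoline):

#include <cstdint>

using ExclWriteFn = std::uint32_t (*)(std::uint32_t vaddr, std::uint64_t value);

std::uint32_t ModelExclusiveWrite(std::uint8_t& exclusive_state, bool ordered,
                                  ExclWriteFn call_monitor,
                                  std::uint32_t vaddr, std::uint64_t value,
                                  std::uint32_t w0) {
    if (ordered) { /* DMB ISH: order prior accesses before the store */ }
    if (exclusive_state != 0) {           // LDRB + CBZ end
        exclusive_state = 0;              // STRB WZR: disarm the local monitor
        w0 = call_monitor(vaddr, value);  // 0 = store performed, 1 = monitor lost
        if (ordered) { /* DMB ISH */ }
    }
    return w0;  // status result of the IR instruction; on the skip path,
                // whatever W0 already held flows through (per the patch as shown)
}
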
diff --git a/src/dynarmic/backend/arm64/exclusive_monitor.cpp b/src/dynarmic/backend/arm64/exclusive_monitor.cpp
new file mode 100644
index 00000000..cd28598a
--- /dev/null
+++ b/src/dynarmic/backend/arm64/exclusive_monitor.cpp
@@ -0,0 +1,60 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2022 MerryMage
+ * SPDX-License-Identifier: 0BSD
+ */
+
+#include "dynarmic/interface/exclusive_monitor.h"
+
+#include <algorithm>
+
+#include <mcl/assert.hpp>
+
+namespace Dynarmic {
+
+ExclusiveMonitor::ExclusiveMonitor(size_t processor_count)
+        : exclusive_addresses(processor_count, INVALID_EXCLUSIVE_ADDRESS)
+        , exclusive_values(processor_count) {
+    Unlock();
+}
+
+size_t ExclusiveMonitor::GetProcessorCount() const {
+    return exclusive_addresses.size();
+}
+
+void ExclusiveMonitor::Lock() {
+    lock.Lock();
+}
+
+void ExclusiveMonitor::Unlock() {
+    lock.Unlock();
+}
+
+bool ExclusiveMonitor::CheckAndClear(size_t processor_id, VAddr address) {
+    const VAddr masked_address = address & RESERVATION_GRANULE_MASK;
+
+    Lock();
+    if (exclusive_addresses[processor_id] != masked_address) {
+        Unlock();
+        return false;
+    }
+
+    for (VAddr& other_address : exclusive_addresses) {
+        if (other_address == masked_address) {
+            other_address = INVALID_EXCLUSIVE_ADDRESS;
+        }
+    }
+
+    return true;
+}
+
+void ExclusiveMonitor::Clear() {
+    Lock();
+    std::fill(exclusive_addresses.begin(), exclusive_addresses.end(), INVALID_EXCLUSIVE_ADDRESS);
+    Unlock();
+}
+
+void ExclusiveMonitor::ClearProcessor(size_t processor_id) {
+    Lock();
+    exclusive_addresses[processor_id] = INVALID_EXCLUSIVE_ADDRESS;
+    Unlock();
+}
+
+} // namespace Dynarmic
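
Note: CheckAndClear deliberately returns true *without* releasing the spin
lock; the success path hands the lock to its caller so that the reservation
check and the conditional store happen atomically with respect to other
processors. A standalone model of that hand-off (illustrative; in dynarmic the
caller is the templated DoExclusiveOperation in
dynarmic/interface/exclusive_monitor.h, whose value-plumbing details differ):

#include <cstddef>
#include <cstdint>
#include <functional>

struct MonitorModel {
    bool CheckAndClear(std::size_t processor_id, std::uint64_t vaddr);  // locks; keeps lock held on success
    void Unlock();
    std::uint64_t expected_value;  // recorded earlier by ReadAndMark

    bool DoExclusiveOperation(std::size_t processor_id, std::uint64_t vaddr,
                              const std::function<bool(std::uint64_t)>& op) {
        if (!CheckAndClear(processor_id, vaddr)) {
            return false;                    // reservation lost; store not attempted
        }
        const bool ok = op(expected_value);  // attempt the conditional store
        Unlock();                            // lock released here, not in CheckAndClear
        return ok;
    }
};
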
diff --git a/src/dynarmic/common/spin_lock_arm64.cpp b/src/dynarmic/common/spin_lock_arm64.cpp
new file mode 100644
index 00000000..77d306cc
--- /dev/null
+++ b/src/dynarmic/common/spin_lock_arm64.cpp
@@ -0,0 +1,75 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2022 MerryMage
+ * SPDX-License-Identifier: 0BSD
+ */
+
+#include <oaknut/code_block.hpp>
+#include <oaknut/oaknut.hpp>
+
+#include "dynarmic/backend/arm64/abi.h"
+#include "dynarmic/common/spin_lock.h"
+
+namespace Dynarmic {
+
+using Backend::Arm64::Wscratch0;
+using Backend::Arm64::Wscratch1;
+using namespace oaknut::util;
+
+void EmitSpinLockLock(oaknut::CodeGenerator& code, oaknut::XReg ptr) {
+    oaknut::Label start, loop;
+
+    code.MOV(Wscratch1, 1);
+    code.SEVL();
+    code.l(start);
+    code.WFE();
+    code.l(loop);
+    code.LDAXR(Wscratch0, ptr);
+    code.CBNZ(Wscratch0, start);
+    code.STXR(Wscratch0, Wscratch1, ptr);
+    code.CBNZ(Wscratch0, loop);
+}
+
+void EmitSpinLockUnlock(oaknut::CodeGenerator& code, oaknut::XReg ptr) {
+    code.STLR(WZR, ptr);
+}
+
+namespace {
+
+struct SpinLockImpl {
+    SpinLockImpl();
+
+    oaknut::CodeBlock mem;
+    oaknut::CodeGenerator code;
+    void (*lock)(volatile int*);
+    void (*unlock)(volatile int*);
+};
+
+SpinLockImpl impl;
+
+SpinLockImpl::SpinLockImpl()
+        : mem{4096}
+        , code{mem.ptr()} {
+    mem.unprotect();
+
+    lock = code.ptr<void (*)(volatile int*)>();
+    EmitSpinLockLock(code, X0);
+    code.RET();
+
+    unlock = code.ptr<void (*)(volatile int*)>();
+    EmitSpinLockUnlock(code, X0);
+    code.RET();
+
+    mem.protect();
+}
+
+}  // namespace
+
+void SpinLock::Lock() {
+    impl.lock(&storage);
+}
+
+void SpinLock::Unlock() {
+    impl.unlock(&storage);
+}
+
+}  // namespace Dynarmic
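
Note: the lock body uses the classic AArch64 WFE/SEVL spin: LDAXR/STXR form
the acquire-exclusive attempt, a failed attempt parks the core in WFE until an
event (such as the unlocking STLR clearing the exclusive monitor) wakes it,
and the leading SEVL primes a local event so the very first WFE falls through
without stalling. Emitting it through oaknut also lets the backend inline the
same sequence into generated code. A plain-C++ analogue of what the emitted
functions do (illustrative only):

#include <atomic>

void lock(std::atomic<int>& flag) {
    int expected = 0;
    while (!flag.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
        expected = 0;  // LDAXR+CBNZ+STXR retry; WFE naps between attempts
    }
}

void unlock(std::atomic<int>& flag) {
    flag.store(0, std::memory_order_release);  // STLR WZR
}
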
diff --git a/src/dynarmic/common/spin_lock_arm64.h b/src/dynarmic/common/spin_lock_arm64.h
new file mode 100644
index 00000000..c0a86bfe
--- /dev/null
+++ b/src/dynarmic/common/spin_lock_arm64.h
@@ -0,0 +1,15 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2022 MerryMage
+ * SPDX-License-Identifier: 0BSD
+ */
+
+#pragma once
+
+#include <oaknut/oaknut.hpp>
+
+namespace Dynarmic {
+
+void EmitSpinLockLock(oaknut::CodeGenerator& code, oaknut::XReg ptr);
+void EmitSpinLockUnlock(oaknut::CodeGenerator& code, oaknut::XReg ptr);
+
+}  // namespace Dynarmic
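
Note: with these pieces in place, a multi-core embedder shares one
ExclusiveMonitor across its Jit instances through the user configuration. A
minimal wiring sketch (uses the documented A32 interface; `MakeCoreConfig` is
a hypothetical helper and error handling is omitted):

#include <cstddef>

#include "dynarmic/interface/A32/config.h"
#include "dynarmic/interface/exclusive_monitor.h"

Dynarmic::ExclusiveMonitor monitor{2};  // one reservation slot per emulated core

Dynarmic::A32::UserConfig MakeCoreConfig(Dynarmic::A32::UserCallbacks* callbacks,
                                         std::size_t core) {
    Dynarmic::A32::UserConfig conf;
    conf.callbacks = callbacks;
    conf.global_monitor = &monitor;  // shared across all cores
    conf.processor_id = core;        // selects this core's slot in the monitor
    return conf;
}
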