diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index f962e57f..c4faec44 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -293,6 +293,7 @@ if (ARCHITECTURE STREQUAL "x86_64")
         backend/x64/perf_map.h
         backend/x64/reg_alloc.cpp
         backend/x64/reg_alloc.h
+        backend/x64/stack_layout.h
     )
 
     if ("A32" IN_LIST DYNARMIC_FRONTENDS)
diff --git a/src/backend/x64/a32_emit_x64.cpp b/src/backend/x64/a32_emit_x64.cpp
index 4e6b681f..ae5ce68b 100644
--- a/src/backend/x64/a32_emit_x64.cpp
+++ b/src/backend/x64/a32_emit_x64.cpp
@@ -109,7 +109,7 @@ A32EmitX64::BlockDescriptor A32EmitX64::Emit(IR::Block& block) {
         return gprs;
     }();
 
-    RegAlloc reg_alloc{code, A32JitState::SpillCount, SpillToOpArg<A32JitState>, gpr_order, any_xmm};
+    RegAlloc reg_alloc{code, gpr_order, any_xmm};
     A32EmitContext ctx{conf, reg_alloc, block};
 
     // Start emitting.
diff --git a/src/backend/x64/a32_jitstate.h b/src/backend/x64/a32_jitstate.h
index 40d85e6b..92271c73 100644
--- a/src/backend/x64/a32_jitstate.h
+++ b/src/backend/x64/a32_jitstate.h
@@ -39,13 +39,6 @@ struct A32JitState {
 
     alignas(16) std::array<u32, 64> ExtReg{}; // Extension registers.
 
-    static constexpr size_t SpillCount = 64;
-    alignas(16) std::array<std::array<u64, 2>, SpillCount> spill{}; // Spill.
-    static Xbyak::Address GetSpillLocationFromIndex(size_t i) {
-        using namespace Xbyak::util;
-        return xword[r15 + offsetof(A32JitState, spill) + i * sizeof(u64) * 2];
-    }
-
     // For internal use (See: BlockOfCode::RunCode)
     u32 guest_MXCSR = 0x00001f80;
     u32 asimd_MXCSR = 0x00009fc0;
diff --git a/src/backend/x64/a64_emit_x64.cpp b/src/backend/x64/a64_emit_x64.cpp
index 26670b2c..ad5ad03f 100644
--- a/src/backend/x64/a64_emit_x64.cpp
+++ b/src/backend/x64/a64_emit_x64.cpp
@@ -75,7 +75,7 @@ A64EmitX64::BlockDescriptor A64EmitX64::Emit(IR::Block& block) {
         return gprs;
     }();
 
-    RegAlloc reg_alloc{code, A64JitState::SpillCount, SpillToOpArg<A64JitState>, gpr_order, any_xmm};
+    RegAlloc reg_alloc{code, gpr_order, any_xmm};
     A64EmitContext ctx{conf, reg_alloc, block};
 
     // Start emitting.
diff --git a/src/backend/x64/a64_jitstate.h b/src/backend/x64/a64_jitstate.h
index 1bfe3ad5..e94a88c6 100644
--- a/src/backend/x64/a64_jitstate.h
+++ b/src/backend/x64/a64_jitstate.h
@@ -42,13 +42,6 @@ struct A64JitState {
 
     alignas(16) std::array<u64, 64> vec{}; // Extension registers.
 
-    static constexpr size_t SpillCount = 64;
-    alignas(16) std::array<std::array<u64, 2>, SpillCount> spill{}; // Spill.
-    static Xbyak::Address GetSpillLocationFromIndex(size_t i) {
-        using namespace Xbyak::util;
-        return xword[r15 + offsetof(A64JitState, spill) + i * sizeof(u64) * 2];
-    }
-
     // For internal use (See: BlockOfCode::RunCode)
     u32 guest_MXCSR = 0x00001f80;
     u32 asimd_MXCSR = 0x00009fc0;
diff --git a/src/backend/x64/block_of_code.cpp b/src/backend/x64/block_of_code.cpp
index fe8ee997..8a19a7fb 100644
--- a/src/backend/x64/block_of_code.cpp
+++ b/src/backend/x64/block_of_code.cpp
@@ -13,6 +13,7 @@
 
 #include "backend/x64/block_of_code.h"
 #include "backend/x64/hostloc.h"
 #include "backend/x64/perf_map.h"
+#include "backend/x64/stack_layout.h"
 #include "common/assert.h"
 #include "common/bit_util.h"
@@ -155,7 +156,7 @@ void BlockOfCode::GenRunCode(std::function<void(BlockOfCode&)> rcp) {
     // 1. It saves all the registers we as a callee need to save.
    // 2. It aligns the stack so that the code the JIT emits can assume
     //    that the stack is appropriately aligned for CALLs.
-    ABI_PushCalleeSaveRegistersAndAdjustStack(*this);
+    ABI_PushCalleeSaveRegistersAndAdjustStack(*this, sizeof(StackLayout));
 
     mov(r15, ABI_PARAM1);
     mov(rbx, ABI_PARAM2); // save temporarily in non-volatile register
@@ -172,7 +173,7 @@ void BlockOfCode::GenRunCode(std::function<void(BlockOfCode&)> rcp) {
     align();
     step_code = getCurr<RunCodeFuncType>();
 
-    ABI_PushCalleeSaveRegistersAndAdjustStack(*this);
+    ABI_PushCalleeSaveRegistersAndAdjustStack(*this, sizeof(StackLayout));
 
     mov(r15, ABI_PARAM1);
 
@@ -222,7 +223,7 @@ void BlockOfCode::GenRunCode(std::function<void(BlockOfCode&)> rcp) {
         sub(param[0], qword[r15 + jsi.offsetof_cycles_remaining]);
     });
 
-    ABI_PopCalleeSaveRegistersAndAdjustStack(*this);
+    ABI_PopCalleeSaveRegistersAndAdjustStack(*this, sizeof(StackLayout));
     ret();
 
     PerfMapRegister(run_code, getCurr(), "dynarmic_dispatcher");
diff --git a/src/backend/x64/hostloc.cpp b/src/backend/x64/hostloc.cpp
index b51de7b7..dae8bf29 100644
--- a/src/backend/x64/hostloc.cpp
+++ b/src/backend/x64/hostloc.cpp
@@ -5,7 +5,9 @@
 
 #include <xbyak.h>
 
+#include "backend/x64/abi.h"
 #include "backend/x64/hostloc.h"
+#include "backend/x64/stack_layout.h"
 
 namespace Dynarmic::Backend::X64 {
@@ -19,4 +21,14 @@ Xbyak::Xmm HostLocToXmm(HostLoc loc) {
     return Xbyak::Xmm(static_cast<int>(loc) - static_cast<int>(HostLoc::XMM0));
 }
 
+Xbyak::Address SpillToOpArg(HostLoc loc) {
+    ASSERT(HostLocIsSpill(loc));
+
+    size_t i = static_cast<size_t>(loc) - static_cast<size_t>(HostLoc::FirstSpill);
+    ASSERT_MSG(i < SpillCount, "Spill index greater than number of available spill locations");
+
+    using namespace Xbyak::util;
+    return xword[rsp + ABI_SHADOW_SPACE + offsetof(StackLayout, spill) + i * sizeof(u64) * 2];
+}
+
 } // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/hostloc.h b/src/backend/x64/hostloc.h
index 460a6711..fe8169ac 100644
--- a/src/backend/x64/hostloc.h
+++ b/src/backend/x64/hostloc.h
@@ -110,15 +110,6 @@ const HostLocList any_xmm = {
 
 Xbyak::Reg64 HostLocToReg64(HostLoc loc);
 Xbyak::Xmm HostLocToXmm(HostLoc loc);
-
-template<typename JitStateType>
-Xbyak::Address SpillToOpArg(HostLoc loc) {
-    ASSERT(HostLocIsSpill(loc));
-
-    size_t i = static_cast<size_t>(loc) - static_cast<size_t>(HostLoc::FirstSpill);
-    ASSERT_MSG(i < JitStateType::SpillCount, "Spill index greater than number of available spill locations");
-
-    return JitStateType::GetSpillLocationFromIndex(i);
-}
+Xbyak::Address SpillToOpArg(HostLoc loc);
 
 } // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/reg_alloc.cpp b/src/backend/x64/reg_alloc.cpp
index b341eb80..0ab77a8c 100644
--- a/src/backend/x64/reg_alloc.cpp
+++ b/src/backend/x64/reg_alloc.cpp
@@ -12,6 +12,7 @@
 
 #include "backend/x64/abi.h"
 #include "backend/x64/reg_alloc.h"
+#include "backend/x64/stack_layout.h"
 #include "common/assert.h"
 
 namespace Dynarmic::Backend::X64 {
@@ -223,12 +224,11 @@ bool Argument::IsInMemory() const {
     return HostLocIsSpill(*reg_alloc.ValueLocation(value.GetInst()));
 }
 
-RegAlloc::RegAlloc(BlockOfCode& code, size_t num_spills, std::function<Xbyak::Address(HostLoc)> spill_to_addr, std::vector<HostLoc> gpr_order, std::vector<HostLoc> xmm_order)
+RegAlloc::RegAlloc(BlockOfCode& code, std::vector<HostLoc> gpr_order, std::vector<HostLoc> xmm_order)
     : gpr_order(gpr_order)
     , xmm_order(xmm_order)
-    , hostloc_info(NonSpillHostLocCount + num_spills)
+    , hostloc_info(NonSpillHostLocCount + SpillCount)
     , code(code)
-    , spill_to_addr(std::move(spill_to_addr))
 {}
 
 RegAlloc::ArgumentInfo RegAlloc::GetArgumentInfo(IR::Inst* inst) {
@@ -629,7 +629,7 @@ void RegAlloc::EmitMove(size_t bit_width, HostLoc to, HostLoc from) {
             MAYBE_AVX(movd, HostLocToReg64(to).cvt32(), HostLocToXmm(from));
         }
     } else if (HostLocIsXMM(to) && HostLocIsSpill(from)) {
-        const Xbyak::Address spill_addr = spill_to_addr(from);
+        const Xbyak::Address spill_addr = SpillToOpArg(from);
         ASSERT(spill_addr.getBit() >= bit_width);
         switch (bit_width) {
         case 128:
@@ -647,7 +647,7 @@ void RegAlloc::EmitMove(size_t bit_width, HostLoc to, HostLoc from) {
             UNREACHABLE();
         }
     } else if (HostLocIsSpill(to) && HostLocIsXMM(from)) {
-        const Xbyak::Address spill_addr = spill_to_addr(to);
+        const Xbyak::Address spill_addr = SpillToOpArg(to);
         ASSERT(spill_addr.getBit() >= bit_width);
         switch (bit_width) {
         case 128:
@@ -667,16 +667,16 @@ void RegAlloc::EmitMove(size_t bit_width, HostLoc to, HostLoc from) {
     } else if (HostLocIsGPR(to) && HostLocIsSpill(from)) {
         ASSERT(bit_width != 128);
         if (bit_width == 64) {
-            code.mov(HostLocToReg64(to), spill_to_addr(from));
+            code.mov(HostLocToReg64(to), SpillToOpArg(from));
         } else {
-            code.mov(HostLocToReg64(to).cvt32(), spill_to_addr(from));
+            code.mov(HostLocToReg64(to).cvt32(), SpillToOpArg(from));
         }
     } else if (HostLocIsSpill(to) && HostLocIsGPR(from)) {
         ASSERT(bit_width != 128);
         if (bit_width == 64) {
-            code.mov(spill_to_addr(to), HostLocToReg64(from));
+            code.mov(SpillToOpArg(to), HostLocToReg64(from));
         } else {
-            code.mov(spill_to_addr(to), HostLocToReg64(from).cvt32());
+            code.mov(SpillToOpArg(to), HostLocToReg64(from).cvt32());
         }
     } else {
         ASSERT_FALSE("Invalid RegAlloc::EmitMove");
diff --git a/src/backend/x64/reg_alloc.h b/src/backend/x64/reg_alloc.h
index 37347f60..04f6987b 100644
--- a/src/backend/x64/reg_alloc.h
+++ b/src/backend/x64/reg_alloc.h
@@ -96,7 +96,7 @@ class RegAlloc final {
 public:
     using ArgumentInfo = std::array<Argument, IR::max_arg_count>;
 
-    explicit RegAlloc(BlockOfCode& code, size_t num_spills, std::function<Xbyak::Address(HostLoc)> spill_to_addr, std::vector<HostLoc> gpr_order, std::vector<HostLoc> xmm_order);
+    explicit RegAlloc(BlockOfCode& code, std::vector<HostLoc> gpr_order, std::vector<HostLoc> xmm_order);
 
     ArgumentInfo GetArgumentInfo(IR::Inst* inst);
 
@@ -160,7 +160,6 @@ private:
    const HostLocInfo& LocInfo(HostLoc loc) const;
 
     BlockOfCode& code;
-    std::function<Xbyak::Address(HostLoc)> spill_to_addr;
     void EmitMove(size_t bit_width, HostLoc to, HostLoc from);
     void EmitExchange(HostLoc a, HostLoc b);
 };
diff --git a/src/backend/x64/stack_layout.h b/src/backend/x64/stack_layout.h
new file mode 100644
index 00000000..054fefab
--- /dev/null
+++ b/src/backend/x64/stack_layout.h
@@ -0,0 +1,22 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2016 MerryMage
+ * SPDX-License-Identifier: 0BSD
+ */
+
+#pragma once
+
+#include <array>
+
+#include "common/common_types.h"
+
+namespace Dynarmic::Backend::X64 {
+
+constexpr size_t SpillCount = 64;
+
+struct alignas(16) StackLayout {
+    std::array<std::array<u64, 2>, SpillCount> spill;
+};
+
+static_assert(sizeof(StackLayout) % 16 == 0);
+
+} // namespace Dynarmic::Backend::X64
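The net effect of this change is that spill storage moves out of A32JitState/A64JitState (previously addressed relative to r15) into the host stack frame (addressed relative to rsp), with the prologue growing the frame by sizeof(StackLayout) via the extra argument to ABI_PushCalleeSaveRegistersAndAdjustStack. Below is a minimal standalone sketch of the slot-address arithmetic the new SpillToOpArg performs; the ABI_SHADOW_SPACE value here is an assumption for illustration (32 bytes as on Win64; the real constant comes from backend/x64/abi.h and is 0 on SysV x64):

#include <array>
#include <cstddef>
#include <cstdint>

// Assumed value for illustration only; dynarmic defines this in backend/x64/abi.h.
constexpr std::size_t ABI_SHADOW_SPACE = 32;

constexpr std::size_t SpillCount = 64;

struct alignas(16) StackLayout {
    std::array<std::array<std::uint64_t, 2>, SpillCount> spill;
};

// Mirrors the operand built by SpillToOpArg:
//   xword[rsp + ABI_SHADOW_SPACE + offsetof(StackLayout, spill) + i * sizeof(u64) * 2]
// Each slot is 16 bytes wide, so a full 128-bit XMM register can be spilled
// without truncation (EmitMove's bit_width == 128 case relies on this).
constexpr std::size_t SpillSlotOffsetFromRsp(std::size_t i) {
    return ABI_SHADOW_SPACE + offsetof(StackLayout, spill) + i * sizeof(std::uint64_t) * 2;
}

static_assert(SpillSlotOffsetFromRsp(0) == 32);
static_assert(SpillSlotOffsetFromRsp(1) == 48);
static_assert(sizeof(StackLayout) == SpillCount * 16);

Because every spill slot now sits at a fixed offset from rsp rather than at a JitState-specific offset from r15, RegAlloc no longer needs the per-frontend num_spills/spill_to_addr parameters, which is why its constructor shrinks to {code, gpr_order, xmm_order} in both emitters.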