From 611cffb612176982962d2a20e0708ddfbc009b79 Mon Sep 17 00:00:00 2001 From: MerryMage Date: Wed, 24 Aug 2016 01:32:35 +0100 Subject: [PATCH 1/3] externals: Add xbyak --- .gitmodules | 3 +++ externals/xbyak | 1 + 2 files changed, 4 insertions(+) create mode 100644 .gitmodules create mode 160000 externals/xbyak diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..d501359b --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "externals/xbyak"] + path = externals/xbyak + url = https://github.com/herumi/xbyak diff --git a/externals/xbyak b/externals/xbyak new file mode 160000 index 00000000..4cc35dbe --- /dev/null +++ b/externals/xbyak @@ -0,0 +1 @@ +Subproject commit 4cc35dbec0e2eb4d66205f12ea3fab9d8622f99f From e32812cd0026e87ff5d94c4d5ff752baf36f7749 Mon Sep 17 00:00:00 2001 From: MerryMage Date: Wed, 24 Aug 2016 20:07:08 +0100 Subject: [PATCH 2/3] Port x64 backend to xbyak --- CMakeLists.txt | 4 + src/CMakeLists.txt | 15 +- src/backend_x64/abi.cpp | 113 + src/backend_x64/abi.h | 119 + src/backend_x64/block_of_code.cpp | 137 +- src/backend_x64/block_of_code.h | 116 +- src/backend_x64/emit_x64.cpp | 1700 +++++++------- src/backend_x64/emit_x64.h | 2 +- src/backend_x64/hostloc.cpp | 35 + src/backend_x64/hostloc.h | 98 + src/backend_x64/jitstate.h | 4 +- src/backend_x64/reg_alloc.cpp | 150 +- src/backend_x64/reg_alloc.h | 172 +- src/common/bit_set.h | 190 -- src/common/code_block.h | 92 - src/common/iterator_util.h | 39 + src/common/memory_util.cpp | 192 -- src/common/memory_util.h | 19 - src/common/x64/abi.cpp | 363 --- src/common/x64/abi.h | 59 - src/common/x64/cpu_detect.cpp | 197 -- src/common/x64/cpu_detect.h | 66 - src/common/x64/emitter.cpp | 2018 ----------------- src/common/x64/emitter.h | 1057 --------- .../disassembler/disassembler_thumb.cpp | 4 +- 25 files changed, 1638 insertions(+), 5323 deletions(-) create mode 100644 src/backend_x64/abi.cpp create mode 100644 src/backend_x64/abi.h create mode 100644 src/backend_x64/hostloc.cpp create mode 100644 src/backend_x64/hostloc.h delete mode 100644 src/common/bit_set.h delete mode 100644 src/common/code_block.h create mode 100644 src/common/iterator_util.h delete mode 100644 src/common/memory_util.cpp delete mode 100644 src/common/memory_util.h delete mode 100644 src/common/x64/abi.cpp delete mode 100644 src/common/x64/abi.h delete mode 100644 src/common/x64/cpu_detect.cpp delete mode 100644 src/common/x64/cpu_detect.h delete mode 100644 src/common/x64/emitter.cpp delete mode 100644 src/common/x64/emitter.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 11026ffe..ec9f4945 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -29,6 +29,7 @@ else() -Wfatal-errors -Wno-unused-parameter -Wno-missing-braces) + add_compile_options(-fno-operator-names) if (ARCHITECTURE_x86_64) add_compile_options(-msse4.1) @@ -67,6 +68,9 @@ include_directories(${Boost_INCLUDE_DIRS}) include_directories(externals/catch) enable_testing(true) # Enables unit-testing. +# Include Xbyak +include_directories(externals/xbyak/xbyak) + # Include LLVM if (DYNARMIC_USE_LLVM) find_package(LLVM REQUIRED CONFIG) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 3b2217cc..d664f8bc 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -2,17 +2,15 @@ include_directories(.) 
include(CreateDirectoryGroups) set(SRCS + backend_x64/abi.cpp backend_x64/block_of_code.cpp backend_x64/emit_x64.cpp + backend_x64/hostloc.cpp backend_x64/interface_x64.cpp backend_x64/jitstate.cpp backend_x64/reg_alloc.cpp common/memory_pool.cpp - common/memory_util.cpp common/string_util.cpp - common/x64/abi.cpp - common/x64/cpu_detect.cpp - common/x64/emitter.cpp frontend/arm_types.cpp frontend/disassembler/disassembler_arm.cpp frontend/disassembler/disassembler_thumb.cpp @@ -41,24 +39,21 @@ set(SRCS ) set(HEADERS + backend_x64/abi.h backend_x64/block_of_code.h backend_x64/emit_x64.h + backend_x64/hostloc.h backend_x64/jitstate.h backend_x64/reg_alloc.h common/assert.h - common/bit_set.h common/bit_util.h - common/code_block.h common/common_types.h common/intrusive_list.h + common/iterator_util.h common/memory_pool.h - common/memory_util.h common/mp.h common/scope_exit.h common/string_util.h - common/x64/abi.h - common/x64/cpu_detect.h - common/x64/emitter.h frontend/arm_types.h frontend/arm/FPSCR.h frontend/decoder/arm.h diff --git a/src/backend_x64/abi.cpp b/src/backend_x64/abi.cpp new file mode 100644 index 00000000..26e41ef2 --- /dev/null +++ b/src/backend_x64/abi.cpp @@ -0,0 +1,113 @@ +// Copyright (C) 2003 Dolphin Project. + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, version 2.0 or later versions. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License 2.0 for more details. + +// A copy of the GPL 2.0 should have been included with the program. +// If not, see http://www.gnu.org/licenses/ + +// 24th August 2016: This code was modified for Dynarmic. 
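+
+// Worked example of the frame the helpers below establish (Win64, frame_size = 0):
+// on entry RSP % 16 == 8; the eight callee-save GPR pushes (64 bytes) leave it
+// at 8 again, so CalculateFrameInfo inserts an 8-byte pad ahead of the ten XMM
+// save slots. The subtraction totals 8 (pad) + 160 (XMM) + 32 (shadow space)
+// = 200 bytes, leaving RSP 16-byte aligned with the XMM area at [rsp + 32].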
+
+#include <algorithm>
+
+#include "backend_x64/abi.h"
+#include "common/common_types.h"
+#include "common/iterator_util.h"
+
+namespace Dynarmic {
+namespace BackendX64 {
+
+constexpr size_t GPR_SIZE = 8;
+constexpr size_t XMM_SIZE = 16;
+
+struct FrameInfo {
+    size_t stack_subtraction = 0;
+    size_t xmm_offset = 0;
+};
+
+static FrameInfo CalculateFrameInfo(size_t num_gprs, size_t num_xmms, size_t frame_size) {
+    FrameInfo frame_info = {};
+
+    size_t rsp_alignment = 8; // We are always 8-byte aligned initially
+    rsp_alignment -= num_gprs * GPR_SIZE;
+
+    if (num_xmms > 0) {
+        frame_info.stack_subtraction = rsp_alignment & 0xF;
+        frame_info.stack_subtraction += num_xmms * XMM_SIZE;
+    }
+
+    size_t xmm_base = frame_info.stack_subtraction;
+
+    frame_info.stack_subtraction += frame_size;
+    frame_info.stack_subtraction += ABI_SHADOW_SPACE;
+
+    rsp_alignment -= frame_info.stack_subtraction;
+    frame_info.stack_subtraction += rsp_alignment & 0xF;
+
+    frame_info.xmm_offset = frame_info.stack_subtraction - xmm_base;
+
+    return frame_info;
+}
+
+void ABI_PushCalleeSaveRegistersAndAdjustStack(Xbyak::CodeGenerator* code, size_t frame_size) {
+    using namespace Xbyak::util;
+
+    const size_t num_gprs = std::count_if(ABI_ALL_CALLEE_SAVE.begin(), ABI_ALL_CALLEE_SAVE.end(), HostLocIsGPR);
+    const size_t num_xmms = std::count_if(ABI_ALL_CALLEE_SAVE.begin(), ABI_ALL_CALLEE_SAVE.end(), HostLocIsXMM);
+
+    FrameInfo frame_info = CalculateFrameInfo(num_gprs, num_xmms, frame_size);
+
+    for (HostLoc gpr : ABI_ALL_CALLEE_SAVE) {
+        if (HostLocIsGPR(gpr)) {
+            code->push(HostLocToReg64(gpr));
+        }
+    }
+
+    if (frame_info.stack_subtraction != 0) {
+        code->sub(rsp, u32(frame_info.stack_subtraction));
+    }
+
+    size_t xmm_offset = frame_info.xmm_offset;
+    for (HostLoc xmm : ABI_ALL_CALLEE_SAVE) {
+        if (HostLocIsXMM(xmm)) {
+            code->movaps(code->xword[rsp + xmm_offset], HostLocToXmm(xmm));
+            xmm_offset += XMM_SIZE;
+        }
+    }
+}
+
+void ABI_PopCalleeSaveRegistersAndAdjustStack(Xbyak::CodeGenerator* code, size_t frame_size) {
+    using namespace Xbyak::util;
+
+    const size_t num_gprs = std::count_if(ABI_ALL_CALLEE_SAVE.begin(), ABI_ALL_CALLEE_SAVE.end(), HostLocIsGPR);
+    const size_t num_xmms = std::count_if(ABI_ALL_CALLEE_SAVE.begin(), ABI_ALL_CALLEE_SAVE.end(), HostLocIsXMM);
+
+    FrameInfo frame_info = CalculateFrameInfo(num_gprs, num_xmms, frame_size);
+
+    // Walk the XMMs in the same order they were saved: the ascending
+    // xmm_offset below matches the save loop, so each register must read
+    // back from its own slot.
+    size_t xmm_offset = frame_info.xmm_offset;
+    for (HostLoc xmm : ABI_ALL_CALLEE_SAVE) {
+        if (HostLocIsXMM(xmm)) {
+            code->movaps(HostLocToXmm(xmm), code->xword[rsp + xmm_offset]);
+            xmm_offset += XMM_SIZE;
+        }
+    }
+
+    if (frame_info.stack_subtraction != 0) {
+        code->add(rsp, u32(frame_info.stack_subtraction));
+    }
+
+    for (HostLoc gpr : Common::Reverse(ABI_ALL_CALLEE_SAVE)) {
+        if (HostLocIsGPR(gpr)) {
+            code->pop(HostLocToReg64(gpr));
+        }
+    }
+}
+
+} // namespace BackendX64
+} // namespace Dynarmic
diff --git a/src/backend_x64/abi.h b/src/backend_x64/abi.h
new file mode 100644
index 00000000..89912dff
--- /dev/null
+++ b/src/backend_x64/abi.h
@@ -0,0 +1,119 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2016 MerryMage
+ * This software may be used and distributed according to the terms of the GNU
+ * General Public License version 2 or any later version.
+ */
+
+#pragma once
+
+#include <array>
+
+#include "backend_x64/hostloc.h"
+
+namespace Dynarmic {
+namespace BackendX64 {
+
+#ifdef _WIN32
+
+constexpr HostLoc ABI_RETURN = HostLoc::RAX;
+
+constexpr HostLoc ABI_PARAM1 = HostLoc::RCX;
+constexpr HostLoc ABI_PARAM2 = HostLoc::RDX;
+constexpr HostLoc ABI_PARAM3 = HostLoc::R8;
+constexpr HostLoc ABI_PARAM4 = HostLoc::R9;
+
+constexpr std::array<HostLoc, 13> ABI_ALL_CALLER_SAVE = {
+    HostLoc::RAX,
+    HostLoc::RCX,
+    HostLoc::RDX,
+    HostLoc::R8,
+    HostLoc::R9,
+    HostLoc::R10,
+    HostLoc::R11,
+    HostLoc::XMM0,
+    HostLoc::XMM1,
+    HostLoc::XMM2,
+    HostLoc::XMM3,
+    HostLoc::XMM4,
+    HostLoc::XMM5,
+};
+
+constexpr std::array<HostLoc, 18> ABI_ALL_CALLEE_SAVE = {
+    HostLoc::RBX,
+    HostLoc::RSI,
+    HostLoc::RDI,
+    HostLoc::RBP,
+    HostLoc::R12,
+    HostLoc::R13,
+    HostLoc::R14,
+    HostLoc::R15,
+    HostLoc::XMM6,
+    HostLoc::XMM7,
+    HostLoc::XMM8,
+    HostLoc::XMM9,
+    HostLoc::XMM10,
+    HostLoc::XMM11,
+    HostLoc::XMM12,
+    HostLoc::XMM13,
+    HostLoc::XMM14,
+    HostLoc::XMM15,
+};
+
+constexpr size_t ABI_SHADOW_SPACE = 32; // bytes
+
+#else
+
+constexpr HostLoc ABI_RETURN = HostLoc::RAX;
+
+constexpr HostLoc ABI_PARAM1 = HostLoc::RDI;
+constexpr HostLoc ABI_PARAM2 = HostLoc::RSI;
+constexpr HostLoc ABI_PARAM3 = HostLoc::RDX;
+constexpr HostLoc ABI_PARAM4 = HostLoc::RCX;
+
+constexpr std::array<HostLoc, 25> ABI_ALL_CALLER_SAVE = {
+    HostLoc::RAX,
+    HostLoc::RCX,
+    HostLoc::RDX,
+    HostLoc::RDI,
+    HostLoc::RSI,
+    HostLoc::R8,
+    HostLoc::R9,
+    HostLoc::R10,
+    HostLoc::R11,
+    HostLoc::XMM0,
+    HostLoc::XMM1,
+    HostLoc::XMM2,
+    HostLoc::XMM3,
+    HostLoc::XMM4,
+    HostLoc::XMM5,
+    HostLoc::XMM6,
+    HostLoc::XMM7,
+    HostLoc::XMM8,
+    HostLoc::XMM9,
+    HostLoc::XMM10,
+    HostLoc::XMM11,
+    HostLoc::XMM12,
+    HostLoc::XMM13,
+    HostLoc::XMM14,
+    HostLoc::XMM15,
+};
+
+constexpr std::array<HostLoc, 6> ABI_ALL_CALLEE_SAVE = {
+    HostLoc::RBX,
+    HostLoc::RBP,
+    HostLoc::R12,
+    HostLoc::R13,
+    HostLoc::R14,
+    HostLoc::R15,
+};
+
+constexpr size_t ABI_SHADOW_SPACE = 0; // bytes
+
+#endif
+
+static_assert(ABI_ALL_CALLER_SAVE.size() + ABI_ALL_CALLEE_SAVE.size() == 31, "Invalid total number of registers");
+
+void ABI_PushCalleeSaveRegistersAndAdjustStack(Xbyak::CodeGenerator* code, size_t frame_size = 0);
+void ABI_PopCalleeSaveRegistersAndAdjustStack(Xbyak::CodeGenerator* code, size_t frame_size = 0);
+
+} // namespace BackendX64
+} // namespace Dynarmic
diff --git a/src/backend_x64/block_of_code.cpp b/src/backend_x64/block_of_code.cpp
index 7a22a070..1d276989 100644
--- a/src/backend_x64/block_of_code.cpp
+++ b/src/backend_x64/block_of_code.cpp
@@ -6,27 +6,22 @@
 #include
+#include <xbyak.h>
+
+#include "backend_x64/abi.h"
 #include "backend_x64/block_of_code.h"
 #include "backend_x64/jitstate.h"
-#include "common/x64/abi.h"
-
-using namespace Gen;
+#include "common/assert.h"
 
 namespace Dynarmic {
 namespace BackendX64 {
 
-BlockOfCode::BlockOfCode() : Gen::XCodeBlock() {
-    AllocCodeSpace(128 * 1024 * 1024);
+BlockOfCode::BlockOfCode() : Xbyak::CodeGenerator(128 * 1024 * 1024) {
     ClearCache(false);
 }
 
 void BlockOfCode::ClearCache(bool poison_memory) {
-    if (poison_memory) {
-        ClearCodeSpace();
-    } else {
-        ResetCodePtr();
-    }
-
+    reset();
     GenConstants();
     GenRunCode();
     GenReturnFromRunCode();
@@ -42,68 +37,116 @@ size_t BlockOfCode::RunCode(JitState* jit_state, CodePtr basic_block, size_t cyc
 }
 
 void BlockOfCode::ReturnFromRunCode(bool MXCSR_switch) {
-    JMP(MXCSR_switch ? return_from_run_code : return_from_run_code_without_mxcsr_switch, true);
+    jmp(MXCSR_switch ? return_from_run_code : return_from_run_code_without_mxcsr_switch);
 }
 
 void BlockOfCode::GenConstants() {
-    const_FloatNegativeZero32 = AlignCode16();
-    Write32(0x80000000u);
-    const_FloatNaN32 = AlignCode16();
-    Write32(0x7fc00000u);
-    const_FloatNonSignMask32 = AlignCode16();
-    Write64(0x7fffffffu);
-    const_FloatNegativeZero64 = AlignCode16();
-    Write64(0x8000000000000000u);
-    const_FloatNaN64 = AlignCode16();
-    Write64(0x7ff8000000000000u);
-    const_FloatNonSignMask64 = AlignCode16();
-    Write64(0x7fffffffffffffffu);
-    const_FloatPenultimatePositiveDenormal64 = AlignCode16();
-    Write64(0x000ffffffffffffeu);
-    const_FloatMinS32 = AlignCode16();
-    Write64(0xc1e0000000000000u); // -2147483648 as a double
-    const_FloatMaxS32 = AlignCode16();
-    Write64(0x41dfffffffc00000u); // 2147483647 as a double
-    const_FloatPositiveZero32 = const_FloatPositiveZero64 = const_FloatMinU32 = AlignCode16();
-    Write64(0x0000000000000000u); // 0 as a double
-    const_FloatMaxU32 = AlignCode16();
-    Write64(0x41efffffffe00000u); // 4294967295 as a double
-    AlignCode16();
+    align();
+    L(const_FloatNegativeZero32);
+    dd(0x80000000u);
+
+    align();
+    L(const_FloatNaN32);
+    dd(0x7fc00000u);
+
+    align();
+    L(const_FloatNonSignMask32);
+    dq(0x7fffffffu);
+
+    align();
+    L(const_FloatNegativeZero64);
+    dq(0x8000000000000000u);
+
+    align();
+    L(const_FloatNaN64);
+    dq(0x7ff8000000000000u);
+
+    align();
+    L(const_FloatNonSignMask64);
+    dq(0x7fffffffffffffffu);
+
+    align();
+    L(const_FloatPenultimatePositiveDenormal64);
+    dq(0x000ffffffffffffeu);
+
+    align();
+    L(const_FloatMinS32);
+    dq(0xc1e0000000000000u); // -2147483648 as a double
+
+    align();
+    L(const_FloatMaxS32);
+    dq(0x41dfffffffc00000u); // 2147483647 as a double
+
+    align();
+    L(const_FloatPositiveZero32);
+    L(const_FloatPositiveZero64);
+    L(const_FloatMinU32);
+    dq(0x0000000000000000u); // 0 as a double
+
+    align();
+    L(const_FloatMaxU32);
+    dq(0x41efffffffe00000u); // 4294967295 as a double
+
+    align();
 }
 
 void BlockOfCode::GenRunCode() {
-    run_code = reinterpret_cast<RunCodeFuncType>(const_cast<u8*>(GetCodePtr()));
+    align();
+    run_code = getCurr<RunCodeFuncType>();
 
     // This serves two purposes:
     // 1. It saves all the registers we as a callee need to save.
     // 2. It aligns the stack so that the code the JIT emits can assume
     //    that the stack is appropriately aligned for CALLs.
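+    // Concretely: the caller's CALL into run_code leaves RSP % 16 == 8, and the
+    // pushes plus the SUB emitted by ABI_PushCalleeSaveRegistersAndAdjustStack
+    // return it to a 16-byte boundary before guest code runs.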
- ABI_PushRegistersAndAdjustStack(ABI_ALL_CALLEE_SAVED, 8); + ABI_PushCalleeSaveRegistersAndAdjustStack(this); - MOV(64, R(R15), R(ABI_PARAM1)); + mov(r15, ABI_PARAM1); SwitchMxcsrOnEntry(); - JMPptr(R(ABI_PARAM2)); + jmp(ABI_PARAM2); } void BlockOfCode::GenReturnFromRunCode() { - return_from_run_code = GetCodePtr(); + return_from_run_code = getCurr(); SwitchMxcsrOnExit(); - return_from_run_code_without_mxcsr_switch = GetCodePtr(); + return_from_run_code_without_mxcsr_switch = getCurr(); - ABI_PopRegistersAndAdjustStack(ABI_ALL_CALLEE_SAVED, 8); - RET(); + ABI_PopCalleeSaveRegistersAndAdjustStack(this); + ret(); } void BlockOfCode::SwitchMxcsrOnEntry() { - STMXCSR(MDisp(R15, offsetof(JitState, save_host_MXCSR))); - LDMXCSR(MDisp(R15, offsetof(JitState, guest_MXCSR))); + stmxcsr(dword[r15 + offsetof(JitState, save_host_MXCSR)]); + ldmxcsr(dword[r15 + offsetof(JitState, guest_MXCSR)]); } void BlockOfCode::SwitchMxcsrOnExit() { - STMXCSR(MDisp(R15, offsetof(JitState, guest_MXCSR))); - LDMXCSR(MDisp(R15, offsetof(JitState, save_host_MXCSR))); + stmxcsr(dword[r15 + offsetof(JitState, guest_MXCSR)]); + ldmxcsr(dword[r15 + offsetof(JitState, save_host_MXCSR)]); +} + +void BlockOfCode::CallFunction(const void* fn) { + u64 distance = u64(fn) - (getCurr() + 5); + if (distance >= 0x0000000080000000ULL && distance < 0xFFFFFFFF80000000ULL) { + // Far call + mov(rax, u64(fn)); + call(rax); + } else { + call(fn); + } +} + +void BlockOfCode::SetCodePtr(CodePtr ptr) { + // The "size" defines where top_, the insertion point, is. + size_t required_size = reinterpret_cast(ptr) - getCode(); + setSize(required_size); +} + +void BlockOfCode::EnsurePatchLocationSize(CodePtr begin, size_t size) { + size_t current_size = getCurr() - reinterpret_cast(begin); + ASSERT(current_size <= size); + nop(size - current_size); } } // namespace BackendX64 diff --git a/src/backend_x64/block_of_code.h b/src/backend_x64/block_of_code.h index 870e8b66..49314a5c 100644 --- a/src/backend_x64/block_of_code.h +++ b/src/backend_x64/block_of_code.h @@ -7,15 +7,17 @@ #pragma once #include +#include + +#include #include "backend_x64/jitstate.h" #include "common/common_types.h" -#include "common/x64/emitter.h" namespace Dynarmic { namespace BackendX64 { -class BlockOfCode final : public Gen::XCodeBlock { +class BlockOfCode final : public Xbyak::CodeGenerator { public: BlockOfCode(); @@ -30,73 +32,99 @@ public: void SwitchMxcsrOnEntry(); /// Code emitter: Makes saved host MXCSR the current MXCSR void SwitchMxcsrOnExit(); + /// Code emitter: Calls the function + void CallFunction(const void* fn); - Gen::OpArg MFloatPositiveZero32() const { - return Gen::M(const_FloatPositiveZero32); + Xbyak::Address MFloatPositiveZero32() { + return xword[rip + const_FloatPositiveZero32]; } - Gen::OpArg MFloatNegativeZero32() const { - return Gen::M(const_FloatNegativeZero32); + Xbyak::Address MFloatNegativeZero32() { + return xword[rip + const_FloatNegativeZero32]; } - Gen::OpArg MFloatNaN32() const { - return Gen::M(const_FloatNaN32); + Xbyak::Address MFloatNaN32() { + return xword[rip + const_FloatNaN32]; } - Gen::OpArg MFloatNonSignMask32() const { - return Gen::M(const_FloatNonSignMask32); + Xbyak::Address MFloatNonSignMask32() { + return xword[rip + const_FloatNonSignMask32]; } - Gen::OpArg MFloatPositiveZero64() const { - return Gen::M(const_FloatPositiveZero64); + Xbyak::Address MFloatPositiveZero64() { + return xword[rip + const_FloatPositiveZero64]; } - Gen::OpArg MFloatNegativeZero64() const { - return Gen::M(const_FloatNegativeZero64); + 
Xbyak::Address MFloatNegativeZero64() { + return xword[rip + const_FloatNegativeZero64]; } - Gen::OpArg MFloatNaN64() const { - return Gen::M(const_FloatNaN64); + Xbyak::Address MFloatNaN64() { + return xword[rip + const_FloatNaN64]; } - Gen::OpArg MFloatNonSignMask64() const { - return Gen::M(const_FloatNonSignMask64); + Xbyak::Address MFloatNonSignMask64() { + return xword[rip + const_FloatNonSignMask64]; } - Gen::OpArg MFloatPenultimatePositiveDenormal64() const { - return Gen::M(const_FloatPenultimatePositiveDenormal64); + Xbyak::Address MFloatPenultimatePositiveDenormal64() { + return xword[rip + const_FloatPenultimatePositiveDenormal64]; } - Gen::OpArg MFloatMinS32() const { - return Gen::M(const_FloatMinS32); + Xbyak::Address MFloatMinS32() { + return xword[rip + const_FloatMinS32]; } - Gen::OpArg MFloatMaxS32() const { - return Gen::M(const_FloatMaxS32); + Xbyak::Address MFloatMaxS32() { + return xword[rip + const_FloatMaxS32]; } - Gen::OpArg MFloatMinU32() const { - return Gen::M(const_FloatMinU32); + Xbyak::Address MFloatMinU32() { + return xword[rip + const_FloatMinU32]; } - Gen::OpArg MFloatMaxU32() const { - return Gen::M(const_FloatMaxU32); + Xbyak::Address MFloatMaxU32() { + return xword[rip + const_FloatMaxU32]; } - CodePtr GetReturnFromRunCodeAddress() const { + const void* GetReturnFromRunCodeAddress() const { return return_from_run_code; } + void int3() { db(0xCC); } + void nop(size_t size = 0) { + for (size_t i = 0; i < size; i++) { + db(0x90); + } + } + + void SetCodePtr(CodePtr ptr); + void EnsurePatchLocationSize(CodePtr begin, size_t size); + +#ifdef _WIN32 + Xbyak::Reg64 ABI_RETURN = rax; + Xbyak::Reg64 ABI_PARAM1 = rcx; + Xbyak::Reg64 ABI_PARAM2 = rdx; + Xbyak::Reg64 ABI_PARAM3 = r8; + Xbyak::Reg64 ABI_PARAM4 = r9; +#else + Xbyak::Reg64 ABI_RETURN = rax; + Xbyak::Reg64 ABI_PARAM1 = rdi; + Xbyak::Reg64 ABI_PARAM2 = rsi; + Xbyak::Reg64 ABI_PARAM3 = rdx; + Xbyak::Reg64 ABI_PARAM4 = rcx; +#endif + private: - const u8* const_FloatPositiveZero32 = nullptr; - const u8* const_FloatNegativeZero32 = nullptr; - const u8* const_FloatNaN32 = nullptr; - const u8* const_FloatNonSignMask32 = nullptr; - const u8* const_FloatPositiveZero64 = nullptr; - const u8* const_FloatNegativeZero64 = nullptr; - const u8* const_FloatNaN64 = nullptr; - const u8* const_FloatNonSignMask64 = nullptr; - const u8* const_FloatPenultimatePositiveDenormal64 = nullptr; - const u8* const_FloatMinS32 = nullptr; - const u8* const_FloatMaxS32 = nullptr; - const u8* const_FloatMinU32 = nullptr; - const u8* const_FloatMaxU32 = nullptr; + Xbyak::Label const_FloatPositiveZero32; + Xbyak::Label const_FloatNegativeZero32; + Xbyak::Label const_FloatNaN32; + Xbyak::Label const_FloatNonSignMask32; + Xbyak::Label const_FloatPositiveZero64; + Xbyak::Label const_FloatNegativeZero64; + Xbyak::Label const_FloatNaN64; + Xbyak::Label const_FloatNonSignMask64; + Xbyak::Label const_FloatPenultimatePositiveDenormal64; + Xbyak::Label const_FloatMinS32; + Xbyak::Label const_FloatMaxS32; + Xbyak::Label const_FloatMinU32; + Xbyak::Label const_FloatMaxU32; void GenConstants(); using RunCodeFuncType = void(*)(JitState*, CodePtr); RunCodeFuncType run_code = nullptr; void GenRunCode(); - CodePtr return_from_run_code = nullptr; - CodePtr return_from_run_code_without_mxcsr_switch = nullptr; + const void* return_from_run_code = nullptr; + const void* return_from_run_code_without_mxcsr_switch = nullptr; void GenReturnFromRunCode(); }; diff --git a/src/backend_x64/emit_x64.cpp b/src/backend_x64/emit_x64.cpp index a70f5687..be9db7e2 100644 
--- a/src/backend_x64/emit_x64.cpp
+++ b/src/backend_x64/emit_x64.cpp
@@ -9,37 +9,36 @@
 #include
 #include "backend_x64/emit_x64.h"
-#include "common/x64/abi.h"
-#include "common/x64/emitter.h"
+#include "backend_x64/jitstate.h"
 #include "frontend/arm_types.h"
 
-// TODO: More optimal use of immediates.
 // TODO: Have ARM flags in host flags and not have them use up GPR registers unless necessary.
 // TODO: Actually implement that proper instruction selector you've always wanted to sweetheart.
 
-using namespace Gen;
-
 namespace Dynarmic {
 namespace BackendX64 {
 
-static OpArg MJitStateReg(Arm::Reg reg) {
-    return MDisp(R15, offsetof(JitState, Reg) + sizeof(u32) * static_cast<size_t>(reg));
+static Xbyak::Address MJitStateReg(Arm::Reg reg) {
+    using namespace Xbyak::util;
+    return dword[r15 + offsetof(JitState, Reg) + sizeof(u32) * static_cast<size_t>(reg)];
 }
 
-static OpArg MJitStateExtReg(Arm::ExtReg reg) {
+static Xbyak::Address MJitStateExtReg(Arm::ExtReg reg) {
+    using namespace Xbyak::util;
     if (reg >= Arm::ExtReg::S0 && reg <= Arm::ExtReg::S31) {
         size_t index = static_cast<size_t>(reg) - static_cast<size_t>(Arm::ExtReg::S0);
-        return MDisp(R15, int(offsetof(JitState, ExtReg) + sizeof(u32) * index));
+        return dword[r15 + offsetof(JitState, ExtReg) + sizeof(u32) * index];
     }
     if (reg >= Arm::ExtReg::D0 && reg <= Arm::ExtReg::D31) {
         size_t index = static_cast<size_t>(reg) - static_cast<size_t>(Arm::ExtReg::D0);
-        return MDisp(R15, int(offsetof(JitState, ExtReg) + sizeof(u64) * index));
+        return qword[r15 + offsetof(JitState, ExtReg) + sizeof(u64) * index];
     }
     ASSERT_MSG(false, "Should never happen.");
 }
 
-static OpArg MJitStateCpsr() {
-    return MDisp(R15, offsetof(JitState, Cpsr));
+static Xbyak::Address MJitStateCpsr() {
+    using namespace Xbyak::util;
+    return dword[r15 + offsetof(JitState, Cpsr)];
 }
 
 static IR::Inst* FindUseWithOpcode(IR::Inst* inst, IR::Opcode opcode) {
@@ -64,8 +63,8 @@ EmitX64::BlockDescriptor EmitX64::Emit(const Arm::LocationDescriptor descriptor,
     inhibit_emission.clear();
     reg_alloc.Reset();
 
-    code->INT3();
-    const CodePtr code_ptr = code->GetCodePtr();
+    code->int3();
+    const CodePtr code_ptr = code->getCurr();
     basic_blocks[descriptor].code_ptr = code_ptr;
     unique_hash_to_code_ptr[descriptor.UniqueHash()] = code_ptr;
 
@@ -98,12 +97,12 @@ EmitX64::BlockDescriptor EmitX64::Emit(const Arm::LocationDescriptor descriptor,
     reg_alloc.AssertNoMoreUses();
 
     Patch(descriptor, code_ptr);
-    basic_blocks[descriptor].size = code->GetCodePtr() - code_ptr;
+    basic_blocks[descriptor].size = std::intptr_t(code->getCurr()) - std::intptr_t(code_ptr);
     return basic_blocks[descriptor];
 }
 
 void EmitX64::EmitBreakpoint(IR::Block&, IR::Inst*) {
-    code->INT3();
+    code->int3();
 }
 
 void EmitX64::EmitIdentity(IR::Block& block, IR::Inst* inst) {
@@ -114,54 +113,64 @@ void EmitX64::EmitIdentity(IR::Block& block, IR::Inst* inst) {
 
 void EmitX64::EmitGetRegister(IR::Block&, IR::Inst* inst) {
     Arm::Reg reg = inst->GetArg(0).GetRegRef();
-    X64Reg result = reg_alloc.DefRegister(inst, any_gpr);
-    code->MOV(32, R(result), MJitStateReg(reg));
+    Xbyak::Reg32 result = reg_alloc.DefGpr(inst).cvt32();
+    code->mov(result, MJitStateReg(reg));
 }
 
 void EmitX64::EmitGetExtendedRegister32(IR::Block& block, IR::Inst* inst) {
     Arm::ExtReg reg = inst->GetArg(0).GetExtRegRef();
     ASSERT(reg >= Arm::ExtReg::S0 && reg <= Arm::ExtReg::S31);
 
-    X64Reg result = reg_alloc.DefRegister(inst, any_xmm);
-    code->MOVSS(result, MJitStateExtReg(reg));
+    Xbyak::Xmm result = reg_alloc.DefXmm(inst);
+    code->movss(result, MJitStateExtReg(reg));
 }
 
 void EmitX64::EmitGetExtendedRegister64(IR::Block&, IR::Inst*
inst) { Arm::ExtReg reg = inst->GetArg(0).GetExtRegRef(); ASSERT(reg >= Arm::ExtReg::D0 && reg <= Arm::ExtReg::D31); - X64Reg result = reg_alloc.DefRegister(inst, any_xmm); - code->MOVSD(result, MJitStateExtReg(reg)); + Xbyak::Xmm result = reg_alloc.DefXmm(inst); + code->movsd(result, MJitStateExtReg(reg)); } void EmitX64::EmitSetRegister(IR::Block&, IR::Inst* inst) { Arm::Reg reg = inst->GetArg(0).GetRegRef(); IR::Value arg = inst->GetArg(1); if (arg.IsImmediate()) { - code->MOV(32, MJitStateReg(reg), Imm32(arg.GetU32())); + code->mov(MJitStateReg(reg), arg.GetU32()); } else { - X64Reg to_store = reg_alloc.UseRegister(arg.GetInst(), any_gpr); - code->MOV(32, MJitStateReg(reg), R(to_store)); + Xbyak::Reg32 to_store = reg_alloc.UseGpr(arg).cvt32(); + code->mov(MJitStateReg(reg), to_store); } } void EmitX64::EmitSetExtendedRegister32(IR::Block&, IR::Inst* inst) { Arm::ExtReg reg = inst->GetArg(0).GetExtRegRef(); ASSERT(reg >= Arm::ExtReg::S0 && reg <= Arm::ExtReg::S31); - X64Reg source = reg_alloc.UseRegister(inst->GetArg(1), any_xmm); - code->MOVSS(MJitStateExtReg(reg), source); + Xbyak::Xmm source = reg_alloc.UseXmm(inst->GetArg(1)); + code->movss(MJitStateExtReg(reg), source); } void EmitX64::EmitSetExtendedRegister64(IR::Block&, IR::Inst* inst) { Arm::ExtReg reg = inst->GetArg(0).GetExtRegRef(); ASSERT(reg >= Arm::ExtReg::D0 && reg <= Arm::ExtReg::D31); - X64Reg source = reg_alloc.UseRegister(inst->GetArg(1), any_xmm); - code->MOVSD(MJitStateExtReg(reg), source); + Xbyak::Xmm source = reg_alloc.UseXmm(inst->GetArg(1)); + code->movsd(MJitStateExtReg(reg), source); +} + +void EmitX64::EmitGetCpsr(IR::Block&, IR::Inst* inst) { + Xbyak::Reg32 result = reg_alloc.DefGpr(inst).cvt32(); + code->mov(result, MJitStateCpsr()); +} + +void EmitX64::EmitSetCpsr(IR::Block&, IR::Inst* inst) { + Xbyak::Reg32 arg = reg_alloc.UseGpr(inst->GetArg(0)).cvt32(); + code->mov(MJitStateCpsr(), arg); } void EmitX64::EmitGetNFlag(IR::Block&, IR::Inst* inst) { - X64Reg result = reg_alloc.DefRegister(inst, any_gpr); - code->MOV(32, R(result), MJitStateCpsr()); - code->SHR(32, R(result), Imm8(31)); + Xbyak::Reg32 result = reg_alloc.DefGpr(inst).cvt32(); + code->mov(result, MJitStateCpsr()); + code->shr(result, 31); } void EmitX64::EmitSetNFlag(IR::Block&, IR::Inst* inst) { @@ -170,24 +179,24 @@ void EmitX64::EmitSetNFlag(IR::Block&, IR::Inst* inst) { IR::Value arg = inst->GetArg(0); if (arg.IsImmediate()) { if (arg.GetU1()) { - code->OR(32, MJitStateCpsr(), Imm32(flag_mask)); + code->or_(MJitStateCpsr(), flag_mask); } else { - code->AND(32, MJitStateCpsr(), Imm32(~flag_mask)); + code->and_(MJitStateCpsr(), ~flag_mask); } } else { - X64Reg to_store = reg_alloc.UseScratchRegister(arg.GetInst(), any_gpr); + Xbyak::Reg32 to_store = reg_alloc.UseScratchGpr(arg).cvt32(); - code->SHL(32, R(to_store), Imm8(flag_bit)); - code->AND(32, MJitStateCpsr(), Imm32(~flag_mask)); - code->OR(32, MJitStateCpsr(), R(to_store)); + code->shl(to_store, flag_bit); + code->and_(MJitStateCpsr(), ~flag_mask); + code->or_(MJitStateCpsr(), to_store); } } void EmitX64::EmitGetZFlag(IR::Block&, IR::Inst* inst) { - X64Reg result = reg_alloc.DefRegister(inst, any_gpr); - code->MOV(32, R(result), MJitStateCpsr()); - code->SHR(32, R(result), Imm8(30)); - code->AND(32, R(result), Imm32(1)); + Xbyak::Reg32 result = reg_alloc.DefGpr(inst).cvt32(); + code->mov(result, MJitStateCpsr()); + code->shr(result, 30); + code->and_(result, 1); } void EmitX64::EmitSetZFlag(IR::Block&, IR::Inst* inst) { @@ -196,34 +205,24 @@ void EmitX64::EmitSetZFlag(IR::Block&, 
IR::Inst* inst) { IR::Value arg = inst->GetArg(0); if (arg.IsImmediate()) { if (arg.GetU1()) { - code->OR(32, MJitStateCpsr(), Imm32(flag_mask)); + code->or_(MJitStateCpsr(), flag_mask); } else { - code->AND(32, MJitStateCpsr(), Imm32(~flag_mask)); + code->and_(MJitStateCpsr(), ~flag_mask); } } else { - X64Reg to_store = reg_alloc.UseScratchRegister(arg.GetInst(), any_gpr); + Xbyak::Reg32 to_store = reg_alloc.UseScratchGpr(arg).cvt32(); - code->SHL(32, R(to_store), Imm8(flag_bit)); - code->AND(32, MJitStateCpsr(), Imm32(~flag_mask)); - code->OR(32, MJitStateCpsr(), R(to_store)); + code->shl(to_store, flag_bit); + code->and_(MJitStateCpsr(), ~flag_mask); + code->or_(MJitStateCpsr(), to_store); } } -void EmitX64::EmitGetCpsr(IR::Block&, IR::Inst* inst) { - X64Reg result = reg_alloc.DefRegister(inst, any_gpr); - code->MOV(32, R(result), MJitStateCpsr()); -} - -void EmitX64::EmitSetCpsr(IR::Block&, IR::Inst* inst) { - X64Reg arg = reg_alloc.UseRegister(inst->GetArg(0), any_gpr); - code->MOV(32, MJitStateCpsr(), R(arg)); -} - void EmitX64::EmitGetCFlag(IR::Block&, IR::Inst* inst) { - X64Reg result = reg_alloc.DefRegister(inst, any_gpr); - code->MOV(32, R(result), MJitStateCpsr()); - code->SHR(32, R(result), Imm8(29)); - code->AND(32, R(result), Imm32(1)); + Xbyak::Reg32 result = reg_alloc.DefGpr(inst).cvt32(); + code->mov(result, MJitStateCpsr()); + code->shr(result, 29); + code->and_(result, 1); } void EmitX64::EmitSetCFlag(IR::Block&, IR::Inst* inst) { @@ -232,24 +231,24 @@ void EmitX64::EmitSetCFlag(IR::Block&, IR::Inst* inst) { IR::Value arg = inst->GetArg(0); if (arg.IsImmediate()) { if (arg.GetU1()) { - code->OR(32, MJitStateCpsr(), Imm32(flag_mask)); + code->or_(MJitStateCpsr(), flag_mask); } else { - code->AND(32, MJitStateCpsr(), Imm32(~flag_mask)); + code->and_(MJitStateCpsr(), ~flag_mask); } } else { - X64Reg to_store = reg_alloc.UseScratchRegister(arg.GetInst(), any_gpr); + Xbyak::Reg32 to_store = reg_alloc.UseScratchGpr(arg).cvt32(); - code->SHL(32, R(to_store), Imm8(flag_bit)); - code->AND(32, MJitStateCpsr(), Imm32(~flag_mask)); - code->OR(32, MJitStateCpsr(), R(to_store)); + code->shl(to_store, flag_bit); + code->and_(MJitStateCpsr(), ~flag_mask); + code->or_(MJitStateCpsr(), to_store); } } void EmitX64::EmitGetVFlag(IR::Block&, IR::Inst* inst) { - X64Reg result = reg_alloc.DefRegister(inst, any_gpr); - code->MOV(32, R(result), MJitStateCpsr()); - code->SHR(32, R(result), Imm8(28)); - code->AND(32, R(result), Imm32(1)); + Xbyak::Reg32 result = reg_alloc.DefGpr(inst).cvt32(); + code->mov(result, MJitStateCpsr()); + code->shr(result, 28); + code->and_(result, 1); } void EmitX64::EmitSetVFlag(IR::Block&, IR::Inst* inst) { @@ -258,16 +257,16 @@ void EmitX64::EmitSetVFlag(IR::Block&, IR::Inst* inst) { IR::Value arg = inst->GetArg(0); if (arg.IsImmediate()) { if (arg.GetU1()) { - code->OR(32, MJitStateCpsr(), Imm32(flag_mask)); + code->or_(MJitStateCpsr(), flag_mask); } else { - code->AND(32, MJitStateCpsr(), Imm32(~flag_mask)); + code->and_(MJitStateCpsr(), ~flag_mask); } } else { - X64Reg to_store = reg_alloc.UseScratchRegister(arg.GetInst(), any_gpr); + Xbyak::Reg32 to_store = reg_alloc.UseScratchGpr(arg).cvt32(); - code->SHL(32, R(to_store), Imm8(flag_bit)); - code->AND(32, MJitStateCpsr(), Imm32(~flag_mask)); - code->OR(32, MJitStateCpsr(), R(to_store)); + code->shl(to_store, flag_bit); + code->and_(MJitStateCpsr(), ~flag_mask); + code->or_(MJitStateCpsr(), to_store); } } @@ -277,12 +276,12 @@ void EmitX64::EmitOrQFlag(IR::Block&, IR::Inst* inst) { IR::Value arg = 
inst->GetArg(0); if (arg.IsImmediate()) { if (arg.GetU1()) - code->OR(32, MJitStateCpsr(), Imm32(flag_mask)); + code->or_(MJitStateCpsr(), flag_mask); } else { - X64Reg to_store = reg_alloc.UseScratchRegister(arg.GetInst(), any_gpr); + Xbyak::Reg32 to_store = reg_alloc.UseScratchGpr(arg).cvt32(); - code->SHL(32, R(to_store), Imm8(flag_bit)); - code->OR(32, MJitStateCpsr(), R(to_store)); + code->shl(to_store, flag_bit); + code->or_(MJitStateCpsr(), to_store); } } @@ -303,29 +302,31 @@ void EmitX64::EmitBXWritePC(IR::Block&, IR::Inst* inst) { u32 new_pc = arg.GetU32(); if (Common::Bit<0>(new_pc)) { new_pc &= 0xFFFFFFFE; - code->MOV(32, MJitStateReg(Arm::Reg::PC), Imm32(new_pc)); - code->OR(32, MJitStateCpsr(), Imm32(T_bit)); + code->mov(MJitStateReg(Arm::Reg::PC), new_pc); + code->or_(MJitStateCpsr(), T_bit); } else { new_pc &= 0xFFFFFFFC; - code->MOV(32, MJitStateReg(Arm::Reg::PC), Imm32(new_pc)); - code->AND(32, MJitStateCpsr(), Imm32(~T_bit)); + code->mov(MJitStateReg(Arm::Reg::PC), new_pc); + code->and_(MJitStateCpsr(), ~T_bit); } } else { - X64Reg new_pc = reg_alloc.UseScratchRegister(arg.GetInst(), any_gpr); - X64Reg tmp1 = reg_alloc.ScratchRegister(any_gpr); - X64Reg tmp2 = reg_alloc.ScratchRegister(any_gpr); + using Xbyak::util::ptr; - code->MOV(32, R(tmp1), MJitStateCpsr()); - code->MOV(32, R(tmp2), R(tmp1)); - code->AND(32, R(tmp2), Imm32(~T_bit)); // CPSR.T = 0 - code->OR(32, R(tmp1), Imm32(T_bit)); // CPSR.T = 1 - code->TEST(8, R(new_pc), Imm8(1)); - code->CMOVcc(32, tmp1, R(tmp2), CC_E); // CPSR.T = pc & 1 - code->MOV(32, MJitStateCpsr(), R(tmp1)); - code->LEA(32, tmp2, MComplex(new_pc, new_pc, 1, 0)); - code->OR(32, R(tmp2), Imm32(0xFFFFFFFC)); // tmp2 = pc & 1 ? 0xFFFFFFFE : 0xFFFFFFFC - code->AND(32, R(new_pc), R(tmp2)); - code->MOV(32, MJitStateReg(Arm::Reg::PC), R(new_pc)); + Xbyak::Reg64 new_pc = reg_alloc.UseScratchGpr(arg); + Xbyak::Reg64 tmp1 = reg_alloc.ScratchGpr(); + Xbyak::Reg64 tmp2 = reg_alloc.ScratchGpr(); + + code->mov(tmp1, MJitStateCpsr()); + code->mov(tmp2, tmp1); + code->and_(tmp2, u32(~T_bit)); // CPSR.T = 0 + code->or_(tmp1, u32(T_bit)); // CPSR.T = 1 + code->test(new_pc, u32(1)); + code->cmove(tmp1, tmp2); // CPSR.T = pc & 1 + code->mov(MJitStateCpsr(), tmp1); + code->lea(tmp2, ptr[new_pc + new_pc * 1]); + code->or_(tmp2, u32(0xFFFFFFFC)); // tmp2 = pc & 1 ? 0xFFFFFFFE : 0xFFFFFFFC + code->and_(new_pc, tmp2); + code->mov(MJitStateReg(Arm::Reg::PC), new_pc); } } @@ -335,44 +336,43 @@ void EmitX64::EmitCallSupervisor(IR::Block&, IR::Inst* inst) { reg_alloc.HostCall(nullptr, imm32); code->SwitchMxcsrOnExit(); - code->ABI_CallFunction(reinterpret_cast(cb.CallSVC)); + code->CallFunction(reinterpret_cast(cb.CallSVC)); code->SwitchMxcsrOnEntry(); } void EmitX64::EmitPushRSB(IR::Block&, IR::Inst* inst) { + using namespace Xbyak::util; + ASSERT(inst->GetArg(0).IsImmediate()); u64 imm64 = inst->GetArg(0).GetU64(); - X64Reg code_ptr_reg = reg_alloc.ScratchRegister({HostLoc::RCX}); - X64Reg loc_desc_reg = reg_alloc.ScratchRegister(any_gpr); - X64Reg index_reg = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Reg64 code_ptr_reg = reg_alloc.ScratchGpr({HostLoc::RCX}); + Xbyak::Reg64 loc_desc_reg = reg_alloc.ScratchGpr(); + Xbyak::Reg32 index_reg = reg_alloc.ScratchGpr().cvt32(); u64 code_ptr = unique_hash_to_code_ptr.find(imm64) != unique_hash_to_code_ptr.end() ? 
u64(unique_hash_to_code_ptr[imm64]) : u64(code->GetReturnFromRunCodeAddress()); - code->MOV(32, R(index_reg), MDisp(R15, offsetof(JitState, rsb_ptr))); - code->ADD(32, R(index_reg), Imm8(1)); - code->AND(32, R(index_reg), Imm32(JitState::RSBSize - 1)); + code->mov(index_reg, dword[r15 + offsetof(JitState, rsb_ptr)]); + code->add(index_reg, 1); + code->and_(index_reg, u32(JitState::RSBSize - 1)); - code->MOV(64, R(loc_desc_reg), Imm64(imm64)); - CodePtr patch_location = code->GetCodePtr(); + code->mov(loc_desc_reg, u64(imm64)); + CodePtr patch_location = code->getCurr(); patch_unique_hash_locations[imm64].emplace_back(patch_location); - code->MOV(64, R(code_ptr_reg), Imm64(code_ptr)); // This line has to match up with EmitX64::Patch. - ASSERT((code->GetCodePtr() - patch_location) == 10); + code->mov(code_ptr_reg, u64(code_ptr)); // This line has to match up with EmitX64::Patch. + code->EnsurePatchLocationSize(patch_location, 10); - std::vector fixups; - fixups.reserve(JitState::RSBSize); + Xbyak::Label label; for (size_t i = 0; i < JitState::RSBSize; ++i) { - code->CMP(64, R(loc_desc_reg), MDisp(R15, int(offsetof(JitState, rsb_location_descriptors) + i * sizeof(u64)))); - fixups.push_back(code->J_CC(CC_E)); + code->cmp(loc_desc_reg, qword[r15 + offsetof(JitState, rsb_location_descriptors) + i * sizeof(u64)]); + code->je(label, code->T_SHORT); } - code->MOV(32, MDisp(R15, offsetof(JitState, rsb_ptr)), R(index_reg)); - code->MOV(64, MComplex(R15, index_reg, SCALE_8, offsetof(JitState, rsb_location_descriptors)), R(loc_desc_reg)); - code->MOV(64, MComplex(R15, index_reg, SCALE_8, offsetof(JitState, rsb_codeptrs)), R(code_ptr_reg)); - for (auto f : fixups) { - code->SetJumpTarget(f); - } + code->mov(dword[r15 + offsetof(JitState, rsb_ptr)], index_reg); + code->mov(qword[r15 + index_reg.cvt64() * 8 + offsetof(JitState, rsb_location_descriptors)], loc_desc_reg); + code->mov(qword[r15 + index_reg.cvt64() * 8 + offsetof(JitState, rsb_codeptrs)], code_ptr_reg); + code->L(label); } void EmitX64::EmitGetCarryFromOp(IR::Block&, IR::Inst*) { @@ -385,19 +385,20 @@ void EmitX64::EmitGetOverflowFromOp(IR::Block&, IR::Inst*) { void EmitX64::EmitPack2x32To1x64(IR::Block&, IR::Inst* inst) { OpArg lo; - X64Reg result; + Xbyak::Reg64 result; if (inst->GetArg(0).IsImmediate()) { // TODO: Optimize - result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - lo = Gen::R(result); + result = reg_alloc.UseDefGpr(inst->GetArg(0), inst); + lo = result.cvt32(); } else { - std::tie(lo, result) = reg_alloc.UseDefOpArg(inst->GetArg(0), inst, any_gpr); + std::tie(lo, result) = reg_alloc.UseDefOpArgGpr(inst->GetArg(0), inst); } - X64Reg hi = reg_alloc.UseScratchRegister(inst->GetArg(1), any_gpr); + lo.setBit(32); + Xbyak::Reg64 hi = reg_alloc.UseScratchGpr(inst->GetArg(1)); - code->SHL(64, R(hi), Imm8(32)); - code->MOVZX(64, 32, result, lo); - code->OR(64, R(result), R(hi)); + code->shl(hi, 32); + code->mov(result.cvt32(), *lo); // Zero extend to 64-bits + code->or_(result, hi); } void EmitX64::EmitLeastSignificantWord(IR::Block&, IR::Inst* inst) { @@ -406,16 +407,16 @@ void EmitX64::EmitLeastSignificantWord(IR::Block&, IR::Inst* inst) { void EmitX64::EmitMostSignificantWord(IR::Block& block, IR::Inst* inst) { auto carry_inst = FindUseWithOpcode(inst, IR::Opcode::GetCarryFromOp); - auto result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); + Xbyak::Reg64 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst); - code->SHR(64, R(result), Imm8(32)); + code->shr(result, 32); if (carry_inst) { 
EraseInstruction(block, carry_inst); reg_alloc.DecrementRemainingUses(inst); - X64Reg carry = reg_alloc.DefRegister(carry_inst, any_gpr); + Xbyak::Reg64 carry = reg_alloc.DefGpr(carry_inst); - code->SETcc(CC_C, R(carry)); + code->setc(carry.cvt8()); } } @@ -428,31 +429,31 @@ void EmitX64::EmitLeastSignificantByte(IR::Block&, IR::Inst* inst) { } void EmitX64::EmitMostSignificantBit(IR::Block&, IR::Inst* inst) { - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); // TODO: Flag optimization - code->SHR(32, R(result), Imm8(31)); + code->shr(result, 31); } void EmitX64::EmitIsZero(IR::Block&, IR::Inst* inst) { - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); // TODO: Flag optimization - code->TEST(32, R(result), R(result)); - code->SETcc(CCFlags::CC_E, R(result)); - code->MOVZX(32, 8, result, R(result)); + code->test(result, result); + code->sete(result.cvt8()); + code->movzx(result, result.cvt8()); } void EmitX64::EmitIsZero64(IR::Block&, IR::Inst* inst) { - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); + Xbyak::Reg64 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst); // TODO: Flag optimization - code->TEST(64, R(result), R(result)); - code->SETcc(CCFlags::CC_E, R(result)); - code->MOVZX(32, 8, result, R(result)); + code->test(result, result); + code->sete(result.cvt8()); + code->movzx(result, result.cvt8()); } void EmitX64::EmitLogicalShiftLeft(IR::Block& block, IR::Inst* inst) { @@ -469,26 +470,26 @@ void EmitX64::EmitLogicalShiftLeft(IR::Block& block, IR::Inst* inst) { auto shift_arg = inst->GetArg(1); if (shift_arg.IsImmediate()) { - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); u8 shift = shift_arg.GetU8(); if (shift <= 31) { - code->SHL(32, R(result), Imm8(shift)); + code->shl(result, shift); } else { - code->XOR(32, R(result), R(result)); + code->xor_(result, result); } } else { - X64Reg shift = reg_alloc.UseRegister(shift_arg.GetInst(), {HostLoc::RCX}); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - X64Reg zero = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Reg8 shift = reg_alloc.UseGpr(shift_arg, {HostLoc::RCX}).cvt8(); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); + Xbyak::Reg32 zero = reg_alloc.ScratchGpr().cvt32(); // The 32-bit x64 SHL instruction masks the shift count by 0x1F before performing the shift. // ARM differs from the behaviour: It does not mask the count, so shifts above 31 result in zeros. 
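+            // e.g. a shift amount of 40 becomes 40 & 0x1F = 8 under x64 SHL,
+            // where ARM requires a zero result; the CMP/CMOVNB below
+            // substitutes zero for any shift of 32 or more.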
- code->SHL(32, R(result), R(shift)); - code->XOR(32, R(zero), R(zero)); - code->CMP(8, R(shift), Imm8(32)); - code->CMOVcc(32, result, R(zero), CC_NB); + code->shl(result, shift); + code->xor_(zero, zero); + code->cmp(shift, 32); + code->cmovnb(result, zero); } } else { EraseInstruction(block, carry_inst); @@ -498,51 +499,54 @@ void EmitX64::EmitLogicalShiftLeft(IR::Block& block, IR::Inst* inst) { if (shift_arg.IsImmediate()) { u8 shift = shift_arg.GetU8(); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - X64Reg carry = reg_alloc.UseDefRegister(inst->GetArg(2), carry_inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); + Xbyak::Reg32 carry = reg_alloc.UseDefGpr(inst->GetArg(2), carry_inst).cvt32(); if (shift == 0) { // There is nothing more to do. } else if (shift < 32) { - code->BT(32, R(carry), Imm8(0)); - code->SHL(32, R(result), Imm8(shift)); - code->SETcc(CC_C, R(carry)); + code->bt(carry.cvt32(), 0); + code->shl(result, shift); + code->setc(carry.cvt8()); } else if (shift > 32) { - code->XOR(32, R(result), R(result)); - code->XOR(32, R(carry), R(carry)); + code->xor_(result, result); + code->xor_(carry, carry); } else { - code->MOV(32, R(carry), R(result)); - code->XOR(32, R(result), R(result)); - code->AND(32, R(carry), Imm32(1)); + code->mov(carry, result); + code->xor_(result, result); + code->and_(carry, 1); } } else { - X64Reg shift = reg_alloc.UseRegister(shift_arg.GetInst(), {HostLoc::RCX}); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - X64Reg carry = reg_alloc.UseDefRegister(inst->GetArg(2), carry_inst, any_gpr); + Xbyak::Reg8 shift = reg_alloc.UseGpr(shift_arg, {HostLoc::RCX}).cvt8(); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); + Xbyak::Reg32 carry = reg_alloc.UseDefGpr(inst->GetArg(2), carry_inst).cvt32(); // TODO: Optimize this. 
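+            // ARM LSL carry-out being reproduced: bit (32 - Rs) of the operand
+            // for 1 <= Rs <= 32, zero for Rs > 32, and unchanged for Rs == 0.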
- code->CMP(8, R(shift), Imm8(32)); - auto Rs_gt32 = code->J_CC(CC_A); - auto Rs_eq32 = code->J_CC(CC_E); + code->inLocalLabel(); + + code->cmp(shift, 32); + code->ja(".Rs_gt32"); + code->je(".Rs_eq32"); // if (Rs & 0xFF < 32) { - code->BT(32, R(carry), Imm8(0)); // Set the carry flag for correct behaviour in the case when Rs & 0xFF == 0 - code->SHL(32, R(result), R(shift)); - code->SETcc(CC_C, R(carry)); - auto jmp_to_end_1 = code->J(); + code->bt(carry.cvt32(), 0); // Set the carry flag for correct behaviour in the case when Rs & 0xFF == 0 + code->shl(result, shift); + code->setc(carry.cvt8()); + code->jmp(".end"); // } else if (Rs & 0xFF > 32) { - code->SetJumpTarget(Rs_gt32); - code->XOR(32, R(result), R(result)); - code->XOR(32, R(carry), R(carry)); - auto jmp_to_end_2 = code->J(); + code->L(".Rs_gt32"); + code->xor_(result, result); + code->xor_(carry, carry); + code->jmp(".end"); // } else if (Rs & 0xFF == 32) { - code->SetJumpTarget(Rs_eq32); - code->MOV(32, R(carry), R(result)); - code->AND(32, R(carry), Imm8(1)); - code->XOR(32, R(result), R(result)); + code->L(".Rs_eq32"); + code->mov(carry, result); + code->and_(carry, 1); + code->xor_(result, result); // } - code->SetJumpTarget(jmp_to_end_1); - code->SetJumpTarget(jmp_to_end_2); + code->L(".end"); + + code->outLocalLabel(); } } } @@ -559,26 +563,26 @@ void EmitX64::EmitLogicalShiftRight(IR::Block& block, IR::Inst* inst) { auto shift_arg = inst->GetArg(1); if (shift_arg.IsImmediate()) { - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); u8 shift = shift_arg.GetU8(); if (shift <= 31) { - code->SHR(32, R(result), Imm8(shift)); + code->shr(result, shift); } else { - code->XOR(32, R(result), R(result)); + code->xor_(result, result); } } else { - X64Reg shift = reg_alloc.UseRegister(shift_arg.GetInst(), {HostLoc::RCX}); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - X64Reg zero = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Reg8 shift = reg_alloc.UseGpr(shift_arg, {HostLoc::RCX}).cvt8(); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); + Xbyak::Reg32 zero = reg_alloc.ScratchGpr().cvt32(); // The 32-bit x64 SHR instruction masks the shift count by 0x1F before performing the shift. // ARM differs from the behaviour: It does not mask the count, so shifts above 31 result in zeros. - code->SHR(32, R(result), R(shift)); - code->XOR(32, R(zero), R(zero)); - code->CMP(8, R(shift), Imm8(32)); - code->CMOVcc(32, result, R(zero), CC_NB); + code->shr(result, shift); + code->xor_(zero, zero); + code->cmp(shift, 32); + code->cmovnb(result, zero); } } else { EraseInstruction(block, carry_inst); @@ -588,66 +592,68 @@ void EmitX64::EmitLogicalShiftRight(IR::Block& block, IR::Inst* inst) { if (shift_arg.IsImmediate()) { u8 shift = shift_arg.GetU8(); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - X64Reg carry = reg_alloc.UseDefRegister(inst->GetArg(2), carry_inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); + Xbyak::Reg32 carry = reg_alloc.UseDefGpr(inst->GetArg(2), carry_inst).cvt32(); if (shift == 0) { // There is nothing more to do. 
} else if (shift < 32) { - code->SHR(32, R(result), Imm8(shift)); - code->SETcc(CC_C, R(carry)); + code->shr(result, shift); + code->setc(carry.cvt8()); } else if (shift == 32) { - code->BT(32, R(result), Imm8(31)); - code->SETcc(CC_C, R(carry)); - code->MOV(32, R(result), Imm32(0)); + code->bt(result, 31); + code->setc(carry.cvt8()); + code->mov(result, 0); } else { - code->XOR(32, R(result), R(result)); - code->XOR(32, R(carry), R(carry)); + code->xor_(result, result); + code->xor_(carry, carry); } } else { - X64Reg shift = reg_alloc.UseRegister(shift_arg.GetInst(), {HostLoc::RCX}); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - X64Reg carry = reg_alloc.UseDefRegister(inst->GetArg(2), carry_inst, any_gpr); + Xbyak::Reg8 shift = reg_alloc.UseGpr(shift_arg, {HostLoc::RCX}).cvt8(); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); + Xbyak::Reg32 carry = reg_alloc.UseDefGpr(inst->GetArg(2), carry_inst).cvt32(); // TODO: Optimize this. - code->CMP(8, R(shift), Imm8(32)); - auto Rs_gt32 = code->J_CC(CC_A); - auto Rs_eq32 = code->J_CC(CC_E); + code->inLocalLabel(); + + code->cmp(shift, 32); + code->ja(".Rs_gt32"); + code->je(".Rs_eq32"); // if (Rs & 0xFF == 0) goto end; - code->TEST(8, R(shift), R(shift)); - auto Rs_zero = code->J_CC(CC_Z); + code->test(shift, shift); + code->jz(".end"); // if (Rs & 0xFF < 32) { - code->SHR(32, R(result), R(shift)); - code->SETcc(CC_C, R(carry)); - auto jmp_to_end_1 = code->J(); + code->shr(result, shift); + code->setc(carry.cvt8()); + code->jmp(".end"); // } else if (Rs & 0xFF > 32) { - code->SetJumpTarget(Rs_gt32); - code->XOR(32, R(result), R(result)); - code->XOR(32, R(carry), R(carry)); - auto jmp_to_end_2 = code->J(); + code->L(".Rs_gt32"); + code->xor_(result, result); + code->xor_(carry, carry); + code->jmp(".end"); // } else if (Rs & 0xFF == 32) { - code->SetJumpTarget(Rs_eq32); - code->BT(32, R(result), Imm8(31)); - code->SETcc(CC_C, R(carry)); - code->MOV(32, R(result), Imm32(0)); + code->L(".Rs_eq32"); + code->bt(result, 31); + code->setc(carry.cvt8()); + code->xor_(result, result); // } - code->SetJumpTarget(jmp_to_end_1); - code->SetJumpTarget(jmp_to_end_2); - code->SetJumpTarget(Rs_zero); + code->L(".end"); + + code->outLocalLabel(); } } } void EmitX64::EmitLogicalShiftRight64(IR::Block& block, IR::Inst* inst) { - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); + Xbyak::Reg64 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst); auto shift_arg = inst->GetArg(1); ASSERT_MSG(shift_arg.IsImmediate(), "variable 64 bit shifts are not implemented"); u8 shift = shift_arg.GetU8(); ASSERT_MSG(shift < 64, "shift width clamping is not implemented"); - code->SHR(64, R(result), Imm8(shift)); + code->shr(result.cvt64(), shift); } void EmitX64::EmitArithmeticShiftRight(IR::Block& block, IR::Inst* inst) { @@ -663,23 +669,23 @@ void EmitX64::EmitArithmeticShiftRight(IR::Block& block, IR::Inst* inst) { if (shift_arg.IsImmediate()) { u8 shift = shift_arg.GetU8(); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); - code->SAR(32, R(result), Imm8(shift < 31 ? shift : 31)); + code->sar(result, u8(shift < 31 ? 
shift : 31)); } else { - X64Reg shift = reg_alloc.UseScratchRegister(shift_arg.GetInst(), {HostLoc::RCX}); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - X64Reg const31 = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Reg32 shift = reg_alloc.UseScratchGpr(shift_arg, {HostLoc::RCX}).cvt32(); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); + Xbyak::Reg32 const31 = reg_alloc.ScratchGpr().cvt32(); // The 32-bit x64 SAR instruction masks the shift count by 0x1F before performing the shift. // ARM differs from the behaviour: It does not mask the count. // We note that all shift values above 31 have the same behaviour as 31 does, so we saturate `shift` to 31. - code->MOV(32, R(const31), Imm32(31)); - code->MOVZX(32, 8, shift, R(shift)); - code->CMP(32, R(shift), Imm32(31)); - code->CMOVcc(32, shift, R(const31), CC_G); - code->SAR(32, R(result), R(shift)); + code->mov(const31, 31); + code->movzx(shift, shift.cvt8()); + code->cmp(shift, u32(31)); + code->cmovg(shift, const31); + code->sar(result, shift.cvt8()); } } else { EraseInstruction(block, carry_inst); @@ -689,43 +695,46 @@ void EmitX64::EmitArithmeticShiftRight(IR::Block& block, IR::Inst* inst) { if (shift_arg.IsImmediate()) { u8 shift = shift_arg.GetU8(); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - X64Reg carry = reg_alloc.UseDefRegister(inst->GetArg(2), carry_inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); + Xbyak::Reg8 carry = reg_alloc.UseDefGpr(inst->GetArg(2), carry_inst).cvt8(); if (shift == 0) { // There is nothing more to do. } else if (shift <= 31) { - code->SAR(32, R(result), Imm8(shift)); - code->SETcc(CC_C, R(carry)); + code->sar(result, shift); + code->setc(carry); } else { - code->SAR(32, R(result), Imm8(31)); - code->BT(32, R(result), Imm8(31)); - code->SETcc(CC_C, R(carry)); + code->sar(result, 31); + code->bt(result, 31); + code->setc(carry); } } else { - X64Reg shift = reg_alloc.UseRegister(shift_arg.GetInst(), {HostLoc::RCX}); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - X64Reg carry = reg_alloc.UseDefRegister(inst->GetArg(2), carry_inst, any_gpr); + Xbyak::Reg8 shift = reg_alloc.UseGpr(shift_arg, {HostLoc::RCX}).cvt8(); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); + Xbyak::Reg8 carry = reg_alloc.UseDefGpr(inst->GetArg(2), carry_inst).cvt8(); // TODO: Optimize this. - code->CMP(8, R(shift), Imm8(31)); - auto Rs_gt31 = code->J_CC(CC_A); + code->inLocalLabel(); + + code->cmp(shift, u32(31)); + code->ja(".Rs_gt31"); // if (Rs & 0xFF == 0) goto end; - code->TEST(8, R(shift), R(shift)); - auto Rs_zero = code->J_CC(CC_Z); + code->test(shift, shift); + code->jz(".end"); // if (Rs & 0xFF <= 31) { - code->SAR(32, R(result), R(shift)); - code->SETcc(CC_C, R(carry)); - auto jmp_to_end = code->J(); + code->sar(result, shift); + code->setc(carry); + code->jmp(".end"); // } else if (Rs & 0xFF > 31) { - code->SetJumpTarget(Rs_gt31); - code->SAR(32, R(result), Imm8(31)); // Verified. 
- code->BT(32, R(result), Imm8(31)); - code->SETcc(CC_C, R(carry)); + code->L(".Rs_gt31"); + code->sar(result, 31); // 31 produces the same results as anything above 31 + code->bt(result, 31); + code->setc(carry); // } - code->SetJumpTarget(jmp_to_end); - code->SetJumpTarget(Rs_zero); + code->L(".end"); + + code->outLocalLabel(); } } } @@ -743,15 +752,15 @@ void EmitX64::EmitRotateRight(IR::Block& block, IR::Inst* inst) { if (shift_arg.IsImmediate()) { u8 shift = shift_arg.GetU8(); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); - code->ROR(32, R(result), Imm8(shift & 0x1F)); + code->ror(result, u8(shift & 0x1F)); } else { - X64Reg shift = reg_alloc.UseRegister(shift_arg.GetInst(), {HostLoc::RCX}); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); + Xbyak::Reg8 shift = reg_alloc.UseGpr(shift_arg, {HostLoc::RCX}).cvt8(); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); // x64 ROR instruction does (shift & 0x1F) for us. - code->ROR(32, R(result), R(shift)); + code->ror(result, shift); } } else { EraseInstruction(block, carry_inst); @@ -761,42 +770,45 @@ void EmitX64::EmitRotateRight(IR::Block& block, IR::Inst* inst) { if (shift_arg.IsImmediate()) { u8 shift = shift_arg.GetU8(); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - X64Reg carry = reg_alloc.UseDefRegister(inst->GetArg(2), carry_inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); + Xbyak::Reg8 carry = reg_alloc.UseDefGpr(inst->GetArg(2), carry_inst).cvt8(); if (shift == 0) { // There is nothing more to do. } else if ((shift & 0x1F) == 0) { - code->BT(32, R(result), Imm8(31)); - code->SETcc(CC_C, R(carry)); + code->bt(result, u8(31)); + code->setc(carry); } else { - code->ROR(32, R(result), Imm8(shift)); - code->SETcc(CC_C, R(carry)); + code->ror(result, shift); + code->setc(carry); } } else { - X64Reg shift = reg_alloc.UseScratchRegister(shift_arg.GetInst(), {HostLoc::RCX}); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - X64Reg carry = reg_alloc.UseDefRegister(inst->GetArg(2), carry_inst, any_gpr); + Xbyak::Reg8 shift = reg_alloc.UseScratchGpr(shift_arg, {HostLoc::RCX}).cvt8(); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); + Xbyak::Reg8 carry = reg_alloc.UseDefGpr(inst->GetArg(2), carry_inst).cvt8(); // TODO: Optimize - // if (Rs & 0xFF == 0) goto end; - code->TEST(8, R(shift), R(shift)); - auto Rs_zero = code->J_CC(CC_Z); + code->inLocalLabel(); - code->AND(32, R(shift), Imm8(0x1F)); - auto zero_1F = code->J_CC(CC_Z); + // if (Rs & 0xFF == 0) goto end; + code->test(shift, shift); + code->jz(".end"); + + code->and_(shift.cvt32(), u32(0x1F)); + code->jz(".zero_1F"); // if (Rs & 0x1F != 0) { - code->ROR(32, R(result), R(shift)); - code->SETcc(CC_C, R(carry)); - auto jmp_to_end = code->J(); + code->ror(result, shift); + code->setc(carry); + code->jmp(".end"); // } else { - code->SetJumpTarget(zero_1F); - code->BT(32, R(result), Imm8(31)); - code->SETcc(CC_C, R(carry)); + code->L(".zero_1F"); + code->bt(result, u8(31)); + code->setc(carry); // } - code->SetJumpTarget(jmp_to_end); - code->SetJumpTarget(Rs_zero); + code->L(".end"); + + code->outLocalLabel(); } } } @@ -804,27 +816,28 @@ void EmitX64::EmitRotateRight(IR::Block& block, IR::Inst* inst) { void EmitX64::EmitRotateRightExtended(IR::Block& block, IR::Inst* inst) { auto carry_inst = 
FindUseWithOpcode(inst, IR::Opcode::GetCarryFromOp); - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - X64Reg carry = carry_inst - ? reg_alloc.UseDefRegister(inst->GetArg(1), carry_inst, any_gpr) - : reg_alloc.UseRegister(inst->GetArg(1), any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); + Xbyak::Reg8 carry = carry_inst + ? reg_alloc.UseDefGpr(inst->GetArg(1), carry_inst).cvt8() + : reg_alloc.UseGpr(inst->GetArg(1)).cvt8(); - code->BT(32, R(carry), Imm8(0)); - code->RCR(32, R(result), Imm8(1)); + code->bt(carry.cvt32(), 0); + code->rcr(result, 1); if (carry_inst) { EraseInstruction(block, carry_inst); reg_alloc.DecrementRemainingUses(inst); - code->SETcc(CC_C, R(carry)); + code->setc(carry); } } -static X64Reg DoCarry(RegAlloc& reg_alloc, const IR::Value& carry_in, IR::Inst* carry_out) { +const Xbyak::Reg64 INVALID_REG = Xbyak::Reg64(-1); + +static Xbyak::Reg8 DoCarry(RegAlloc& reg_alloc, const IR::Value& carry_in, IR::Inst* carry_out) { if (carry_in.IsImmediate()) { - return carry_out ? reg_alloc.DefRegister(carry_out, any_gpr) : INVALID_REG; + return carry_out ? reg_alloc.DefGpr(carry_out).cvt8() : INVALID_REG.cvt8(); } else { - IR::Inst* in = carry_in.GetInst(); - return carry_out ? reg_alloc.UseDefRegister(in, carry_out, any_gpr) : reg_alloc.UseRegister(in, any_gpr); + return carry_out ? reg_alloc.UseDefGpr(carry_in, carry_out).cvt8() : reg_alloc.UseGpr(carry_in).cvt8(); } } @@ -836,35 +849,50 @@ void EmitX64::EmitAddWithCarry(IR::Block& block, IR::Inst* inst) { IR::Value b = inst->GetArg(1); IR::Value carry_in = inst->GetArg(2); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_gpr); - X64Reg carry = DoCarry(reg_alloc, carry_in, carry_inst); - X64Reg overflow = overflow_inst ? reg_alloc.DefRegister(overflow_inst, any_gpr) : INVALID_REG; + Xbyak::Reg32 result = reg_alloc.UseDefGpr(a, inst).cvt32(); + Xbyak::Reg8 carry = DoCarry(reg_alloc, carry_in, carry_inst); + Xbyak::Reg8 overflow = overflow_inst ? reg_alloc.DefGpr(overflow_inst).cvt8() : INVALID_REG.cvt8(); // TODO: Consider using LEA. 
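+    // The BT below loads bit 0 of the ARM carry into the host CF so a single
+    // ADC computes a + b + carry_in; SETC/SETO then recover the resulting
+    // carry and overflow for the IR.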
- OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); - - if (carry_in.IsImmediate()) { - if (carry_in.GetU1()) { - code->STC(); - code->ADC(32, R(result), op_arg); + if (b.IsImmediate()) { + u32 op_arg = b.GetU32(); + if (carry_in.IsImmediate()) { + if (carry_in.GetU1()) { + code->stc(); + code->adc(result, op_arg); + } else { + code->add(result, op_arg); + } } else { - code->ADD(32, R(result), op_arg); + code->bt(carry.cvt32(), 0); + code->adc(result, op_arg); } } else { - code->BT(32, R(carry), Imm8(0)); - code->ADC(32, R(result), op_arg); + OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); + op_arg.setBit(32); + if (carry_in.IsImmediate()) { + if (carry_in.GetU1()) { + code->stc(); + code->adc(result, *op_arg); + } else { + code->add(result, *op_arg); + } + } else { + code->bt(carry.cvt32(), 0); + code->adc(result, *op_arg); + } } if (carry_inst) { EraseInstruction(block, carry_inst); reg_alloc.DecrementRemainingUses(inst); - code->SETcc(Gen::CC_C, R(carry)); + code->setc(carry); } if (overflow_inst) { EraseInstruction(block, overflow_inst); reg_alloc.DecrementRemainingUses(inst); - code->SETcc(Gen::CC_O, R(overflow)); + code->seto(overflow); } } @@ -872,10 +900,10 @@ void EmitX64::EmitAdd64(IR::Block& block, IR::Inst* inst) { IR::Value a = inst->GetArg(0); IR::Value b = inst->GetArg(1); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_gpr); - OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); + Xbyak::Reg64 result = reg_alloc.UseDefGpr(a, inst); + Xbyak::Reg64 op_arg = reg_alloc.UseGpr(b); - code->ADD(64, R(result), op_arg); + code->add(result, op_arg); } void EmitX64::EmitSubWithCarry(IR::Block& block, IR::Inst* inst) { @@ -886,38 +914,54 @@ void EmitX64::EmitSubWithCarry(IR::Block& block, IR::Inst* inst) { IR::Value b = inst->GetArg(1); IR::Value carry_in = inst->GetArg(2); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_gpr); - X64Reg carry = DoCarry(reg_alloc, carry_in, carry_inst); - X64Reg overflow = overflow_inst ? reg_alloc.DefRegister(overflow_inst, any_gpr) : INVALID_REG; + Xbyak::Reg32 result = reg_alloc.UseDefGpr(a, inst).cvt32(); + Xbyak::Reg8 carry = DoCarry(reg_alloc, carry_in, carry_inst); + Xbyak::Reg8 overflow = overflow_inst ? reg_alloc.DefGpr(overflow_inst).cvt8() : INVALID_REG.cvt8(); // TODO: Consider using LEA. // TODO: Optimize CMP case. // Note that x64 CF is inverse of what the ARM carry flag is here. 
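The inversion that comment describes: ARM subtraction is a + ~b + carry, so the ARM carry-out is the absence of an x64 borrow. That is why the non-immediate path loads CF via bt and flips it with cmc before sbb, and why the carry-out is captured with setnc rather than setc. Roughly, in scalar form:

    #include <cstdint>

    // ARM SBC semantics; carry_out == !(x64 borrow flag after sbb).
    uint32_t SubWithCarrySketch(uint32_t a, uint32_t b, bool carry_in, bool& carry_out) {
        uint64_t wide = uint64_t(a) + uint64_t(~b) + uint64_t(carry_in);
        carry_out = (wide >> 32) != 0;
        return uint32_t(wide);
    }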
- OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); - - if (carry_in.IsImmediate()) { - if (carry_in.GetU1()) { - code->SUB(32, R(result), op_arg); + if (b.IsImmediate()) { + u32 op_arg = b.GetU32(); + if (carry_in.IsImmediate()) { + if (carry_in.GetU1()) { + code->sub(result, op_arg); + } else { + code->stc(); + code->sbb(result, op_arg); + } } else { - code->STC(); - code->SBB(32, R(result), op_arg); + code->bt(carry.cvt32(), 0); + code->cmc(); + code->sbb(result, op_arg); } } else { - code->BT(32, R(carry), Imm8(0)); - code->CMC(); - code->SBB(32, R(result), op_arg); + OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); + op_arg.setBit(32); + if (carry_in.IsImmediate()) { + if (carry_in.GetU1()) { + code->sub(result, *op_arg); + } else { + code->stc(); + code->sbb(result, *op_arg); + } + } else { + code->bt(carry.cvt32(), 0); + code->cmc(); + code->sbb(result, *op_arg); + } } if (carry_inst) { EraseInstruction(block, carry_inst); reg_alloc.DecrementRemainingUses(inst); - code->SETcc(Gen::CC_NC, R(carry)); + code->setnc(carry); } if (overflow_inst) { EraseInstruction(block, overflow_inst); reg_alloc.DecrementRemainingUses(inst); - code->SETcc(Gen::CC_O, R(overflow)); + code->seto(overflow); } } @@ -925,10 +969,10 @@ void EmitX64::EmitSub64(IR::Block& block, IR::Inst* inst) { IR::Value a = inst->GetArg(0); IR::Value b = inst->GetArg(1); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_gpr); - OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); + Xbyak::Reg64 result = reg_alloc.UseDefGpr(a, inst); + Xbyak::Reg64 op_arg = reg_alloc.UseGpr(b); - code->SUB(64, R(result), op_arg); + code->sub(result, op_arg); } void EmitX64::EmitMul(IR::Block&, IR::Inst* inst) { @@ -937,12 +981,14 @@ void EmitX64::EmitMul(IR::Block&, IR::Inst* inst) { if (a.IsImmediate()) std::swap(a, b); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(a, inst).cvt32(); if (b.IsImmediate()) { - code->IMUL(32, result, R(result), Imm32(b.GetU32())); + code->imul(result, result, b.GetU32()); } else { OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); - code->IMUL(32, result, op_arg); + op_arg.setBit(32); + + code->imul(result, *op_arg); } } @@ -950,288 +996,347 @@ void EmitX64::EmitMul64(IR::Block&, IR::Inst* inst) { IR::Value a = inst->GetArg(0); IR::Value b = inst->GetArg(1); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_gpr); + Xbyak::Reg64 result = reg_alloc.UseDefGpr(a, inst); OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); - code->IMUL(64, result, op_arg); + code->imul(result, *op_arg); } void EmitX64::EmitAnd(IR::Block&, IR::Inst* inst) { IR::Value a = inst->GetArg(0); IR::Value b = inst->GetArg(1); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_gpr); - OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(a, inst).cvt32(); - code->AND(32, R(result), op_arg); + if (b.IsImmediate()) { + u32 op_arg = b.GetU32(); + + code->and_(result, op_arg); + } else { + OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); + op_arg.setBit(32); + + code->and_(result, *op_arg); + } } void EmitX64::EmitEor(IR::Block&, IR::Inst* inst) { IR::Value a = inst->GetArg(0); IR::Value b = inst->GetArg(1); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_gpr); - OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(a, inst).cvt32(); - code->XOR(32, R(result), op_arg); + if (b.IsImmediate()) { + u32 op_arg = b.GetU32(); + + code->xor_(result, op_arg); + } else { + OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); + 
op_arg.setBit(32); + + code->xor_(result, *op_arg); + } } void EmitX64::EmitOr(IR::Block&, IR::Inst* inst) { IR::Value a = inst->GetArg(0); IR::Value b = inst->GetArg(1); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_gpr); - OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(a, inst).cvt32(); - code->OR(32, R(result), op_arg); + if (b.IsImmediate()) { + u32 op_arg = b.GetU32(); + + code->or_(result, op_arg); + } else { + OpArg op_arg = reg_alloc.UseOpArg(b, any_gpr); + op_arg.setBit(32); + + code->or_(result, *op_arg); + } } void EmitX64::EmitNot(IR::Block&, IR::Inst* inst) { IR::Value a = inst->GetArg(0); if (a.IsImmediate()) { - X64Reg result = reg_alloc.DefRegister(inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.DefGpr(inst).cvt32(); - code->MOV(32, R(result), Imm32(~a.GetU32())); + code->mov(result, u32(~a.GetU32())); } else { - X64Reg result = reg_alloc.UseDefRegister(a.GetInst(), inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(a, inst).cvt32(); - code->NOT(32, R(result)); + code->not_(result); } } void EmitX64::EmitSignExtendWordToLong(IR::Block&, IR::Inst* inst) { OpArg source; - X64Reg result; + Xbyak::Reg64 result; if (inst->GetArg(0).IsImmediate()) { // TODO: Optimize - result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - source = Gen::R(result); + result = reg_alloc.UseDefGpr(inst->GetArg(0), inst); + source = result; } else { - std::tie(source, result) = reg_alloc.UseDefOpArg(inst->GetArg(0), inst, any_gpr); + std::tie(source, result) = reg_alloc.UseDefOpArgGpr(inst->GetArg(0), inst); } - code->MOVSX(64, 32, result, source); + source.setBit(32); + code->movsxd(result.cvt64(), *source); } void EmitX64::EmitSignExtendHalfToWord(IR::Block&, IR::Inst* inst) { OpArg source; - X64Reg result; + Xbyak::Reg64 result; if (inst->GetArg(0).IsImmediate()) { // TODO: Optimize - result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - source = Gen::R(result); + result = reg_alloc.UseDefGpr(inst->GetArg(0), inst); + source = result; } else { - std::tie(source, result) = reg_alloc.UseDefOpArg(inst->GetArg(0), inst, any_gpr); + std::tie(source, result) = reg_alloc.UseDefOpArgGpr(inst->GetArg(0), inst); } - code->MOVSX(32, 16, result, source); + source.setBit(16); + code->movsx(result.cvt32(), *source); } void EmitX64::EmitSignExtendByteToWord(IR::Block&, IR::Inst* inst) { OpArg source; - X64Reg result; + Xbyak::Reg64 result; if (inst->GetArg(0).IsImmediate()) { // TODO: Optimize - result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - source = Gen::R(result); + result = reg_alloc.UseDefGpr(inst->GetArg(0), inst); + source = result; } else { - std::tie(source, result) = reg_alloc.UseDefOpArg(inst->GetArg(0), inst, any_gpr); + std::tie(source, result) = reg_alloc.UseDefOpArgGpr(inst->GetArg(0), inst); } - code->MOVSX(32, 8, result, source); + source.setBit(8); + code->movsx(result.cvt32(), *source); } void EmitX64::EmitZeroExtendWordToLong(IR::Block&, IR::Inst* inst) { OpArg source; - X64Reg result; + Xbyak::Reg64 result; if (inst->GetArg(0).IsImmediate()) { // TODO: Optimize - result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - source = Gen::R(result); + result = reg_alloc.UseDefGpr(inst->GetArg(0), inst); + source = result; } else { - std::tie(source, result) = reg_alloc.UseDefOpArg(inst->GetArg(0), inst, any_gpr); + std::tie(source, result) = reg_alloc.UseDefOpArgGpr(inst->GetArg(0), inst); } - code->MOVZX(64, 32, result, source); + source.setBit(32); + 
code->mov(result.cvt32(), *source); // x64 zeros upper 32 bits on a 32-bit move } void EmitX64::EmitZeroExtendHalfToWord(IR::Block&, IR::Inst* inst) { OpArg source; - X64Reg result; + Xbyak::Reg64 result; if (inst->GetArg(0).IsImmediate()) { // TODO: Optimize - result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - source = Gen::R(result); + result = reg_alloc.UseDefGpr(inst->GetArg(0), inst); + source = result; } else { - std::tie(source, result) = reg_alloc.UseDefOpArg(inst->GetArg(0), inst, any_gpr); + std::tie(source, result) = reg_alloc.UseDefOpArgGpr(inst->GetArg(0), inst); } - code->MOVZX(32, 16, result, source); + source.setBit(16); + code->movzx(result.cvt32(), *source); } void EmitX64::EmitZeroExtendByteToWord(IR::Block&, IR::Inst* inst) { OpArg source; - X64Reg result; + Xbyak::Reg64 result; if (inst->GetArg(0).IsImmediate()) { // TODO: Optimize - result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); - source = Gen::R(result); + result = reg_alloc.UseDefGpr(inst->GetArg(0), inst); + source = result; } else { - std::tie(source, result) = reg_alloc.UseDefOpArg(inst->GetArg(0), inst, any_gpr); + std::tie(source, result) = reg_alloc.UseDefOpArgGpr(inst->GetArg(0), inst); } - code->MOVZX(32, 8, result, source); + source.setBit(8); + code->movzx(result.cvt32(), *source); } void EmitX64::EmitByteReverseWord(IR::Block&, IR::Inst* inst) { - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32(); - code->BSWAP(32, result); + code->bswap(result); } void EmitX64::EmitByteReverseHalf(IR::Block&, IR::Inst* inst) { - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); + Xbyak::Reg16 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt16(); - code->ROL(16, R(result), Imm8(8)); + code->rol(result, 8); } void EmitX64::EmitByteReverseDual(IR::Block&, IR::Inst* inst) { - X64Reg result = reg_alloc.UseDefRegister(inst->GetArg(0), inst, any_gpr); + Xbyak::Reg64 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst); - code->BSWAP(64, result); + code->bswap(result); } -static void EmitPackedOperation(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* inst, void (XEmitter::*fn)(X64Reg, const OpArg&)) { +static void EmitPackedOperation(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* inst, void (Xbyak::CodeGenerator::*fn)(const Xbyak::Mmx& mmx, const Xbyak::Operand&)) { IR::Value a = inst->GetArg(0); IR::Value b = inst->GetArg(1); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_gpr); - X64Reg arg = reg_alloc.UseRegister(b, any_gpr); + Xbyak::Reg32 result = reg_alloc.UseDefGpr(a, inst).cvt32(); + Xbyak::Reg32 arg = reg_alloc.UseGpr(b).cvt32(); - X64Reg xmm_scratch_a = reg_alloc.ScratchRegister(any_xmm); - X64Reg xmm_scratch_b = reg_alloc.ScratchRegister(any_xmm); + Xbyak::Xmm xmm_scratch_a = reg_alloc.ScratchXmm(); + Xbyak::Xmm xmm_scratch_b = reg_alloc.ScratchXmm(); - code->MOVD_xmm(xmm_scratch_a, R(result)); - code->MOVD_xmm(xmm_scratch_b, R(arg)); + code->movd(xmm_scratch_a, result); + code->movd(xmm_scratch_b, arg); - (code->*fn)(xmm_scratch_a, R(xmm_scratch_b)); + (code->*fn)(xmm_scratch_a, xmm_scratch_b); - code->MOVD_xmm(R(result), xmm_scratch_a); + code->movd(result, xmm_scratch_a); } void EmitX64::EmitPackedSaturatedAddU8(IR::Block& block, IR::Inst* inst) { - EmitPackedOperation(code, reg_alloc, inst, &XEmitter::PADDUSB); + EmitPackedOperation(code, reg_alloc, inst, &Xbyak::CodeGenerator::paddusb); } void 
EmitX64::EmitPackedSaturatedAddS8(IR::Block& block, IR::Inst* inst) { - EmitPackedOperation(code, reg_alloc, inst, &XEmitter::PADDSB); + EmitPackedOperation(code, reg_alloc, inst, &Xbyak::CodeGenerator::paddsb); } void EmitX64::EmitPackedSaturatedSubU8(IR::Block& block, IR::Inst* inst) { - EmitPackedOperation(code, reg_alloc, inst, &XEmitter::PSUBUSB); + EmitPackedOperation(code, reg_alloc, inst, &Xbyak::CodeGenerator::psubusb); } void EmitX64::EmitPackedSaturatedSubS8(IR::Block& block, IR::Inst* inst) { - EmitPackedOperation(code, reg_alloc, inst, &XEmitter::PSUBSB); + EmitPackedOperation(code, reg_alloc, inst, &Xbyak::CodeGenerator::psubsb); } void EmitX64::EmitPackedSaturatedAddU16(IR::Block& block, IR::Inst* inst) { - EmitPackedOperation(code, reg_alloc, inst, &XEmitter::PADDUSW); + EmitPackedOperation(code, reg_alloc, inst, &Xbyak::CodeGenerator::paddusw); } void EmitX64::EmitPackedSaturatedAddS16(IR::Block& block, IR::Inst* inst) { - EmitPackedOperation(code, reg_alloc, inst, &XEmitter::PADDSW); + EmitPackedOperation(code, reg_alloc, inst, &Xbyak::CodeGenerator::paddsw); } void EmitX64::EmitPackedSaturatedSubU16(IR::Block& block, IR::Inst* inst) { - EmitPackedOperation(code, reg_alloc, inst, &XEmitter::PSUBUSW); + EmitPackedOperation(code, reg_alloc, inst, &Xbyak::CodeGenerator::psubusw); } void EmitX64::EmitPackedSaturatedSubS16(IR::Block& block, IR::Inst* inst) { - EmitPackedOperation(code, reg_alloc, inst, &XEmitter::PSUBSW); + EmitPackedOperation(code, reg_alloc, inst, &Xbyak::CodeGenerator::psubsw); } -static void DenormalsAreZero32(BlockOfCode* code, X64Reg xmm_value, X64Reg gpr_scratch) { +static void DenormalsAreZero32(BlockOfCode* code, Xbyak::Xmm xmm_value, Xbyak::Reg32 gpr_scratch) { + using namespace Xbyak::util; + Xbyak::Label end; + // We need to report back whether we've found a denormal on input. // SSE doesn't do this for us when SSE's DAZ is enabled. 
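The and/sub/cmp/ja sequence below is a single-compare denormal test: with the sign bit cleared, nonzero single-precision denormals are exactly the encodings 1..0x007FFFFF, and subtracting 1 turns that range into one unsigned comparison (zero wraps around and is correctly skipped). FlushToZero32 further down reuses the identical check, recording FPSCR_UFC instead of FPSCR_IDC. As a scalar predicate:

    #include <cstdint>

    // True for nonzero f32 denormals; bits == 0 wraps to 0xFFFFFFFF and fails.
    bool IsDenormal32(uint32_t bits) {
        uint32_t abs_bits = bits & 0x7FFFFFFF;
        return abs_bits - 1 <= 0x007FFFFE;  // i.e. 1 <= abs_bits <= 0x007FFFFF
    }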
- code->MOVD_xmm(R(gpr_scratch), xmm_value); - code->AND(32, R(gpr_scratch), Imm32(0x7FFFFFFF)); - code->SUB(32, R(gpr_scratch), Imm32(1)); - code->CMP(32, R(gpr_scratch), Imm32(0x007FFFFE)); - auto fixup = code->J_CC(CC_A); - code->PXOR(xmm_value, R(xmm_value)); - code->MOV(32, MDisp(R15, offsetof(JitState, FPSCR_IDC)), Imm32(1 << 7)); - code->SetJumpTarget(fixup); + + code->movd(gpr_scratch, xmm_value); + code->and_(gpr_scratch, u32(0x7FFFFFFF)); + code->sub(gpr_scratch, u32(1)); + code->cmp(gpr_scratch, u32(0x007FFFFE)); + code->ja(end); + code->pxor(xmm_value, xmm_value); + code->mov(dword[r15 + offsetof(JitState, FPSCR_IDC)], u32(1 << 7)); + code->L(end); } -static void DenormalsAreZero64(BlockOfCode* code, X64Reg xmm_value, X64Reg gpr_scratch) { - code->MOVQ_xmm(R(gpr_scratch), xmm_value); - code->AND(64, R(gpr_scratch), code->MFloatNonSignMask64()); - code->SUB(64, R(gpr_scratch), Imm32(1)); - code->CMP(64, R(gpr_scratch), code->MFloatPenultimatePositiveDenormal64()); - auto fixup = code->J_CC(CC_A); - code->PXOR(xmm_value, R(xmm_value)); - code->MOV(32, MDisp(R15, offsetof(JitState, FPSCR_IDC)), Imm32(1 << 7)); - code->SetJumpTarget(fixup); +static void DenormalsAreZero64(BlockOfCode* code, Xbyak::Xmm xmm_value, Xbyak::Reg64 gpr_scratch) { + using namespace Xbyak::util; + Xbyak::Label end; + + auto mask = code->MFloatNonSignMask64(); + mask.setBit(64); + auto penult_denormal = code->MFloatPenultimatePositiveDenormal64(); + penult_denormal.setBit(64); + + code->movq(gpr_scratch, xmm_value); + code->and_(gpr_scratch, mask); + code->sub(gpr_scratch, u32(1)); + code->cmp(gpr_scratch, penult_denormal); + code->ja(end); + code->pxor(xmm_value, xmm_value); + code->mov(dword[r15 + offsetof(JitState, FPSCR_IDC)], u32(1 << 7)); + code->L(end); } -static void FlushToZero32(BlockOfCode* code, X64Reg xmm_value, X64Reg gpr_scratch) { - code->MOVD_xmm(R(gpr_scratch), xmm_value); - code->AND(32, R(gpr_scratch), Imm32(0x7FFFFFFF)); - code->SUB(32, R(gpr_scratch), Imm32(1)); - code->CMP(32, R(gpr_scratch), Imm32(0x007FFFFE)); - auto fixup = code->J_CC(CC_A); - code->PXOR(xmm_value, R(xmm_value)); - code->MOV(32, MDisp(R15, offsetof(JitState, FPSCR_UFC)), Imm32(1 << 3)); - code->SetJumpTarget(fixup); +static void FlushToZero32(BlockOfCode* code, Xbyak::Xmm xmm_value, Xbyak::Reg32 gpr_scratch) { + using namespace Xbyak::util; + Xbyak::Label end; + + code->movd(gpr_scratch, xmm_value); + code->and_(gpr_scratch, u32(0x7FFFFFFF)); + code->sub(gpr_scratch, u32(1)); + code->cmp(gpr_scratch, u32(0x007FFFFE)); + code->ja(end); + code->pxor(xmm_value, xmm_value); + code->mov(dword[r15 + offsetof(JitState, FPSCR_UFC)], u32(1 << 3)); + code->L(end); } -static void FlushToZero64(BlockOfCode* code, X64Reg xmm_value, X64Reg gpr_scratch) { - code->MOVQ_xmm(R(gpr_scratch), xmm_value); - code->AND(64, R(gpr_scratch), code->MFloatNonSignMask64()); - code->SUB(64, R(gpr_scratch), Imm32(1)); - code->CMP(64, R(gpr_scratch), code->MFloatPenultimatePositiveDenormal64()); - auto fixup = code->J_CC(CC_A); - code->PXOR(xmm_value, R(xmm_value)); - code->MOV(32, MDisp(R15, offsetof(JitState, FPSCR_UFC)), Imm32(1 << 3)); - code->SetJumpTarget(fixup); +static void FlushToZero64(BlockOfCode* code, Xbyak::Xmm xmm_value, Xbyak::Reg64 gpr_scratch) { + using namespace Xbyak::util; + Xbyak::Label end; + + auto mask = code->MFloatNonSignMask64(); + mask.setBit(64); + auto penult_denormal = code->MFloatPenultimatePositiveDenormal64(); + penult_denormal.setBit(64); + + code->movq(gpr_scratch, xmm_value); + code->and_(gpr_scratch, mask); + 
code->sub(gpr_scratch, u32(1)); + code->cmp(gpr_scratch, penult_denormal); + code->ja(end); + code->pxor(xmm_value, xmm_value); + code->mov(dword[r15 + offsetof(JitState, FPSCR_UFC)], u32(1 << 3)); + code->L(end); } -static void DefaultNaN32(BlockOfCode* code, X64Reg xmm_value) { - code->UCOMISS(xmm_value, R(xmm_value)); - auto fixup = code->J_CC(CC_NP); - code->MOVAPS(xmm_value, code->MFloatNaN32()); - code->SetJumpTarget(fixup); +static void DefaultNaN32(BlockOfCode* code, Xbyak::Xmm xmm_value) { + Xbyak::Label end; + + code->ucomiss(xmm_value, xmm_value); + code->jnp(end); + code->movaps(xmm_value, code->MFloatNaN32()); + code->L(end); } -static void DefaultNaN64(BlockOfCode* code, X64Reg xmm_value) { - code->UCOMISD(xmm_value, R(xmm_value)); - auto fixup = code->J_CC(CC_NP); - code->MOVAPS(xmm_value, code->MFloatNaN64()); - code->SetJumpTarget(fixup); +static void DefaultNaN64(BlockOfCode* code, Xbyak::Xmm xmm_value) { + Xbyak::Label end; + + code->ucomisd(xmm_value, xmm_value); + code->jnp(end); + code->movaps(xmm_value, code->MFloatNaN64()); + code->L(end); } -static void ZeroIfNaN64(BlockOfCode* code, X64Reg xmm_value) { - code->UCOMISD(xmm_value, R(xmm_value)); - auto fixup = code->J_CC(CC_NP); - code->MOVAPS(xmm_value, code->MFloatPositiveZero64()); - code->SetJumpTarget(fixup); +static void ZeroIfNaN64(BlockOfCode* code, Xbyak::Xmm xmm_value) { + Xbyak::Label end; + + code->ucomisd(xmm_value, xmm_value); + code->jnp(end); + code->pxor(xmm_value, xmm_value); + code->L(end); } -static void FPThreeOp32(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (XEmitter::*fn)(X64Reg, const OpArg&)) { +static void FPThreeOp32(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (Xbyak::CodeGenerator::*fn)(const Xbyak::Xmm&, const Xbyak::Operand&)) { IR::Value a = inst->GetArg(0); IR::Value b = inst->GetArg(1); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm); - X64Reg operand = reg_alloc.UseRegister(b, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm result = reg_alloc.UseDefXmm(a, inst); + Xbyak::Xmm operand = reg_alloc.UseXmm(b); + Xbyak::Reg32 gpr_scratch = reg_alloc.ScratchGpr().cvt32(); if (block.location.FPSCR().FTZ()) { DenormalsAreZero32(code, result, gpr_scratch); DenormalsAreZero32(code, operand, gpr_scratch); } - (code->*fn)(result, R(operand)); + (code->*fn)(result, operand); if (block.location.FPSCR().FTZ()) { FlushToZero32(code, result, gpr_scratch); } @@ -1240,19 +1345,19 @@ static void FPThreeOp32(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block } } -static void FPThreeOp64(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (XEmitter::*fn)(X64Reg, const OpArg&)) { +static void FPThreeOp64(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (Xbyak::CodeGenerator::*fn)(const Xbyak::Xmm&, const Xbyak::Operand&)) { IR::Value a = inst->GetArg(0); IR::Value b = inst->GetArg(1); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm); - X64Reg operand = reg_alloc.UseRegister(b, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm result = reg_alloc.UseDefXmm(a, inst); + Xbyak::Xmm operand = reg_alloc.UseXmm(b); + Xbyak::Reg64 gpr_scratch = reg_alloc.ScratchGpr(); if (block.location.FPSCR().FTZ()) { DenormalsAreZero64(code, result, gpr_scratch); DenormalsAreZero64(code, operand, gpr_scratch); } - (code->*fn)(result, R(operand)); + (code->*fn)(result, operand); if 
(block.location.FPSCR().FTZ()) { FlushToZero64(code, result, gpr_scratch); } @@ -1261,16 +1366,16 @@ static void FPThreeOp64(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block } } -static void FPTwoOp32(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (XEmitter::*fn)(X64Reg, const OpArg&)) { +static void FPTwoOp32(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (Xbyak::CodeGenerator::*fn)(const Xbyak::Xmm&, const Xbyak::Operand&)) { IR::Value a = inst->GetArg(0); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm result = reg_alloc.UseDefXmm(a, inst); + Xbyak::Reg32 gpr_scratch = reg_alloc.ScratchGpr().cvt32(); if (block.location.FPSCR().FTZ()) { DenormalsAreZero32(code, result, gpr_scratch); } - (code->*fn)(result, R(result)); + (code->*fn)(result, result); if (block.location.FPSCR().FTZ()) { FlushToZero32(code, result, gpr_scratch); } @@ -1279,16 +1384,16 @@ static void FPTwoOp32(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, } } -static void FPTwoOp64(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (XEmitter::*fn)(X64Reg, const OpArg&)) { +static void FPTwoOp64(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (Xbyak::CodeGenerator::*fn)(const Xbyak::Xmm&, const Xbyak::Operand&)) { IR::Value a = inst->GetArg(0); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm result = reg_alloc.UseDefXmm(a, inst); + Xbyak::Reg64 gpr_scratch = reg_alloc.ScratchGpr(); if (block.location.FPSCR().FTZ()) { DenormalsAreZero64(code, result, gpr_scratch); } - (code->*fn)(result, R(result)); + (code->*fn)(result, result); if (block.location.FPSCR().FTZ()) { FlushToZero64(code, result, gpr_scratch); } @@ -1298,115 +1403,115 @@ static void FPTwoOp64(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, } void EmitX64::EmitTransferFromFP32(IR::Block& block, IR::Inst* inst) { - X64Reg result = reg_alloc.DefRegister(inst, any_gpr); - X64Reg source = reg_alloc.UseRegister(inst->GetArg(0), any_xmm); + Xbyak::Reg32 result = reg_alloc.DefGpr(inst).cvt32(); + Xbyak::Xmm source = reg_alloc.UseXmm(inst->GetArg(0)); // TODO: Eliminate this. - code->MOVD_xmm(R(result), source); + code->movd(result, source); } void EmitX64::EmitTransferFromFP64(IR::Block& block, IR::Inst* inst) { - X64Reg result = reg_alloc.DefRegister(inst, any_gpr); - X64Reg source = reg_alloc.UseRegister(inst->GetArg(0), any_xmm); + Xbyak::Reg64 result = reg_alloc.DefGpr(inst); + Xbyak::Xmm source = reg_alloc.UseXmm(inst->GetArg(0)); // TODO: Eliminate this. - code->MOVQ_xmm(R(result), source); + code->movq(result, source); } void EmitX64::EmitTransferToFP32(IR::Block& block, IR::Inst* inst) { - X64Reg result = reg_alloc.DefRegister(inst, any_xmm); - X64Reg source = reg_alloc.UseRegister(inst->GetArg(0), any_gpr); + Xbyak::Xmm result = reg_alloc.DefXmm(inst); + Xbyak::Reg32 source = reg_alloc.UseGpr(inst->GetArg(0)).cvt32(); // TODO: Eliminate this. - code->MOVD_xmm(result, R(source)); + code->movd(result, source); } void EmitX64::EmitTransferToFP64(IR::Block& block, IR::Inst* inst) { - X64Reg result = reg_alloc.DefRegister(inst, any_xmm); - X64Reg source = reg_alloc.UseRegister(inst->GetArg(0), any_gpr); + Xbyak::Xmm result = reg_alloc.DefXmm(inst); + Xbyak::Reg64 source = reg_alloc.UseGpr(inst->GetArg(0)); // TODO: Eliminate this. 
- code->MOVQ_xmm(result, R(source)); + code->movq(result, source); } void EmitX64::EmitFPAbs32(IR::Block&, IR::Inst* inst) { IR::Value a = inst->GetArg(0); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm); + Xbyak::Xmm result = reg_alloc.UseDefXmm(a, inst); - code->PAND(result, code->MFloatNonSignMask32()); + code->pand(result, code->MFloatNonSignMask32()); } void EmitX64::EmitFPAbs64(IR::Block&, IR::Inst* inst) { IR::Value a = inst->GetArg(0); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm); + Xbyak::Xmm result = reg_alloc.UseDefXmm(a, inst); - code->PAND(result, code->MFloatNonSignMask64()); + code->pand(result, code->MFloatNonSignMask64()); } void EmitX64::EmitFPNeg32(IR::Block&, IR::Inst* inst) { IR::Value a = inst->GetArg(0); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm); + Xbyak::Xmm result = reg_alloc.UseDefXmm(a, inst); - code->PXOR(result, code->MFloatNegativeZero32()); + code->pxor(result, code->MFloatNegativeZero32()); } void EmitX64::EmitFPNeg64(IR::Block&, IR::Inst* inst) { IR::Value a = inst->GetArg(0); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm); + Xbyak::Xmm result = reg_alloc.UseDefXmm(a, inst); - code->PXOR(result, code->MFloatNegativeZero64()); + code->pxor(result, code->MFloatNegativeZero64()); } void EmitX64::EmitFPAdd32(IR::Block& block, IR::Inst* inst) { - FPThreeOp32(code, reg_alloc, block, inst, &XEmitter::ADDSS); + FPThreeOp32(code, reg_alloc, block, inst, &Xbyak::CodeGenerator::addss); } void EmitX64::EmitFPAdd64(IR::Block& block, IR::Inst* inst) { - FPThreeOp64(code, reg_alloc, block, inst, &XEmitter::ADDSD); + FPThreeOp64(code, reg_alloc, block, inst, &Xbyak::CodeGenerator::addsd); } void EmitX64::EmitFPDiv32(IR::Block& block, IR::Inst* inst) { - FPThreeOp32(code, reg_alloc, block, inst, &XEmitter::DIVSS); + FPThreeOp32(code, reg_alloc, block, inst, &Xbyak::CodeGenerator::divss); } void EmitX64::EmitFPDiv64(IR::Block& block, IR::Inst* inst) { - FPThreeOp64(code, reg_alloc, block, inst, &XEmitter::DIVSD); + FPThreeOp64(code, reg_alloc, block, inst, &Xbyak::CodeGenerator::divsd); } void EmitX64::EmitFPMul32(IR::Block& block, IR::Inst* inst) { - FPThreeOp32(code, reg_alloc, block, inst, &XEmitter::MULSS); + FPThreeOp32(code, reg_alloc, block, inst, &Xbyak::CodeGenerator::mulss); } void EmitX64::EmitFPMul64(IR::Block& block, IR::Inst* inst) { - FPThreeOp64(code, reg_alloc, block, inst, &XEmitter::MULSD); + FPThreeOp64(code, reg_alloc, block, inst, &Xbyak::CodeGenerator::mulsd); } void EmitX64::EmitFPSqrt32(IR::Block& block, IR::Inst* inst) { - FPTwoOp32(code, reg_alloc, block, inst, &XEmitter::SQRTSS); + FPTwoOp32(code, reg_alloc, block, inst, &Xbyak::CodeGenerator::sqrtss); } void EmitX64::EmitFPSqrt64(IR::Block& block, IR::Inst* inst) { - FPTwoOp64(code, reg_alloc, block, inst, &XEmitter::SQRTSD); + FPTwoOp64(code, reg_alloc, block, inst, &Xbyak::CodeGenerator::sqrtsd); } void EmitX64::EmitFPSub32(IR::Block& block, IR::Inst* inst) { - FPThreeOp32(code, reg_alloc, block, inst, &XEmitter::SUBSS); + FPThreeOp32(code, reg_alloc, block, inst, &Xbyak::CodeGenerator::subss); } void EmitX64::EmitFPSub64(IR::Block& block, IR::Inst* inst) { - FPThreeOp64(code, reg_alloc, block, inst, &XEmitter::SUBSD); + FPThreeOp64(code, reg_alloc, block, inst, &Xbyak::CodeGenerator::subsd); } void EmitX64::EmitFPSingleToDouble(IR::Block& block, IR::Inst* inst) { IR::Value a = inst->GetArg(0); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm result 
= reg_alloc.UseDefXmm(a, inst); + Xbyak::Reg64 gpr_scratch = reg_alloc.ScratchGpr(); if (block.location.FPSCR().FTZ()) { - DenormalsAreZero32(code, result, gpr_scratch); + DenormalsAreZero32(code, result, gpr_scratch.cvt32()); } - code->CVTSS2SD(result, R(result)); + code->cvtss2sd(result, result); if (block.location.FPSCR().FTZ()) { FlushToZero64(code, result, gpr_scratch); } @@ -1418,15 +1523,15 @@ void EmitX64::EmitFPSingleToDouble(IR::Block& block, IR::Inst* inst) { void EmitX64::EmitFPDoubleToSingle(IR::Block& block, IR::Inst* inst) { IR::Value a = inst->GetArg(0); - X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm result = reg_alloc.UseDefXmm(a, inst); + Xbyak::Reg64 gpr_scratch = reg_alloc.ScratchGpr(); if (block.location.FPSCR().FTZ()) { DenormalsAreZero64(code, result, gpr_scratch); } - code->CVTSD2SS(result, R(result)); + code->cvtsd2ss(result, result); if (block.location.FPSCR().FTZ()) { - FlushToZero32(code, result, gpr_scratch); + FlushToZero32(code, result, gpr_scratch.cvt32()); } if (block.location.FPSCR().DN()) { DefaultNaN32(code, result); @@ -1437,9 +1542,9 @@ void EmitX64::EmitFPSingleToS32(IR::Block& block, IR::Inst* inst) { IR::Value a = inst->GetArg(0); bool round_towards_zero = inst->GetArg(1).GetU1(); - X64Reg from = reg_alloc.UseScratchRegister(a, any_xmm); - X64Reg to = reg_alloc.DefRegister(inst, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm from = reg_alloc.UseScratchXmm(a); + Xbyak::Xmm to = reg_alloc.DefXmm(inst); + Xbyak::Reg32 gpr_scratch = reg_alloc.ScratchGpr().cvt32(); // ARM saturates on conversion; this differs from x64 which returns a sentinel value. // Conversion to double is lossless, and allows for clamping. @@ -1447,33 +1552,33 @@ void EmitX64::EmitFPSingleToS32(IR::Block& block, IR::Inst* inst) { if (block.location.FPSCR().FTZ()) { DenormalsAreZero32(code, from, gpr_scratch); } - code->CVTSS2SD(from, R(from)); + code->cvtss2sd(from, from); // First time is to set flags if (round_towards_zero) { - code->CVTTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvttsd2si(gpr_scratch, from); // 32 bit gpr } else { - code->CVTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvtsd2si(gpr_scratch, from); // 32 bit gpr } // Clamp to output range ZeroIfNaN64(code, from); - code->MINSD(from, code->MFloatMaxS32()); - code->MAXSD(from, code->MFloatMinS32()); + code->minsd(from, code->MFloatMaxS32()); + code->maxsd(from, code->MFloatMinS32()); // Second time is for real if (round_towards_zero) { - code->CVTTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvttsd2si(gpr_scratch, from); // 32 bit gpr } else { - code->CVTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvtsd2si(gpr_scratch, from); // 32 bit gpr } - code->MOVD_xmm(to, R(gpr_scratch)); + code->movd(to, gpr_scratch); } void EmitX64::EmitFPSingleToU32(IR::Block& block, IR::Inst* inst) { IR::Value a = inst->GetArg(0); bool round_towards_zero = inst->GetArg(1).GetU1(); - X64Reg from = reg_alloc.UseScratchRegister(a, any_xmm); - X64Reg to = reg_alloc.DefRegister(inst, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm from = reg_alloc.UseScratchXmm(a); + Xbyak::Xmm to = reg_alloc.DefXmm(inst); + Xbyak::Reg32 gpr_scratch = reg_alloc.ScratchGpr().cvt32(); // ARM saturates on conversion; this differs from x64 which returns a sentinel value. // Conversion to double is lossless, and allows for accurate clamping. 
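SSE2 has no unsigned converts, so the first path in the next hunk biases the input into signed range, converts, and undoes the bias with integer wraparound; MFloatMinS32/MFloatMaxS32 are assumed to hold -2^31 and 2^31-1 as doubles. A scalar sketch (the emitted instruction order differs, and the first cvtsd2si exists only to raise the correct MXCSR flags):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    uint32_t FloatToU32BiasedSketch(float value) {
        double x = double(value);                     // cvtss2sd: lossless
        if (std::isnan(x)) x = 0.0;                   // ZeroIfNaN64
        x += double(INT32_MIN);                       // addsd MFloatMinS32
        x = std::min(x, double(INT32_MAX));           // minsd MFloatMaxS32
        x = std::max(x, double(INT32_MIN));           // maxsd MFloatMinS32
        int32_t biased = int32_t(std::nearbyint(x));  // cvtsd2si: MXCSR rounding
        return uint32_t(biased) + 0x80000000u;        // undo the bias, mod 2^32
    }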
@@ -1486,47 +1591,47 @@ void EmitX64::EmitFPSingleToU32(IR::Block& block, IR::Inst* inst) { if (block.location.FPSCR().FTZ()) { DenormalsAreZero32(code, from, gpr_scratch); } - code->CVTSS2SD(from, R(from)); + code->cvtss2sd(from, from); ZeroIfNaN64(code, from); // Bring into SSE range - code->ADDSD(from, code->MFloatMinS32()); + code->addsd(from, code->MFloatMinS32()); // First time is to set flags - code->CVTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvtsd2si(gpr_scratch, from); // 32 bit gpr // Clamp to output range - code->MINSD(from, code->MFloatMaxS32()); - code->MAXSD(from, code->MFloatMinS32()); + code->minsd(from, code->MFloatMaxS32()); + code->maxsd(from, code->MFloatMinS32()); // Actually convert - code->CVTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvtsd2si(gpr_scratch, from); // 32 bit gpr // Bring back into original range - code->ADD(32, R(gpr_scratch), Imm32(2147483648u)); - code->MOVQ_xmm(to, R(gpr_scratch)); + code->add(gpr_scratch, u32(2147483648u)); + code->movd(to, gpr_scratch); } else { - X64Reg xmm_mask = reg_alloc.ScratchRegister(any_xmm); - X64Reg gpr_mask = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm xmm_mask = reg_alloc.ScratchXmm(); + Xbyak::Reg32 gpr_mask = reg_alloc.ScratchGpr().cvt32(); if (block.location.FPSCR().FTZ()) { DenormalsAreZero32(code, from, gpr_scratch); } - code->CVTSS2SD(from, R(from)); + code->cvtss2sd(from, from); ZeroIfNaN64(code, from); // Generate masks if out-of-signed-range - code->MOVAPS(xmm_mask, code->MFloatMaxS32()); - code->CMPLTSD(xmm_mask, R(from)); - code->MOVQ_xmm(R(gpr_mask), xmm_mask); - code->PAND(xmm_mask, code->MFloatMinS32()); - code->AND(32, R(gpr_mask), Imm32(2147483648u)); + code->movaps(xmm_mask, code->MFloatMaxS32()); + code->cmpltsd(xmm_mask, from); + code->movd(gpr_mask, xmm_mask); + code->pand(xmm_mask, code->MFloatMinS32()); + code->and_(gpr_mask, u32(2147483648u)); // Bring into range if necessary - code->ADDSD(from, R(xmm_mask)); + code->addsd(from, xmm_mask); // First time is to set flags - code->CVTTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvttsd2si(gpr_scratch, from); // 32 bit gpr // Clamp to output range - code->MINSD(from, code->MFloatMaxS32()); - code->MAXSD(from, code->MFloatMinU32()); + code->minsd(from, code->MFloatMaxS32()); + code->maxsd(from, code->MFloatMinU32()); // Actually convert - code->CVTTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvttsd2si(gpr_scratch, from); // 32 bit gpr // Bring back into original range if necessary - code->ADD(32, R(gpr_scratch), R(gpr_mask)); - code->MOVQ_xmm(to, R(gpr_scratch)); + code->add(gpr_scratch, gpr_mask); + code->movd(to, gpr_scratch); } } @@ -1534,42 +1639,42 @@ void EmitX64::EmitFPDoubleToS32(IR::Block& block, IR::Inst* inst) { IR::Value a = inst->GetArg(0); bool round_towards_zero = inst->GetArg(1).GetU1(); - X64Reg from = reg_alloc.UseScratchRegister(a, any_xmm); - X64Reg to = reg_alloc.DefRegister(inst, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm from = reg_alloc.UseScratchXmm(a); + Xbyak::Xmm to = reg_alloc.DefXmm(inst); + Xbyak::Reg32 gpr_scratch = reg_alloc.ScratchGpr().cvt32(); // ARM saturates on conversion; this differs from x64 which returns a sentinel value. 
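As in the single-precision case, the clamp sequence that follows implements ARM-style saturation rather than x64's 0x80000000 sentinel; in scalar terms the intended result is roughly:

    #include <cmath>
    #include <cstdint>

    int32_t SaturatedToS32Sketch(double x) {
        if (std::isnan(x)) return 0;                // ZeroIfNaN64
        if (x >= 2147483647.0) return INT32_MAX;    // minsd MFloatMaxS32
        if (x <= -2147483648.0) return INT32_MIN;   // maxsd MFloatMinS32
        return int32_t(x);  // cvttsd2si truncates; cvtsd2si obeys MXCSR rounding
    }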
if (block.location.FPSCR().FTZ()) { - DenormalsAreZero64(code, from, gpr_scratch); + DenormalsAreZero64(code, from, gpr_scratch.cvt64()); } // First time is to set flags if (round_towards_zero) { - code->CVTTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvttsd2si(gpr_scratch, from); // 32 bit gpr } else { - code->CVTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvtsd2si(gpr_scratch, from); // 32 bit gpr } // Clamp to output range ZeroIfNaN64(code, from); - code->MINSD(from, code->MFloatMaxS32()); - code->MAXSD(from, code->MFloatMinS32()); + code->minsd(from, code->MFloatMaxS32()); + code->maxsd(from, code->MFloatMinS32()); // Second time is for real if (round_towards_zero) { - code->CVTTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvttsd2si(gpr_scratch, from); // 32 bit gpr } else { - code->CVTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvtsd2si(gpr_scratch, from); // 32 bit gpr } - code->MOVD_xmm(to, R(gpr_scratch)); + code->movd(to, gpr_scratch); } void EmitX64::EmitFPDoubleToU32(IR::Block& block, IR::Inst* inst) { IR::Value a = inst->GetArg(0); bool round_towards_zero = inst->GetArg(1).GetU1(); - X64Reg from = reg_alloc.UseScratchRegister(a, any_xmm); - X64Reg to = reg_alloc.DefRegister(inst, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); - X64Reg xmm_scratch = reg_alloc.ScratchRegister(any_xmm); + Xbyak::Xmm from = reg_alloc.UseScratchXmm(a); + Xbyak::Xmm to = reg_alloc.DefXmm(inst); + Xbyak::Reg32 gpr_scratch = reg_alloc.ScratchGpr().cvt32(); + Xbyak::Xmm xmm_scratch = reg_alloc.ScratchXmm(); // ARM saturates on conversion; this differs from x64 which returns a sentinel value. // TODO: Use VCVTPD2UDQ when AVX512VL is available. @@ -1577,47 +1682,47 @@ void EmitX64::EmitFPDoubleToU32(IR::Block& block, IR::Inst* inst) { if (block.location.FPSCR().RMode() != Arm::FPSCR::RoundingMode::TowardsZero && !round_towards_zero) { if (block.location.FPSCR().FTZ()) { - DenormalsAreZero64(code, from, gpr_scratch); + DenormalsAreZero64(code, from, gpr_scratch.cvt64()); } ZeroIfNaN64(code, from); // Bring into SSE range - code->ADDSD(from, code->MFloatMinS32()); + code->addsd(from, code->MFloatMinS32()); // First time is to set flags - code->CVTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvtsd2si(gpr_scratch, from); // 32 bit gpr // Clamp to output range - code->MINSD(from, code->MFloatMaxS32()); - code->MAXSD(from, code->MFloatMinS32()); + code->minsd(from, code->MFloatMaxS32()); + code->maxsd(from, code->MFloatMinS32()); // Actually convert - code->CVTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvtsd2si(gpr_scratch, from); // 32 bit gpr // Bring back into original range - code->ADD(32, R(gpr_scratch), Imm32(2147483648u)); - code->MOVQ_xmm(to, R(gpr_scratch)); + code->add(gpr_scratch, u32(2147483648u)); + code->movd(to, gpr_scratch); } else { - X64Reg xmm_mask = reg_alloc.ScratchRegister(any_xmm); - X64Reg gpr_mask = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm xmm_mask = reg_alloc.ScratchXmm(); + Xbyak::Reg32 gpr_mask = reg_alloc.ScratchGpr().cvt32(); if (block.location.FPSCR().FTZ()) { - DenormalsAreZero64(code, from, gpr_scratch); + DenormalsAreZero64(code, from, gpr_scratch.cvt64()); } ZeroIfNaN64(code, from); // Generate masks if out-of-signed-range - code->MOVAPS(xmm_mask, code->MFloatMaxS32()); - code->CMPLTSD(xmm_mask, R(from)); - code->MOVQ_xmm(R(gpr_mask), xmm_mask); - code->PAND(xmm_mask, code->MFloatMinS32()); - code->AND(32, R(gpr_mask), Imm32(2147483648u)); + code->movaps(xmm_mask, code->MFloatMaxS32()); + 
code->cmpltsd(xmm_mask, from); + code->movd(gpr_mask, xmm_mask); + code->pand(xmm_mask, code->MFloatMinS32()); + code->and_(gpr_mask, u32(2147483648u)); // Bring into range if necessary - code->ADDSD(from, R(xmm_mask)); + code->addsd(from, xmm_mask); // First time is to set flags - code->CVTTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvttsd2si(gpr_scratch, from); // 32 bit gpr // Clamp to output range - code->MINSD(from, code->MFloatMaxS32()); - code->MAXSD(from, code->MFloatMinU32()); + code->minsd(from, code->MFloatMaxS32()); + code->maxsd(from, code->MFloatMinU32()); // Actually convert - code->CVTTSD2SI(gpr_scratch, R(from)); // 32 bit gpr + code->cvttsd2si(gpr_scratch, from); // 32 bit gpr // Bring back into original range if necessary - code->ADD(32, R(gpr_scratch), R(gpr_mask)); - code->MOVQ_xmm(to, R(gpr_scratch)); + code->add(gpr_scratch, gpr_mask); + code->movd(to, gpr_scratch); } } @@ -1626,12 +1731,12 @@ void EmitX64::EmitFPS32ToSingle(IR::Block& block, IR::Inst* inst) { bool round_to_nearest = inst->GetArg(1).GetU1(); ASSERT_MSG(!round_to_nearest, "round_to_nearest unimplemented"); - X64Reg from = reg_alloc.UseRegister(a, any_xmm); - X64Reg to = reg_alloc.DefRegister(inst, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm from = reg_alloc.UseXmm(a); + Xbyak::Xmm to = reg_alloc.DefXmm(inst); + Xbyak::Reg32 gpr_scratch = reg_alloc.ScratchGpr().cvt32(); - code->MOVD_xmm(R(gpr_scratch), from); - code->CVTSI2SS(32, to, R(gpr_scratch)); + code->movd(gpr_scratch, from); + code->cvtsi2ss(to, gpr_scratch); } void EmitX64::EmitFPU32ToSingle(IR::Block& block, IR::Inst* inst) { @@ -1639,12 +1744,12 @@ void EmitX64::EmitFPU32ToSingle(IR::Block& block, IR::Inst* inst) { bool round_to_nearest = inst->GetArg(1).GetU1(); ASSERT_MSG(!round_to_nearest, "round_to_nearest unimplemented"); - X64Reg from = reg_alloc.UseRegister(a, any_xmm); - X64Reg to = reg_alloc.DefRegister(inst, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm from = reg_alloc.UseXmm(a); + Xbyak::Xmm to = reg_alloc.DefXmm(inst); + Xbyak::Reg32 gpr_scratch = reg_alloc.ScratchGpr().cvt32(); - code->MOVD_xmm(R(gpr_scratch), from); - code->CVTSI2SS(64, to, R(gpr_scratch)); + code->movd(gpr_scratch, from); + code->cvtsi2ss(to, gpr_scratch.cvt64()); // movd zero-extends, so a 64-bit source keeps the u32 unsigned, matching the old CVTSI2SS(64, ...) } void EmitX64::EmitFPS32ToDouble(IR::Block& block, IR::Inst* inst) { @@ -1652,12 +1757,12 @@ void EmitX64::EmitFPS32ToDouble(IR::Block& block, IR::Inst* inst) { bool round_to_nearest = inst->GetArg(1).GetU1(); ASSERT_MSG(!round_to_nearest, "round_to_nearest unimplemented"); - X64Reg from = reg_alloc.UseRegister(a, any_xmm); - X64Reg to = reg_alloc.DefRegister(inst, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm from = reg_alloc.UseXmm(a); + Xbyak::Xmm to = reg_alloc.DefXmm(inst); + Xbyak::Reg32 gpr_scratch = reg_alloc.ScratchGpr().cvt32(); - code->MOVD_xmm(R(gpr_scratch), from); - code->CVTSI2SD(32, to, R(gpr_scratch)); + code->movd(gpr_scratch, from); + code->cvtsi2sd(to, gpr_scratch); } void EmitX64::EmitFPU32ToDouble(IR::Block& block, IR::Inst* inst) { @@ -1665,92 +1770,98 @@ void EmitX64::EmitFPU32ToDouble(IR::Block& block, IR::Inst* inst) { bool round_to_nearest = inst->GetArg(1).GetU1(); ASSERT_MSG(!round_to_nearest, "round_to_nearest unimplemented"); - X64Reg from = reg_alloc.UseRegister(a, any_xmm); - X64Reg to = reg_alloc.DefRegister(inst, any_xmm); - X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr); + Xbyak::Xmm from = reg_alloc.UseXmm(a); + Xbyak::Xmm to = reg_alloc.DefXmm(inst);
+ Xbyak::Reg32 gpr_scratch = reg_alloc.ScratchGpr().cvt32(); - code->MOVD_xmm(R(gpr_scratch), from); - code->CVTSI2SD(64, to, R(gpr_scratch)); + code->movd(gpr_scratch, from); + code->cvtsi2sd(to, gpr_scratch.cvt64()); // movd zero-extends, so a 64-bit source keeps the u32 unsigned, matching the old CVTSI2SD(64, ...) } void EmitX64::EmitClearExclusive(IR::Block&, IR::Inst*) { - code->MOV(8, MDisp(R15, offsetof(JitState, exclusive_state)), Imm8(0)); + using namespace Xbyak::util; + + code->mov(code->byte[r15 + offsetof(JitState, exclusive_state)], u8(0)); } void EmitX64::EmitSetExclusive(IR::Block&, IR::Inst* inst) { - ASSERT(inst->GetArg(1).IsImmediate()); - X64Reg address = reg_alloc.UseRegister(inst->GetArg(0), any_gpr); + using namespace Xbyak::util; - code->MOV(8, MDisp(R15, offsetof(JitState, exclusive_state)), Imm8(1)); - code->MOV(32, MDisp(R15, offsetof(JitState, exclusive_address)), R(address)); + ASSERT(inst->GetArg(1).IsImmediate()); + Xbyak::Reg32 address = reg_alloc.UseGpr(inst->GetArg(0)).cvt32(); + + code->mov(code->byte[r15 + offsetof(JitState, exclusive_state)], u8(1)); + code->mov(dword[r15 + offsetof(JitState, exclusive_address)], address); } void EmitX64::EmitReadMemory8(IR::Block&, IR::Inst* inst) { reg_alloc.HostCall(inst, inst->GetArg(0)); - code->ABI_CallFunction(reinterpret_cast<void*>(cb.MemoryRead8)); + code->CallFunction(reinterpret_cast<void*>(cb.MemoryRead8)); } void EmitX64::EmitReadMemory16(IR::Block&, IR::Inst* inst) { reg_alloc.HostCall(inst, inst->GetArg(0)); - code->ABI_CallFunction(reinterpret_cast<void*>(cb.MemoryRead16)); + code->CallFunction(reinterpret_cast<void*>(cb.MemoryRead16)); } void EmitX64::EmitReadMemory32(IR::Block&, IR::Inst* inst) { reg_alloc.HostCall(inst, inst->GetArg(0)); - code->ABI_CallFunction(reinterpret_cast<void*>(cb.MemoryRead32)); + code->CallFunction(reinterpret_cast<void*>(cb.MemoryRead32)); } void EmitX64::EmitReadMemory64(IR::Block&, IR::Inst* inst) { reg_alloc.HostCall(inst, inst->GetArg(0)); - code->ABI_CallFunction(reinterpret_cast<void*>(cb.MemoryRead64)); + code->CallFunction(reinterpret_cast<void*>(cb.MemoryRead64)); } void EmitX64::EmitWriteMemory8(IR::Block&, IR::Inst* inst) { reg_alloc.HostCall(nullptr, inst->GetArg(0), inst->GetArg(1)); - code->ABI_CallFunction(reinterpret_cast<void*>(cb.MemoryWrite8)); + code->CallFunction(reinterpret_cast<void*>(cb.MemoryWrite8)); } void EmitX64::EmitWriteMemory16(IR::Block&, IR::Inst* inst) { reg_alloc.HostCall(nullptr, inst->GetArg(0), inst->GetArg(1)); - code->ABI_CallFunction(reinterpret_cast<void*>(cb.MemoryWrite16)); + code->CallFunction(reinterpret_cast<void*>(cb.MemoryWrite16)); } void EmitX64::EmitWriteMemory32(IR::Block&, IR::Inst* inst) { reg_alloc.HostCall(nullptr, inst->GetArg(0), inst->GetArg(1)); - code->ABI_CallFunction(reinterpret_cast<void*>(cb.MemoryWrite32)); + code->CallFunction(reinterpret_cast<void*>(cb.MemoryWrite32)); } void EmitX64::EmitWriteMemory64(IR::Block&, IR::Inst* inst) { reg_alloc.HostCall(nullptr, inst->GetArg(0), inst->GetArg(1)); - code->ABI_CallFunction(reinterpret_cast<void*>(cb.MemoryWrite64)); + code->CallFunction(reinterpret_cast<void*>(cb.MemoryWrite64)); } static void ExclusiveWrite(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* inst, void* fn) { - reg_alloc.HostCall(nullptr, inst->GetArg(0), inst->GetArg(1)); - X64Reg passed = reg_alloc.DefRegister(inst, any_gpr); - X64Reg tmp = ABI_RETURN; // Use one of the unusued HostCall registers.
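The rewritten helper that follows keeps the monitor check unchanged. What the emitted cmp/xor/test sequence decides, in pseudo-C (JitState fields as used in the surrounding code; the result follows the STREX convention, 0 on success):

    // "passed" starts at 1 (failed) and is only cleared after the store happens.
    uint32_t ExclusiveWriteStatusSketch(JitState& jit_state, uint32_t vaddr) {
        if (!jit_state.exclusive_state)
            return 1;  // je end
        if ((vaddr ^ jit_state.exclusive_address) & JitState::RESERVATION_GRANULE_MASK)
            return 1;  // jne end
        jit_state.exclusive_state = 0;  // the reservation is consumed
        return 0;      // CallFunction(fn) performs the store; xor clears passed
    }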
+ using namespace Xbyak::util; + Xbyak::Label end; - code->MOV(32, R(passed), Imm32(1)); - code->CMP(8, MDisp(R15, offsetof(JitState, exclusive_state)), Imm8(0)); - auto fail1_fixup = code->J_CC(CC_E); - code->MOV(32, R(tmp), R(ABI_PARAM1)); - code->XOR(32, R(tmp), MDisp(R15, offsetof(JitState, exclusive_address))); - code->TEST(32, R(tmp), Imm32(JitState::RESERVATION_GRANULE_MASK)); - auto fail2_fixup = code->J_CC(CC_NE); - code->MOV(8, MDisp(R15, offsetof(JitState, exclusive_state)), Imm8(0)); - code->ABI_CallFunction(fn); - code->XOR(32, R(passed), R(passed)); - code->SetJumpTarget(fail1_fixup); - code->SetJumpTarget(fail2_fixup); + reg_alloc.HostCall(nullptr, inst->GetArg(0), inst->GetArg(1)); + Xbyak::Reg32 passed = reg_alloc.DefGpr(inst).cvt32(); + Xbyak::Reg32 tmp = code->ABI_RETURN.cvt32(); // Use one of the unused HostCall registers. + + code->mov(passed, u32(1)); + code->cmp(code->byte[r15 + offsetof(JitState, exclusive_state)], u8(0)); + code->je(end); + code->mov(tmp, code->ABI_PARAM1.cvt32()); + code->xor_(tmp, dword[r15 + offsetof(JitState, exclusive_address)]); + code->test(tmp, JitState::RESERVATION_GRANULE_MASK); + code->jne(end); + code->mov(code->byte[r15 + offsetof(JitState, exclusive_state)], u8(0)); + code->CallFunction(fn); + code->xor_(passed, passed); + code->L(end); } void EmitX64::EmitExclusiveWriteMemory8(IR::Block&, IR::Inst* inst) { @@ -1766,162 +1877,141 @@ void EmitX64::EmitExclusiveWriteMemory32(IR::Block&, IR::Inst* inst) { } void EmitX64::EmitExclusiveWriteMemory64(IR::Block&, IR::Inst* inst) { - reg_alloc.HostCall(nullptr, inst->GetArg(0), inst->GetArg(1)); - X64Reg passed = reg_alloc.DefRegister(inst, any_gpr); - X64Reg value_hi = reg_alloc.UseScratchRegister(inst->GetArg(2), any_gpr); - X64Reg value = ABI_PARAM2; - X64Reg tmp = ABI_RETURN; // Use one of the unusued HostCall registers. + using namespace Xbyak::util; + Xbyak::Label end; - code->MOV(32, R(passed), Imm32(1)); - code->CMP(8, MDisp(R15, offsetof(JitState, exclusive_state)), Imm8(0)); - auto fail1_fixup = code->J_CC(CC_E); - code->MOV(32, R(tmp), R(ABI_PARAM1)); - code->XOR(32, R(tmp), MDisp(R15, offsetof(JitState, exclusive_address))); - code->TEST(32, R(tmp), Imm32(JitState::RESERVATION_GRANULE_MASK)); - auto fail2_fixup = code->J_CC(CC_NE); - code->MOV(8, MDisp(R15, offsetof(JitState, exclusive_state)), Imm8(0)); - code->MOVZX(64, 32, value, R(value)); - code->SHL(64, R(value_hi), Imm8(32)); - code->OR(64, R(value), R(value_hi)); - code->ABI_CallFunction(reinterpret_cast<void*>(cb.MemoryWrite64)); - code->XOR(32, R(passed), R(passed)); - code->SetJumpTarget(fail1_fixup); - code->SetJumpTarget(fail2_fixup); + reg_alloc.HostCall(nullptr, inst->GetArg(0), inst->GetArg(1)); + Xbyak::Reg32 passed = reg_alloc.DefGpr(inst).cvt32(); + Xbyak::Reg64 value_hi = reg_alloc.UseScratchGpr(inst->GetArg(2)); + Xbyak::Reg64 value = code->ABI_PARAM2; + Xbyak::Reg32 tmp = code->ABI_RETURN.cvt32(); // Use one of the unused HostCall registers.
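The 64-bit variant below reassembles the value from two host registers; the mov(value.cvt32(), value.cvt32()) idiom works because any 32-bit register write on x64 zeroes bits 63:32, which replaces the old explicit MOVZX. In C terms:

    #include <cstdint>

    // What the mov/shl/or sequence computes from the two 32-bit halves.
    uint64_t Combine64Sketch(uint32_t value_lo, uint32_t value_hi) {
        return (uint64_t(value_hi) << 32) | uint64_t(value_lo);
    }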
+ + code->mov(passed, u32(1)); + code->cmp(code->byte[r15 + offsetof(JitState, exclusive_state)], u8(0)); + code->je(end); + code->mov(tmp, code->ABI_PARAM1.cvt32()); + code->xor_(tmp, dword[r15 + offsetof(JitState, exclusive_address)]); + code->test(tmp, JitState::RESERVATION_GRANULE_MASK); + code->jne(end); + code->mov(code->byte[r15 + offsetof(JitState, exclusive_state)], u8(0)); + code->mov(value.cvt32(), value.cvt32()); // zero extend to 64-bits + code->shl(value_hi, 32); + code->or_(value, value_hi); + code->CallFunction(reinterpret_cast<void*>(cb.MemoryWrite64)); + code->xor_(passed, passed); + code->L(end); } void EmitX64::EmitAddCycles(size_t cycles) { + using namespace Xbyak::util; ASSERT(cycles < std::numeric_limits<u32>::max()); - code->SUB(64, MDisp(R15, offsetof(JitState, cycles_remaining)), Imm32(static_cast<u32>(cycles))); + code->sub(qword[r15 + offsetof(JitState, cycles_remaining)], static_cast<u32>(cycles)); } -static CCFlags EmitCond(BlockOfCode* code, Arm::Cond cond) { - // TODO: This code is a quick copy-paste-and-quickly-modify job from a previous JIT. Clean this up. +static Xbyak::Label EmitCond(BlockOfCode* code, Arm::Cond cond) { + using namespace Xbyak::util; - auto NFlag = [code](X64Reg reg){ - code->MOV(32, R(reg), MJitStateCpsr()); - code->SHR(32, R(reg), Imm8(31)); - code->AND(32, R(reg), Imm32(1)); - }; + Xbyak::Label label; - auto ZFlag = [code](X64Reg reg){ - code->MOV(32, R(reg), MJitStateCpsr()); - code->SHR(32, R(reg), Imm8(30)); - code->AND(32, R(reg), Imm32(1)); - }; + const Xbyak::Reg32 cpsr = eax; + code->mov(cpsr, MJitStateCpsr()); - auto CFlag = [code](X64Reg reg){ - code->MOV(32, R(reg), MJitStateCpsr()); - code->SHR(32, R(reg), Imm8(29)); - code->AND(32, R(reg), Imm32(1)); - }; - - auto VFlag = [code](X64Reg reg){ - code->MOV(32, R(reg), MJitStateCpsr()); - code->SHR(32, R(reg), Imm8(28)); - code->AND(32, R(reg), Imm32(1)); - }; - - CCFlags cc; + constexpr size_t n_shift = 31; + constexpr size_t z_shift = 30; + constexpr size_t c_shift = 29; + constexpr size_t v_shift = 28; + constexpr u32 n_mask = 1u << n_shift; + constexpr u32 z_mask = 1u << z_shift; + constexpr u32 c_mask = 1u << c_shift; + constexpr u32 v_mask = 1u << v_shift; switch (cond) { case Arm::Cond::EQ: //z - ZFlag(RAX); - code->CMP(8, R(RAX), Imm8(0)); - cc = CC_NE; + code->test(cpsr, z_mask); + code->jnz(label); break; case Arm::Cond::NE: //!z - ZFlag(RAX); - code->CMP(8, R(RAX), Imm8(0)); - cc = CC_E; + code->test(cpsr, z_mask); + code->jz(label); break; case Arm::Cond::CS: //c - CFlag(RBX); - code->CMP(8, R(RBX), Imm8(0)); - cc = CC_NE; + code->test(cpsr, c_mask); + code->jnz(label); break; case Arm::Cond::CC: //!c - CFlag(RBX); - code->CMP(8, R(RBX), Imm8(0)); - cc = CC_E; + code->test(cpsr, c_mask); + code->jz(label); break; case Arm::Cond::MI: //n - NFlag(RCX); - code->CMP(8, R(RCX), Imm8(0)); - cc = CC_NE; + code->test(cpsr, n_mask); + code->jnz(label); break; case Arm::Cond::PL: //!n - NFlag(RCX); - code->CMP(8, R(RCX), Imm8(0)); - cc = CC_E; + code->test(cpsr, n_mask); + code->jz(label); break; case Arm::Cond::VS: //v - VFlag(RDX); - code->CMP(8, R(RDX), Imm8(0)); - cc = CC_NE; + code->test(cpsr, v_mask); + code->jnz(label); break; case Arm::Cond::VC: //!v - VFlag(RDX); - code->CMP(8, R(RDX), Imm8(0)); - cc = CC_E; + code->test(cpsr, v_mask); + code->jz(label); break; case Arm::Cond::HI: { //c & !z - const X64Reg tmp = RSI; - ZFlag(RAX); - code->MOVZX(64, 8, tmp, R(RAX)); - CFlag(RBX); - code->CMP(8, R(RBX), R(tmp)); - cc = CC_A; + code->and_(cpsr, z_mask | c_mask); + code->cmp(cpsr, c_mask); + 
code->je(label); break; } case Arm::Cond::LS: { //!c | z - const X64Reg tmp = RSI; - ZFlag(RAX); - code->MOVZX(64, 8, tmp, R(RAX)); - CFlag(RBX); - code->CMP(8, R(RBX), R(tmp)); - cc = CC_BE; + code->and_(cpsr, z_mask | c_mask); + code->cmp(cpsr, c_mask); + code->jne(label); break; } case Arm::Cond::GE: { // n == v - const X64Reg tmp = RSI; - VFlag(RDX); - code->MOVZX(64, 8, tmp, R(RDX)); - NFlag(RCX); - code->CMP(8, R(RCX), R(tmp)); - cc = CC_E; + code->and_(cpsr, n_mask | v_mask); + code->jz(label); + code->cmp(cpsr, n_mask | v_mask); + code->je(label); break; } case Arm::Cond::LT: { // n != v - const X64Reg tmp = RSI; - VFlag(RDX); - code->MOVZX(64, 8, tmp, R(RDX)); - NFlag(RCX); - code->CMP(8, R(RCX), R(tmp)); - cc = CC_NE; + Xbyak::Label fail; + code->and_(cpsr, n_mask | v_mask); + code->jz(fail); + code->cmp(cpsr, n_mask | v_mask); + code->jne(label); + code->L(fail); break; } case Arm::Cond::GT: { // !z & (n == v) - const X64Reg tmp = RSI; - NFlag(RCX); - code->MOVZX(64, 8, tmp, R(RCX)); - VFlag(RDX); - code->XOR(8, R(tmp), R(RDX)); - ZFlag(RAX); - code->OR(8, R(tmp), R(RAX)); - code->TEST(8, R(tmp), R(tmp)); - cc = CC_Z; + const Xbyak::Reg32 tmp1 = ebx; + const Xbyak::Reg32 tmp2 = esi; + code->mov(tmp1, cpsr); + code->mov(tmp2, cpsr); + code->shr(tmp1, n_shift); + code->shr(tmp2, v_shift); + code->shr(cpsr, z_shift); + code->xor_(tmp1, tmp2); + code->or_(tmp1, cpsr); + code->test(tmp1, 1); + code->jz(label); break; } case Arm::Cond::LE: { // z | (n != v) - X64Reg tmp = RSI; - NFlag(RCX); - code->MOVZX(64, 8, tmp, R(RCX)); - VFlag(RDX); - code->XOR(8, R(tmp), R(RDX)); - ZFlag(RAX); - code->OR(8, R(tmp), R(RAX)); - code->TEST(8, R(tmp), R(tmp)); - cc = CC_NZ; + const Xbyak::Reg32 tmp1 = ebx; + const Xbyak::Reg32 tmp2 = esi; + code->mov(tmp1, cpsr); + code->mov(tmp2, cpsr); + code->shr(tmp1, n_shift); + code->shr(tmp2, v_shift); + code->shr(cpsr, z_shift); + code->xor_(tmp1, tmp2); + code->or_(tmp1, cpsr); + code->test(tmp1, 1); + code->jnz(label); break; } default: @@ -1929,7 +2019,7 @@ static CCFlags EmitCond(BlockOfCode* code, Arm::Cond cond) { break; } - return cc; + return label; } void EmitX64::EmitCondPrelude(const IR::Block& block) { @@ -1940,13 +2030,10 @@ void EmitX64::EmitCondPrelude(const IR::Block& block) { ASSERT(block.cond_failed.is_initialized()); - CCFlags cc = EmitCond(code, block.cond); - - // TODO: Improve, maybe. 
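A note on the HI/LS cases in the EmitCond switch above: masking CPSR down to its Z and C bits lets one compare distinguish all four flag combinations, since HI (c && !z) holds exactly when the masked value equals c_mask, and LS is its complement (hence je versus jne on the same and/cmp pair). As a scalar check:

    #include <cstdint>

    bool IsHigherSketch(uint32_t cpsr) {
        constexpr uint32_t z_mask = 1u << 30;
        constexpr uint32_t c_mask = 1u << 29;
        return (cpsr & (z_mask | c_mask)) == c_mask;  // LS is the negation
    }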
- auto fixup = code->J_CC(cc, true); + Xbyak::Label pass = EmitCond(code, block.cond); EmitAddCycles(block.cond_failed_cycle_count); EmitTerminalLinkBlock(IR::Term::LinkBlock{block.cond_failed.get()}, block.location); - code->SetJumpTarget(fixup); + code->L(pass); } void EmitX64::EmitTerminal(IR::Terminal terminal, Arm::LocationDescriptor initial_location) { @@ -1982,11 +2069,11 @@ void EmitX64::EmitTerminalInterpret(IR::Term::Interpret terminal, Arm::LocationD ASSERT_MSG(terminal.next.TFlag() == initial_location.TFlag(), "Unimplemented"); ASSERT_MSG(terminal.next.EFlag() == initial_location.EFlag(), "Unimplemented"); - code->MOV(64, R(ABI_PARAM1), Imm64(terminal.next.PC())); - code->MOV(64, R(ABI_PARAM2), Imm64(reinterpret_cast<u64>(jit_interface))); - code->MOV(32, MJitStateReg(Arm::Reg::PC), R(ABI_PARAM1)); + code->mov(code->ABI_PARAM1.cvt32(), terminal.next.PC()); + code->mov(code->ABI_PARAM2, reinterpret_cast<u64>(jit_interface)); + code->mov(MJitStateReg(Arm::Reg::PC), code->ABI_PARAM1.cvt32()); code->SwitchMxcsrOnExit(); - code->ABI_CallFunction(reinterpret_cast<void*>(cb.InterpreterFallback)); + code->CallFunction(reinterpret_cast<void*>(cb.InterpreterFallback)); code->ReturnFromRunCode(false); // TODO: Check cycles } @@ -1995,110 +2082,123 @@ void EmitX64::EmitTerminalReturnToDispatch(IR::Term::ReturnToDispatch, Arm::Loca } void EmitX64::EmitTerminalLinkBlock(IR::Term::LinkBlock terminal, Arm::LocationDescriptor initial_location) { + using namespace Xbyak::util; + if (terminal.next.TFlag() != initial_location.TFlag()) { if (terminal.next.TFlag()) { - code->OR(32, MJitStateCpsr(), Imm32(1 << 5)); + code->or_(MJitStateCpsr(), u32(1 << 5)); } else { - code->AND(32, MJitStateCpsr(), Imm32(~(1 << 5))); + code->and_(MJitStateCpsr(), u32(~(1 << 5))); } } if (terminal.next.EFlag() != initial_location.EFlag()) { if (terminal.next.EFlag()) { - code->OR(32, MJitStateCpsr(), Imm32(1 << 9)); + code->or_(MJitStateCpsr(), u32(1 << 9)); } else { - code->AND(32, MJitStateCpsr(), Imm32(~(1 << 9))); + code->and_(MJitStateCpsr(), u32(~(1 << 9))); } } - code->CMP(64, MDisp(R15, offsetof(JitState, cycles_remaining)), Imm32(0)); + code->cmp(qword[r15 + offsetof(JitState, cycles_remaining)], 0); - patch_jg_locations[terminal.next].emplace_back(code->GetWritableCodePtr()); + CodePtr patch_location = code->getCurr(); + patch_jg_locations[terminal.next].emplace_back(patch_location); if (auto next_bb = GetBasicBlock(terminal.next)) { - code->J_CC(CC_G, next_bb->code_ptr, true); - } else { - code->NOP(6); // Leave enough space for a jg instruction.
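Both linkers rely on fixed-size patch sites: jg rel32 occupies 6 bytes and jmp rel32 occupies 5, so when the successor block is not yet compiled the site must still reserve that many bytes for later patching. The helper's body is not part of this hunk; a plausible sketch, assuming it lives in BlockOfCode (an assumption, not the actual implementation):

    // Pad with nops until `size` bytes separate `begin` from the current
    // position, so the site can later be overwritten with a full jg/jmp rel32.
    void BlockOfCode::EnsurePatchLocationSize(CodePtr begin, size_t size) {
        size_t emitted = size_t(getCurr() - reinterpret_cast<const u8*>(begin));
        ASSERT(emitted <= size);
        nop(size - emitted);
    }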
+ code->jg(next_bb->code_ptr); } - code->MOV(32, MJitStateReg(Arm::Reg::PC), Imm32(terminal.next.PC())); + code->EnsurePatchLocationSize(patch_location, 6); + + code->mov(MJitStateReg(Arm::Reg::PC), terminal.next.PC()); code->ReturnFromRunCode(); // TODO: Check cycles, Properly do a link } void EmitX64::EmitTerminalLinkBlockFast(IR::Term::LinkBlockFast terminal, Arm::LocationDescriptor initial_location) { + using namespace Xbyak::util; + if (terminal.next.TFlag() != initial_location.TFlag()) { if (terminal.next.TFlag()) { - code->OR(32, MJitStateCpsr(), Imm32(1 << 5)); + code->or_(MJitStateCpsr(), u32(1 << 5)); } else { - code->AND(32, MJitStateCpsr(), Imm32(~(1 << 5))); + code->and_(MJitStateCpsr(), u32(~(1 << 5))); } } if (terminal.next.EFlag() != initial_location.EFlag()) { if (terminal.next.EFlag()) { - code->OR(32, MJitStateCpsr(), Imm32(1 << 9)); + code->or_(MJitStateCpsr(), u32(1 << 9)); } else { - code->AND(32, MJitStateCpsr(), Imm32(~(1 << 9))); + code->and_(MJitStateCpsr(), u32(~(1 << 9))); } } - patch_jmp_locations[terminal.next].emplace_back(code->GetWritableCodePtr()); + CodePtr patch_location = code->getCurr(); + patch_jmp_locations[terminal.next].emplace_back(patch_location); if (auto next_bb = GetBasicBlock(terminal.next)) { - code->JMP(next_bb->code_ptr, true); + code->jmp(next_bb->code_ptr); + code->EnsurePatchLocationSize(patch_location, 5); } else { - code->MOV(32, MJitStateReg(Arm::Reg::PC), Imm32(terminal.next.PC())); - code->JMP(code->GetReturnFromRunCodeAddress(), true); + code->mov(MJitStateReg(Arm::Reg::PC), terminal.next.PC()); + code->jmp(code->GetReturnFromRunCodeAddress()); + code->nop(3); } } void EmitX64::EmitTerminalPopRSBHint(IR::Term::PopRSBHint, Arm::LocationDescriptor initial_location) { - // This calculation has to match up with IREmitter::PushRSB - code->MOV(32, R(RBX), MJitStateCpsr()); - code->MOVZX(64, 32, RCX, MJitStateReg(Arm::Reg::PC)); - code->AND(32, R(RBX), Imm32((1 << 5) | (1 << 9))); - code->SHR(32, R(RBX), Imm8(2)); - code->OR(32, R(RBX), MDisp(R15, offsetof(JitState, guest_FPSCR_mode))); - code->SHL(64, R(RBX), Imm8(32)); - code->OR(64, R(RBX), R(RCX)); + using namespace Xbyak::util; - code->MOV(64, R(RAX), Imm64(u64(code->GetReturnFromRunCodeAddress()))); + // This calculation has to match up with IREmitter::PushRSB + code->mov(ebx, MJitStateCpsr()); + code->mov(ecx, MJitStateReg(Arm::Reg::PC)); + code->and_(ebx, u32((1 << 5) | (1 << 9))); + code->shr(ebx, 2); + code->or_(ebx, dword[r15 + offsetof(JitState, guest_FPSCR_mode)]); + code->shl(rbx, 32); + code->or_(rbx, rcx); + + code->mov(rax, u64(code->GetReturnFromRunCodeAddress())); for (size_t i = 0; i < JitState::RSBSize; ++i) { - code->CMP(64, R(RBX), MDisp(R15, int(offsetof(JitState, rsb_location_descriptors) + i * sizeof(u64)))); - code->CMOVcc(64, RAX, MDisp(R15, int(offsetof(JitState, rsb_codeptrs) + i * sizeof(u64))), CC_E); + code->cmp(rbx, qword[r15 + offsetof(JitState, rsb_location_descriptors) + i * sizeof(u64)]); + code->cmove(rax, qword[r15 + offsetof(JitState, rsb_codeptrs) + i * sizeof(u64)]); } - code->JMPptr(R(RAX)); + code->jmp(rax); } void EmitX64::EmitTerminalIf(IR::Term::If terminal, Arm::LocationDescriptor initial_location) { - CCFlags cc = EmitCond(code, terminal.if_); - auto fixup = code->J_CC(cc, true); + Xbyak::Label pass = EmitCond(code, terminal.if_); EmitTerminal(terminal.else_, initial_location); - code->SetJumpTarget(fixup); + code->L(pass); EmitTerminal(terminal.then_, initial_location); } void EmitX64::EmitTerminalCheckHalt(IR::Term::CheckHalt terminal, 
                                       Arm::LocationDescriptor initial_location) {
-    code->CMP(8, MDisp(R15, offsetof(JitState, halt_requested)), Imm8(0));
-    code->J_CC(CC_NE, code->GetReturnFromRunCodeAddress(), true);
+    using namespace Xbyak::util;
+
+    code->cmp(code->byte[r15 + offsetof(JitState, halt_requested)], u8(0));
+    code->jne(code->GetReturnFromRunCodeAddress());
 
     EmitTerminal(terminal.else_, initial_location);
 }
 
 void EmitX64::Patch(Arm::LocationDescriptor desc, CodePtr bb) {
-    u8* const save_code_ptr = code->GetWritableCodePtr();
+    using namespace Xbyak::util;
+
+    const CodePtr save_code_ptr = code->getCurr();
 
     for (CodePtr location : patch_jg_locations[desc]) {
-        code->SetCodePtr(const_cast<u8*>(location));
-        code->J_CC(CC_G, bb, true);
-        ASSERT(code->GetCodePtr() - location == 6);
+        code->SetCodePtr(location);
+        code->jg(bb);
+        code->EnsurePatchLocationSize(location, 6);
     }
 
     for (CodePtr location : patch_jmp_locations[desc]) {
-        code->SetCodePtr(const_cast<u8*>(location));
-        code->JMP(bb, true);
-        ASSERT(code->GetCodePtr() - location == 5);
+        code->SetCodePtr(location);
+        code->jmp(bb);
+        code->EnsurePatchLocationSize(location, 5);
    }
 
     for (CodePtr location : patch_unique_hash_locations[desc.UniqueHash()]) {
-        code->SetCodePtr(const_cast<u8*>(location));
-        code->MOV(64, R(RCX), Imm64(u64(bb)));
-        ASSERT((code->GetCodePtr() - location) == 10);
+        code->SetCodePtr(location);
+        code->mov(rcx, u64(bb));
+        code->EnsurePatchLocationSize(location, 10);
     }
 
     code->SetCodePtr(save_code_ptr);
diff --git a/src/backend_x64/emit_x64.h b/src/backend_x64/emit_x64.h
index 8f05e51f..b53014bf 100644
--- a/src/backend_x64/emit_x64.h
+++ b/src/backend_x64/emit_x64.h
@@ -11,10 +11,10 @@
 #include
 #include
+#include <xbyak.h>
 
 #include "backend_x64/block_of_code.h"
 #include "backend_x64/reg_alloc.h"
-#include "common/x64/emitter.h"
 #include "frontend/arm_types.h"
 #include "frontend/ir/basic_block.h"
 #include "frontend/ir/microinstruction.h"
diff --git a/src/backend_x64/hostloc.cpp b/src/backend_x64/hostloc.cpp
new file mode 100644
index 00000000..3de11ca3
--- /dev/null
+++ b/src/backend_x64/hostloc.cpp
@@ -0,0 +1,35 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2016 MerryMage
+ * This software may be used and distributed according to the terms of the GNU
+ * General Public License version 2 or any later version.
+ */
+
+#include "backend_x64/hostloc.h"
+
+namespace Dynarmic {
+namespace BackendX64 {
+
+Xbyak::Reg64 HostLocToReg64(HostLoc loc) {
+    DEBUG_ASSERT(HostLocIsGPR(loc));
+    DEBUG_ASSERT(loc != HostLoc::RSP);
+    DEBUG_ASSERT(loc != HostLoc::R15);
+    return Xbyak::Reg64(static_cast<int>(loc));
+}
+
+Xbyak::Xmm HostLocToXmm(HostLoc loc) {
+    DEBUG_ASSERT(HostLocIsXMM(loc));
+    return Xbyak::Xmm(static_cast<int>(loc) - static_cast<int>(HostLoc::XMM0));
+}
+
+Xbyak::Address SpillToOpArg(HostLoc loc) {
+    using namespace Xbyak::util;
+
+    static_assert(std::is_same<decltype(JitState::Spill[0]), u64&>::value, "Spill must be u64");
+    DEBUG_ASSERT(HostLocIsSpill(loc));
+
+    size_t i = static_cast<size_t>(loc) - static_cast<size_t>(HostLoc::FirstSpill);
+    return qword[r15 + offsetof(JitState, Spill) + i * sizeof(u64)];
+}
+
+} // namespace BackendX64
+} // namespace Dynarmic
diff --git a/src/backend_x64/hostloc.h b/src/backend_x64/hostloc.h
new file mode 100644
index 00000000..26aeed75
--- /dev/null
+++ b/src/backend_x64/hostloc.h
@@ -0,0 +1,98 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2016 MerryMage
+ * This software may be used and distributed according to the terms of the GNU
+ * General Public License version 2 or any later version.
+ */
+
+#pragma once
+
+#include <xbyak.h>
+
+#include "backend_x64/jitstate.h"
+#include "common/assert.h"
+#include "common/common_types.h"
+
+namespace Dynarmic {
+namespace BackendX64 {
+
+enum class HostLoc {
+    // Ordering of the registers is intentional. See also: HostLocToX64.
+    RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI, R8, R9, R10, R11, R12, R13, R14, R15,
+    XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
+    XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15,
+    CF, PF, AF, ZF, SF, OF,
+    FirstSpill,
+};
+
+constexpr size_t HostLocCount = static_cast<size_t>(HostLoc::FirstSpill) + SpillCount;
+
+inline bool HostLocIsGPR(HostLoc reg) {
+    return reg >= HostLoc::RAX && reg <= HostLoc::R14;
+}
+
+inline bool HostLocIsXMM(HostLoc reg) {
+    return reg >= HostLoc::XMM0 && reg <= HostLoc::XMM15;
+}
+
+inline bool HostLocIsRegister(HostLoc reg) {
+    return HostLocIsGPR(reg) || HostLocIsXMM(reg);
+}
+
+inline bool HostLocIsFlag(HostLoc reg) {
+    return reg >= HostLoc::CF && reg <= HostLoc::OF;
+}
+
+inline HostLoc HostLocSpill(size_t i) {
+    ASSERT_MSG(i < SpillCount, "Invalid spill");
+    return static_cast<HostLoc>(static_cast<size_t>(HostLoc::FirstSpill) + i);
+}
+
+inline bool HostLocIsSpill(HostLoc reg) {
+    return reg >= HostLoc::FirstSpill && reg <= HostLocSpill(SpillCount - 1);
+}
+
+using HostLocList = std::initializer_list<HostLoc>;
+
+// RSP is preserved for function calls
+// R15 contains the JitState pointer
+const HostLocList any_gpr = {
+    HostLoc::RAX,
+    HostLoc::RBX,
+    HostLoc::RCX,
+    HostLoc::RDX,
+    HostLoc::RSI,
+    HostLoc::RDI,
+    HostLoc::RBP,
+    HostLoc::R8,
+    HostLoc::R9,
+    HostLoc::R10,
+    HostLoc::R11,
+    HostLoc::R12,
+    HostLoc::R13,
+    HostLoc::R14,
+};
+
+const HostLocList any_xmm = {
+    HostLoc::XMM0,
+    HostLoc::XMM1,
+    HostLoc::XMM2,
+    HostLoc::XMM3,
+    HostLoc::XMM4,
+    HostLoc::XMM5,
+    HostLoc::XMM6,
+    HostLoc::XMM7,
+    HostLoc::XMM8,
+    HostLoc::XMM9,
+    HostLoc::XMM10,
+    HostLoc::XMM11,
+    HostLoc::XMM12,
+    HostLoc::XMM13,
+    HostLoc::XMM14,
+    HostLoc::XMM15,
+};
+
+Xbyak::Reg64 HostLocToReg64(HostLoc loc);
+Xbyak::Xmm HostLocToXmm(HostLoc loc);
+Xbyak::Address SpillToOpArg(HostLoc loc);
+
+} // namespace BackendX64
+} // namespace Dynarmic
diff --git a/src/backend_x64/jitstate.h b/src/backend_x64/jitstate.h
index 7df16ea6..48ee9902 100644
--- a/src/backend_x64/jitstate.h
+++ b/src/backend_x64/jitstate.h
@@ -15,7 +15,7 @@ namespace BackendX64 {
 
 class BlockOfCode;
 
-constexpr size_t SpillCount = 32;
+constexpr size_t SpillCount = 64;
 
 struct JitState {
     explicit JitState(const BlockOfCode* code) { ResetRSB(code); }
@@ -54,7 +54,7 @@ struct JitState {
     void SetFpscr(u32 FPSCR);
 };
 
-using CodePtr = const u8*;
+using CodePtr = const void*;
 
 } // namespace BackendX64
 } // namespace Dynarmic
diff --git a/src/backend_x64/reg_alloc.cpp b/src/backend_x64/reg_alloc.cpp
index 82e32446..0b346695 100644
--- a/src/backend_x64/reg_alloc.cpp
+++ b/src/backend_x64/reg_alloc.cpp
@@ -7,52 +7,42 @@
 #include
 #include
+#include <xbyak.h>
+
+#include "backend_x64/jitstate.h"
 #include "backend_x64/reg_alloc.h"
 #include "common/assert.h"
-#include "common/x64/emitter.h"
 
 namespace Dynarmic {
 namespace BackendX64 {
 
-static Gen::OpArg ImmediateToOpArg(const IR::Value& imm) {
+static u32 ImmediateToU32(const IR::Value& imm) {
     switch (imm.GetType()) {
     case IR::Type::U1:
-        return Gen::Imm32(imm.GetU1());
+        return u32(imm.GetU1());
         break;
     case IR::Type::U8:
-        return Gen::Imm32(imm.GetU8());
+        return u32(imm.GetU8());
         break;
     case IR::Type::U32:
-        return Gen::Imm32(imm.GetU32());
+        return u32(imm.GetU32());
         break;
     default:
         ASSERT_MSG(false, "This should never happen.");
     }
 }
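The conversion helpers in hostloc.cpp above (and the old HostLocToX64 deleted just below) rely on HostLoc's enumerators mirroring the register indices Xbyak uses: RAX = 0 through R15 = 15, followed by the XMM block. A minimal standalone sketch of that invariant; the names `ToReg64`/`ToXmm` are hypothetical and this is illustrative only, assuming the `<xbyak.h>` include path this patch sets up:

    #include <xbyak.h>

    enum class HostLoc {
        RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI,
        R8, R9, R10, R11, R12, R13, R14, R15,
        XMM0, // ...
    };

    // Because the enumerators line up with Xbyak's register numbering,
    // each conversion reduces to an integer cast, as in hostloc.cpp.
    Xbyak::Reg64 ToReg64(HostLoc loc) {
        return Xbyak::Reg64(static_cast<int>(loc));
    }

    Xbyak::Xmm ToXmm(HostLoc loc) {
        return Xbyak::Xmm(static_cast<int>(loc) - static_cast<int>(HostLoc::XMM0));
    }
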
-static Gen::X64Reg HostLocToX64(HostLoc loc) { - DEBUG_ASSERT(HostLocIsRegister(loc)); - DEBUG_ASSERT(loc != HostLoc::RSP); - // HostLoc is ordered such that the numbers line up. - if (HostLocIsGPR(loc)) { - return static_cast(loc); +static Xbyak::Reg HostLocToX64(HostLoc hostloc) { + if (HostLocIsGPR(hostloc)) { + return HostLocToReg64(hostloc); } - if (HostLocIsXMM(loc)) { - return static_cast(size_t(loc) - size_t(HostLoc::XMM0)); + if (HostLocIsXMM(hostloc)) { + return HostLocToXmm(hostloc); } ASSERT_MSG(false, "This should never happen."); - return Gen::INVALID_REG; } -static Gen::OpArg SpillToOpArg(HostLoc loc) { - static_assert(std::is_same::value, "Spill must be u64"); - DEBUG_ASSERT(HostLocIsSpill(loc)); - - size_t i = static_cast(loc) - static_cast(HostLoc::FirstSpill); - return Gen::MDisp(Gen::R15, static_cast(offsetof(JitState, Spill) + i * sizeof(u64))); -} - -Gen::X64Reg RegAlloc::DefRegister(IR::Inst* def_inst, HostLocList desired_locations) { +HostLoc RegAlloc::DefHostLocReg(IR::Inst* def_inst, HostLocList desired_locations) { DEBUG_ASSERT(std::all_of(desired_locations.begin(), desired_locations.end(), HostLocIsRegister)); DEBUG_ASSERT_MSG(!ValueLocation(def_inst), "def_inst has already been defined"); @@ -66,14 +56,14 @@ Gen::X64Reg RegAlloc::DefRegister(IR::Inst* def_inst, HostLocList desired_locati LocInfo(location).def = def_inst; DEBUG_ASSERT(LocInfo(location).IsDef()); - return HostLocToX64(location); + return location; } void RegAlloc::RegisterAddDef(IR::Inst* def_inst, const IR::Value& use_inst) { DEBUG_ASSERT_MSG(!ValueLocation(def_inst), "def_inst has already been defined"); if (use_inst.IsImmediate()) { - LoadImmediateIntoRegister(use_inst, DefRegister(def_inst, any_gpr)); + LoadImmediateIntoHostLocReg(use_inst, DefHostLocReg(def_inst, any_gpr)); return; } @@ -84,15 +74,15 @@ void RegAlloc::RegisterAddDef(IR::Inst* def_inst, const IR::Value& use_inst) { DEBUG_ASSERT(LocInfo(location).IsIdle()); } -Gen::X64Reg RegAlloc::UseDefRegister(IR::Value use_value, IR::Inst* def_inst, HostLocList desired_locations) { +HostLoc RegAlloc::UseDefHostLocReg(IR::Value use_value, IR::Inst* def_inst, HostLocList desired_locations) { if (!use_value.IsImmediate()) { - return UseDefRegister(use_value.GetInst(), def_inst, desired_locations); + return UseDefHostLocReg(use_value.GetInst(), def_inst, desired_locations); } - return LoadImmediateIntoRegister(use_value, DefRegister(def_inst, desired_locations)); + return LoadImmediateIntoHostLocReg(use_value, DefHostLocReg(def_inst, desired_locations)); } -Gen::X64Reg RegAlloc::UseDefRegister(IR::Inst* use_inst, IR::Inst* def_inst, HostLocList desired_locations) { +HostLoc RegAlloc::UseDefHostLocReg(IR::Inst* use_inst, IR::Inst* def_inst, HostLocList desired_locations) { DEBUG_ASSERT(std::all_of(desired_locations.begin(), desired_locations.end(), HostLocIsRegister)); DEBUG_ASSERT_MSG(!ValueLocation(def_inst), "def_inst has already been defined"); DEBUG_ASSERT_MSG(ValueLocation(use_inst), "use_inst has not been defined"); @@ -112,9 +102,9 @@ Gen::X64Reg RegAlloc::UseDefRegister(IR::Inst* use_inst, IR::Inst* def_inst, Hos EmitMove(new_location, current_location); LocInfo(new_location) = LocInfo(current_location); LocInfo(current_location) = {}; - return HostLocToX64(new_location); + return new_location; } else { - return HostLocToX64(current_location); + return current_location; } } } @@ -122,17 +112,17 @@ Gen::X64Reg RegAlloc::UseDefRegister(IR::Inst* use_inst, IR::Inst* def_inst, Hos bool is_floating_point = 
HostLocIsXMM(*desired_locations.begin()); if (is_floating_point) DEBUG_ASSERT(use_inst->GetType() == IR::Type::F32 || use_inst->GetType() == IR::Type::F64); - Gen::X64Reg use_reg = UseRegister(use_inst, is_floating_point ? any_xmm : any_gpr); - Gen::X64Reg def_reg = DefRegister(def_inst, desired_locations); + HostLoc use_reg = UseHostLocReg(use_inst, is_floating_point ? any_xmm : any_gpr); + HostLoc def_reg = DefHostLocReg(def_inst, desired_locations); if (is_floating_point) { - code->MOVAPD(def_reg, Gen::R(use_reg)); + code->movapd(HostLocToXmm(def_reg), HostLocToXmm(use_reg)); } else { - code->MOV(64, Gen::R(def_reg), Gen::R(use_reg)); + code->mov(HostLocToReg64(def_reg), HostLocToReg64(use_reg)); } return def_reg; } -std::tuple RegAlloc::UseDefOpArg(IR::Value use_value, IR::Inst* def_inst, HostLocList desired_locations) { +std::tuple RegAlloc::UseDefOpArgHostLocReg(IR::Value use_value, IR::Inst* def_inst, HostLocList desired_locations) { DEBUG_ASSERT(std::all_of(desired_locations.begin(), desired_locations.end(), HostLocIsRegister)); DEBUG_ASSERT_MSG(!ValueLocation(def_inst), "def_inst has already been defined"); DEBUG_ASSERT_MSG(use_value.IsImmediate() || ValueLocation(use_value.GetInst()), "use_inst has not been defined"); @@ -147,37 +137,37 @@ std::tuple RegAlloc::UseDefOpArg(IR::Value use_value, I if (HostLocIsSpill(current_location)) { loc_info.is_being_used = true; DEBUG_ASSERT(loc_info.IsUse()); - return std::make_tuple(SpillToOpArg(current_location), DefRegister(def_inst, desired_locations)); + return std::make_tuple(SpillToOpArg(current_location), DefHostLocReg(def_inst, desired_locations)); } else { loc_info.is_being_used = true; loc_info.def = def_inst; DEBUG_ASSERT(loc_info.IsUseDef()); - return std::make_tuple(Gen::R(HostLocToX64(current_location)), HostLocToX64(current_location)); + return std::make_tuple(HostLocToX64(current_location), current_location); } } } } - Gen::OpArg use_oparg = UseOpArg(use_value, any_gpr); - Gen::X64Reg def_reg = DefRegister(def_inst, desired_locations); + OpArg use_oparg = UseOpArg(use_value, any_gpr); + HostLoc def_reg = DefHostLocReg(def_inst, desired_locations); return std::make_tuple(use_oparg, def_reg); } -Gen::X64Reg RegAlloc::UseRegister(IR::Value use_value, HostLocList desired_locations) { +HostLoc RegAlloc::UseHostLocReg(IR::Value use_value, HostLocList desired_locations) { if (!use_value.IsImmediate()) { - return UseRegister(use_value.GetInst(), desired_locations); + return UseHostLocReg(use_value.GetInst(), desired_locations); } - return LoadImmediateIntoRegister(use_value, ScratchRegister(desired_locations)); + return LoadImmediateIntoHostLocReg(use_value, ScratchHostLocReg(desired_locations)); } -Gen::X64Reg RegAlloc::UseRegister(IR::Inst* use_inst, HostLocList desired_locations) { +HostLoc RegAlloc::UseHostLocReg(IR::Inst* use_inst, HostLocList desired_locations) { HostLoc current_location; bool was_being_used; std::tie(current_location, was_being_used) = UseHostLoc(use_inst, desired_locations); if (HostLocIsRegister(current_location)) { - return HostLocToX64(current_location); + return current_location; } else if (HostLocIsSpill(current_location)) { HostLoc new_location = SelectARegister(desired_locations); if (IsRegisterOccupied(new_location)) { @@ -192,16 +182,15 @@ Gen::X64Reg RegAlloc::UseRegister(IR::Inst* use_inst, HostLocList desired_locati LocInfo(new_location).is_being_used = true; DEBUG_ASSERT(LocInfo(new_location).IsScratch()); } - return HostLocToX64(new_location); + return new_location; } ASSERT_MSG(false, "Unknown 
current_location type"); - return Gen::INVALID_REG; } -Gen::OpArg RegAlloc::UseOpArg(IR::Value use_value, HostLocList desired_locations) { +OpArg RegAlloc::UseOpArg(IR::Value use_value, HostLocList desired_locations) { if (use_value.IsImmediate()) { - return ImmediateToOpArg(use_value); + return Xbyak::Operand(); // return a None } IR::Inst* use_inst = use_value.GetInst(); @@ -211,24 +200,23 @@ Gen::OpArg RegAlloc::UseOpArg(IR::Value use_value, HostLocList desired_locations std::tie(current_location, was_being_used) = UseHostLoc(use_inst, desired_locations); if (HostLocIsRegister(current_location)) { - return Gen::R(HostLocToX64(current_location)); + return HostLocToX64(current_location); } else if (HostLocIsSpill(current_location)) { return SpillToOpArg(current_location); } ASSERT_MSG(false, "Unknown current_location type"); - return Gen::R(Gen::INVALID_REG); } -Gen::X64Reg RegAlloc::UseScratchRegister(IR::Value use_value, HostLocList desired_locations) { +HostLoc RegAlloc::UseScratchHostLocReg(IR::Value use_value, HostLocList desired_locations) { if (!use_value.IsImmediate()) { - return UseScratchRegister(use_value.GetInst(), desired_locations); + return UseScratchHostLocReg(use_value.GetInst(), desired_locations); } - return LoadImmediateIntoRegister(use_value, ScratchRegister(desired_locations)); + return LoadImmediateIntoHostLocReg(use_value, ScratchHostLocReg(desired_locations)); } -Gen::X64Reg RegAlloc::UseScratchRegister(IR::Inst* use_inst, HostLocList desired_locations) { +HostLoc RegAlloc::UseScratchHostLocReg(IR::Inst* use_inst, HostLocList desired_locations) { DEBUG_ASSERT(std::all_of(desired_locations.begin(), desired_locations.end(), HostLocIsRegister)); DEBUG_ASSERT_MSG(ValueLocation(use_inst), "use_inst has not been defined"); ASSERT_MSG(use_inst->HasUses(), "use_inst ran out of uses. (Use-d an IR::Inst* too many times)"); @@ -244,7 +232,7 @@ Gen::X64Reg RegAlloc::UseScratchRegister(IR::Inst* use_inst, HostLocList desired LocInfo(new_location).is_being_used = true; DecrementRemainingUses(use_inst); DEBUG_ASSERT(LocInfo(new_location).IsScratch()); - return HostLocToX64(new_location); + return new_location; } else if (HostLocIsRegister(current_location)) { ASSERT(LocInfo(current_location).IsIdle() || LocInfo(current_location).IsUse() @@ -260,14 +248,13 @@ Gen::X64Reg RegAlloc::UseScratchRegister(IR::Inst* use_inst, HostLocList desired LocInfo(new_location).values.clear(); DecrementRemainingUses(use_inst); DEBUG_ASSERT(LocInfo(new_location).IsScratch()); - return HostLocToX64(new_location); + return new_location; } - ASSERT_MSG(0, "Invalid current_location"); - return Gen::INVALID_REG; + ASSERT_MSG(false, "Invalid current_location"); } -Gen::X64Reg RegAlloc::ScratchRegister(HostLocList desired_locations) { +HostLoc RegAlloc::ScratchHostLocReg(HostLocList desired_locations) { DEBUG_ASSERT(std::all_of(desired_locations.begin(), desired_locations.end(), HostLocIsRegister)); HostLoc location = SelectARegister(desired_locations); @@ -280,7 +267,7 @@ Gen::X64Reg RegAlloc::ScratchRegister(HostLocList desired_locations) { LocInfo(location).is_being_used = true; DEBUG_ASSERT(LocInfo(location).IsScratch()); - return HostLocToX64(location); + return location; } void RegAlloc::HostCall(IR::Inst* result_def, IR::Value arg0_use, IR::Value arg1_use, IR::Value arg2_use, IR::Value arg3_use) { @@ -300,26 +287,26 @@ void RegAlloc::HostCall(IR::Inst* result_def, IR::Value arg0_use, IR::Value arg1 // TODO: This works but almost certainly leads to suboptimal generated code. 
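HostCall's approach, shown in the loops just below, is to claim every ABI caller-saved location as a scratch allocation, which spills any live value out of those registers before the call. A hedged sketch of how an emitter might use this interface; the emitter name and `host_fn` callback are hypothetical, while `HostCall`, `GetArg`, and `CallFunction` are the interfaces this patch uses:

    // Emit a call back into host code: the result lands in the ABI return
    // register and the single argument in the first ABI parameter register.
    void EmitCallHostFunction(RegAlloc& reg_alloc, BlockOfCode* code,
                              IR::Inst* inst, u64 host_fn) {
        reg_alloc.HostCall(inst, inst->GetArg(0)); // flushes caller-saved state
        code->CallFunction(reinterpret_cast<const void*>(host_fn));
    }
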
for (HostLoc caller_save : OtherCallerSave) { - ScratchRegister({caller_save}); + ScratchHostLocReg({caller_save}); } if (result_def) { - DefRegister(result_def, {AbiReturn}); + DefHostLocReg(result_def, {AbiReturn}); } else { - ScratchRegister({AbiReturn}); + ScratchHostLocReg({AbiReturn}); } for (size_t i = 0; i < AbiArgs.size(); i++) { if (!args[i]->IsEmpty()) { - UseScratchRegister(*args[i], {AbiArgs[i]}); + UseScratchHostLocReg(*args[i], {AbiArgs[i]}); } else { - ScratchRegister({AbiArgs[i]}); + ScratchHostLocReg({AbiArgs[i]}); } } // Flush all xmm registers for (auto xmm : any_xmm) { - ScratchRegister({xmm}); + ScratchHostLocReg({xmm}); } } @@ -420,17 +407,17 @@ void RegAlloc::Reset() { void RegAlloc::EmitMove(HostLoc to, HostLoc from) { if (HostLocIsXMM(to) && HostLocIsSpill(from)) { - code->MOVSD(HostLocToX64(to), SpillToOpArg(from)); + code->movsd(HostLocToXmm(to), SpillToOpArg(from)); } else if (HostLocIsSpill(to) && HostLocIsXMM(from)) { - code->MOVSD(SpillToOpArg(to), HostLocToX64(from)); + code->movsd(SpillToOpArg(to), HostLocToXmm(from)); } else if (HostLocIsXMM(to) && HostLocIsXMM(from)) { - code->MOVAPS(HostLocToX64(to), Gen::R(HostLocToX64(from))); + code->movaps(HostLocToXmm(to), HostLocToXmm(from)); } else if (HostLocIsGPR(to) && HostLocIsSpill(from)) { - code->MOV(64, Gen::R(HostLocToX64(to)), SpillToOpArg(from)); + code->mov(HostLocToReg64(to), SpillToOpArg(from)); } else if (HostLocIsSpill(to) && HostLocIsGPR(from)) { - code->MOV(64, SpillToOpArg(to), Gen::R(HostLocToX64(from))); + code->mov(SpillToOpArg(to), HostLocToReg64(from)); } else if (HostLocIsGPR(to) && HostLocIsGPR(from)){ - code->MOV(64, Gen::R(HostLocToX64(to)), Gen::R(HostLocToX64(from))); + code->mov(HostLocToReg64(to), HostLocToReg64(from)); } else { ASSERT_MSG(false, "Invalid RegAlloc::EmitMove"); } @@ -438,7 +425,7 @@ void RegAlloc::EmitMove(HostLoc to, HostLoc from) { void RegAlloc::EmitExchange(HostLoc a, HostLoc b) { if (HostLocIsGPR(a) && HostLocIsGPR(b)) { - code->XCHG(64, Gen::R(HostLocToX64(a)), Gen::R(HostLocToX64(b))); + code->xchg(HostLocToReg64(a), HostLocToReg64(b)); } else if (HostLocIsXMM(a) && HostLocIsXMM(b)) { ASSERT_MSG(false, "Exchange is unnecessary for XMM registers"); } else { @@ -495,14 +482,17 @@ std::tuple RegAlloc::UseHostLoc(IR::Inst* use_inst, HostLocList d return std::make_tuple(static_cast(-1), false); } -Gen::X64Reg RegAlloc::LoadImmediateIntoRegister(IR::Value imm, Gen::X64Reg reg) { +HostLoc RegAlloc::LoadImmediateIntoHostLocReg(IR::Value imm, HostLoc host_loc) { ASSERT_MSG(imm.IsImmediate(), "imm is not an immediate"); - Gen::OpArg op_arg = ImmediateToOpArg(imm); - if (op_arg.GetImmValue() == 0) - code->XOR(32, Gen::R(reg), Gen::R(reg)); + + Xbyak::Reg64 reg = HostLocToReg64(host_loc); + + u32 imm_value = ImmediateToU32(imm); + if (imm_value == 0) + code->xor_(reg, reg); else - code->MOV(32, Gen::R(reg), op_arg); - return reg; + code->mov(reg.cvt32(), imm_value); + return host_loc; } } // namespace BackendX64 diff --git a/src/backend_x64/reg_alloc.h b/src/backend_x64/reg_alloc.h index a48e71af..9123e7c1 100644 --- a/src/backend_x64/reg_alloc.h +++ b/src/backend_x64/reg_alloc.h @@ -11,93 +11,55 @@ #include #include +#include #include "backend_x64/block_of_code.h" +#include "backend_x64/hostloc.h" #include "backend_x64/jitstate.h" #include "common/common_types.h" -#include "common/x64/emitter.h" #include "frontend/ir/microinstruction.h" #include "frontend/ir/value.h" namespace Dynarmic { namespace BackendX64 { -enum class HostLoc { - // Ordering of the registers is 
intentional. See also: HostLocToX64. - RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI, R8, R9, R10, R11, R12, R13, R14, - XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, - XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, - CF, PF, AF, ZF, SF, OF, - FirstSpill, -}; +struct OpArg { + OpArg() : type(OPERAND), inner_operand() {} + OpArg(const Xbyak::Address& address) : type(ADDRESS), inner_address(address) {} + OpArg(const Xbyak::Operand& operand) : type(OPERAND), inner_operand(operand) {} -constexpr size_t HostLocCount = static_cast(HostLoc::FirstSpill) + SpillCount; + Xbyak::Operand& operator*() { + switch (type) { + case ADDRESS: + return inner_address; + case OPERAND: + return inner_operand; + } + ASSERT_MSG(false, "Unreachable"); + } -enum class HostLocState { - Idle, Def, Use, Scratch -}; + void setBit(int bits) { + switch (type) { + case ADDRESS: + inner_address.setBit(bits); + return; + case OPERAND: + inner_operand.setBit(bits); + return; + } + ASSERT_MSG(false, "Unreachable"); + } -inline bool HostLocIsGPR(HostLoc reg) { - return reg >= HostLoc::RAX && reg <= HostLoc::R14; -} +private: + enum { + OPERAND, + ADDRESS, + } type; -inline bool HostLocIsXMM(HostLoc reg) { - return reg >= HostLoc::XMM0 && reg <= HostLoc::XMM15; -} - -inline bool HostLocIsRegister(HostLoc reg) { - return HostLocIsGPR(reg) || HostLocIsXMM(reg); -} - -inline bool HostLocIsFlag(HostLoc reg) { - return reg >= HostLoc::CF && reg <= HostLoc::OF; -} - -inline HostLoc HostLocSpill(size_t i) { - ASSERT_MSG(i < SpillCount, "Invalid spill"); - return static_cast(static_cast(HostLoc::FirstSpill) + i); -} - -inline bool HostLocIsSpill(HostLoc reg) { - return reg >= HostLoc::FirstSpill && reg <= HostLocSpill(SpillCount - 1); -} - -using HostLocList = std::initializer_list; - -const HostLocList any_gpr = { - HostLoc::RAX, - HostLoc::RBX, - HostLoc::RCX, - HostLoc::RDX, - HostLoc::RSI, - HostLoc::RDI, - HostLoc::RBP, - HostLoc::R8, - HostLoc::R9, - HostLoc::R10, - HostLoc::R11, - HostLoc::R12, - HostLoc::R13, - HostLoc::R14, -}; - -const HostLocList any_xmm = { - HostLoc::XMM0, - HostLoc::XMM1, - HostLoc::XMM2, - HostLoc::XMM3, - HostLoc::XMM4, - HostLoc::XMM5, - HostLoc::XMM6, - HostLoc::XMM7, - HostLoc::XMM8, - HostLoc::XMM9, - HostLoc::XMM10, - HostLoc::XMM11, - HostLoc::XMM12, - HostLoc::XMM13, - HostLoc::XMM14, - HostLoc::XMM15, + union { + Xbyak::Operand inner_operand; + Xbyak::Address inner_address; + }; }; class RegAlloc final { @@ -105,21 +67,54 @@ public: RegAlloc(BlockOfCode* code) : code(code) {} /// Late-def - Gen::X64Reg DefRegister(IR::Inst* def_inst, HostLocList desired_locations); + Xbyak::Reg64 DefGpr(IR::Inst* def_inst, HostLocList desired_locations = any_gpr) { + return HostLocToReg64(DefHostLocReg(def_inst, desired_locations)); + } + Xbyak::Xmm DefXmm(IR::Inst* def_inst, HostLocList desired_locations = any_xmm) { + return HostLocToXmm(DefHostLocReg(def_inst, desired_locations)); + } void RegisterAddDef(IR::Inst* def_inst, const IR::Value& use_inst); /// Early-use, Late-def - Gen::X64Reg UseDefRegister(IR::Value use_value, IR::Inst* def_inst, HostLocList desired_locations); - Gen::X64Reg UseDefRegister(IR::Inst* use_inst, IR::Inst* def_inst, HostLocList desired_locations); - std::tuple UseDefOpArg(IR::Value use_value, IR::Inst* def_inst, HostLocList desired_locations); + Xbyak::Reg64 UseDefGpr(IR::Value use_value, IR::Inst* def_inst, HostLocList desired_locations = any_gpr) { + return HostLocToReg64(UseDefHostLocReg(use_value, def_inst, desired_locations)); + } + Xbyak::Xmm UseDefXmm(IR::Value use_value, 
IR::Inst* def_inst, HostLocList desired_locations = any_xmm) {
+        return HostLocToXmm(UseDefHostLocReg(use_value, def_inst, desired_locations));
+    }
+    std::tuple<OpArg, Xbyak::Reg64> UseDefOpArgGpr(IR::Value use_value, IR::Inst* def_inst, HostLocList desired_locations = any_gpr) {
+        OpArg op;
+        HostLoc host_loc;
+        std::tie(op, host_loc) = UseDefOpArgHostLocReg(use_value, def_inst, desired_locations);
+        return std::make_tuple(op, HostLocToReg64(host_loc));
+    }
+    std::tuple<OpArg, Xbyak::Xmm> UseDefOpArgXmm(IR::Value use_value, IR::Inst* def_inst, HostLocList desired_locations = any_xmm) {
+        OpArg op;
+        HostLoc host_loc;
+        std::tie(op, host_loc) = UseDefOpArgHostLocReg(use_value, def_inst, desired_locations);
+        return std::make_tuple(op, HostLocToXmm(host_loc));
+    }
 
     /// Early-use
-    Gen::X64Reg UseRegister(IR::Value use_value, HostLocList desired_locations);
-    Gen::X64Reg UseRegister(IR::Inst* use_inst, HostLocList desired_locations);
-    Gen::OpArg UseOpArg(IR::Value use_value, HostLocList desired_locations);
+    Xbyak::Reg64 UseGpr(IR::Value use_value, HostLocList desired_locations = any_gpr) {
+        return HostLocToReg64(UseHostLocReg(use_value, desired_locations));
+    }
+    Xbyak::Xmm UseXmm(IR::Value use_value, HostLocList desired_locations = any_xmm) {
+        return HostLocToXmm(UseHostLocReg(use_value, desired_locations));
+    }
+    OpArg UseOpArg(IR::Value use_value, HostLocList desired_locations);
 
     /// Early-use, Destroyed
-    Gen::X64Reg UseScratchRegister(IR::Value use_value, HostLocList desired_locations);
-    Gen::X64Reg UseScratchRegister(IR::Inst* use_inst, HostLocList desired_locations);
+    Xbyak::Reg64 UseScratchGpr(IR::Value use_value, HostLocList desired_locations = any_gpr) {
+        return HostLocToReg64(UseScratchHostLocReg(use_value, desired_locations));
+    }
+    Xbyak::Xmm UseScratchXmm(IR::Value use_value, HostLocList desired_locations = any_xmm) {
+        return HostLocToXmm(UseScratchHostLocReg(use_value, desired_locations));
+    }
 
     /// Early-def, Late-use, single-use
-    Gen::X64Reg ScratchRegister(HostLocList desired_locations);
+    Xbyak::Reg64 ScratchGpr(HostLocList desired_locations = any_gpr) {
+        return HostLocToReg64(ScratchHostLocReg(desired_locations));
+    }
+    Xbyak::Xmm ScratchXmm(HostLocList desired_locations = any_xmm) {
+        return HostLocToXmm(ScratchHostLocReg(desired_locations));
+    }
 
     /// Late-def for result register, Early-use for all arguments, Each value is placed into registers according to host ABI.
void HostCall(IR::Inst* result_def = nullptr, IR::Value arg0_use = {}, IR::Value arg1_use = {}, IR::Value arg2_use = {}, IR::Value arg3_use = {}); @@ -141,11 +136,20 @@ private: bool IsRegisterAllocated(HostLoc loc) const; bool IsLastUse(IR::Inst* inst) const; + HostLoc DefHostLocReg(IR::Inst* def_inst, HostLocList desired_locations); + HostLoc UseDefHostLocReg(IR::Value use_value, IR::Inst* def_inst, HostLocList desired_locations); + HostLoc UseDefHostLocReg(IR::Inst* use_inst, IR::Inst* def_inst, HostLocList desired_locations); + std::tuple UseDefOpArgHostLocReg(IR::Value use_value, IR::Inst* def_inst, HostLocList desired_locations); + HostLoc UseHostLocReg(IR::Value use_value, HostLocList desired_locations); + HostLoc UseHostLocReg(IR::Inst* use_inst, HostLocList desired_locations); std::tuple UseHostLoc(IR::Inst* use_inst, HostLocList desired_locations); + HostLoc UseScratchHostLocReg(IR::Value use_value, HostLocList desired_locations); + HostLoc UseScratchHostLocReg(IR::Inst* use_inst, HostLocList desired_locations); + HostLoc ScratchHostLocReg(HostLocList desired_locations); void EmitMove(HostLoc to, HostLoc from); void EmitExchange(HostLoc a, HostLoc b); - Gen::X64Reg LoadImmediateIntoRegister(IR::Value imm, Gen::X64Reg reg); + HostLoc LoadImmediateIntoHostLocReg(IR::Value imm, HostLoc reg); void SpillRegister(HostLoc loc); HostLoc FindFreeSpill() const; diff --git a/src/common/bit_set.h b/src/common/bit_set.h deleted file mode 100644 index 4ccbb402..00000000 --- a/src/common/bit_set.h +++ /dev/null @@ -1,190 +0,0 @@ -// This file is under the public domain. - -#pragma once - -#include -#ifdef _WIN32 -#include -#endif -#include -#include -#include -#include "common/common_types.h" - -// namespace avoids conflict with OS X Carbon; don't use BitSet directly -namespace Common { - -// Helper functions: - -#ifdef _WIN32 -template -static inline int CountSetBits(T v) -{ - // from https://graphics.stanford.edu/~seander/bithacks.html - // GCC has this built in, but MSVC's intrinsic will only emit the actual - // POPCNT instruction, which we're not depending on - v = v - ((v >> 1) & (T)~(T)0/3); - v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); - v = (v + (v >> 4)) & (T)~(T)0/255*15; - return (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * 8; -} -static inline int LeastSignificantSetBit(uint8_t val) -{ - unsigned long index; - _BitScanForward(&index, val); - return (int)index; -} -static inline int LeastSignificantSetBit(uint16_t val) -{ - unsigned long index; - _BitScanForward(&index, val); - return (int)index; -} -static inline int LeastSignificantSetBit(uint32_t val) -{ - unsigned long index; - _BitScanForward(&index, val); - return (int)index; -} -static inline int LeastSignificantSetBit(uint64_t val) -{ - unsigned long index; - _BitScanForward64(&index, val); - return (int)index; -} -#else -static inline int CountSetBits(uint8_t val) { return __builtin_popcount(val); } -static inline int CountSetBits(uint16_t val) { return __builtin_popcount(val); } -static inline int CountSetBits(uint32_t val) { return __builtin_popcount(val); } -static inline int CountSetBits(uint64_t val) { return __builtin_popcountll(val); } -static inline int LeastSignificantSetBit(uint8_t val) { return __builtin_ctz(val); } -static inline int LeastSignificantSetBit(uint16_t val) { return __builtin_ctz(val); } -static inline int LeastSignificantSetBit(uint32_t val) { return __builtin_ctz(val); } -static inline int LeastSignificantSetBit(uint64_t val) { return __builtin_ctzll(val); } -#endif - -// Similar to 
std::bitset, this is a class which encapsulates a bitset, i.e. -// using the set bits of an integer to represent a set of integers. Like that -// class, it acts like an array of bools: -// BitSet32 bs; -// bs[1] = true; -// but also like the underlying integer ([0] = least significant bit): -// BitSet32 bs2 = ...; -// bs = (bs ^ bs2) & BitSet32(0xffff); -// The following additional functionality is provided: -// - Construction using an initializer list. -// BitSet bs { 1, 2, 4, 8 }; -// - Efficiently iterating through the set bits: -// for (int i : bs) -// [i is the *index* of a set bit] -// (This uses the appropriate CPU instruction to find the next set bit in one -// operation.) -// - Counting set bits using .Count() - see comment on that method. - -// TODO: use constexpr when MSVC gets out of the Dark Ages - -template -class BitSet -{ - static_assert(!std::is_signed::value, "BitSet should not be used with signed types"); -public: - // A reference to a particular bit, returned from operator[]. - class Ref - { - public: - Ref(Ref&& other) : m_bs(other.m_bs), m_mask(other.m_mask) {} - Ref(BitSet* bs, IntTy mask) : m_bs(bs), m_mask(mask) {} - operator bool() const { return (m_bs->m_val & m_mask) != 0; } - bool operator=(bool set) - { - m_bs->m_val = (m_bs->m_val & ~m_mask) | (set ? m_mask : 0); - return set; - } - private: - BitSet* m_bs; - IntTy m_mask; - }; - - // A STL-like iterator is required to be able to use range-based for loops. - class Iterator - { - public: - Iterator(const Iterator& other) : m_val(other.m_val), m_bit(other.m_bit) {} - Iterator(IntTy val, int bit) : m_val(val), m_bit(bit) {} - Iterator& operator=(Iterator other) { new (this) Iterator(other); return *this; } - int operator*() { return m_bit; } - Iterator& operator++() - { - if (m_val == 0) - { - m_bit = -1; - } - else - { - int bit = LeastSignificantSetBit(m_val); - m_val &= ~(1 << bit); - m_bit = bit; - } - return *this; - } - Iterator operator++(int _) - { - Iterator other(*this); - ++*this; - return other; - } - bool operator==(Iterator other) const { return m_bit == other.m_bit; } - bool operator!=(Iterator other) const { return m_bit != other.m_bit; } - private: - IntTy m_val; - int m_bit; - }; - - BitSet() : m_val(0) {} - explicit BitSet(IntTy val) : m_val(val) {} - BitSet(std::initializer_list init) - { - m_val = 0; - for (int bit : init) - m_val |= (IntTy)1 << bit; - } - - static BitSet AllTrue(size_t count) - { - return BitSet(count == sizeof(IntTy)*8 ? 
~(IntTy)0 : (((IntTy)1 << count) - 1)); - } - - Ref operator[](size_t bit) { return Ref(this, (IntTy)1 << bit); } - const Ref operator[](size_t bit) const { return (*const_cast(this))[bit]; } - bool operator==(BitSet other) const { return m_val == other.m_val; } - bool operator!=(BitSet other) const { return m_val != other.m_val; } - bool operator<(BitSet other) const { return m_val < other.m_val; } - bool operator>(BitSet other) const { return m_val > other.m_val; } - BitSet operator|(BitSet other) const { return BitSet(m_val | other.m_val); } - BitSet operator&(BitSet other) const { return BitSet(m_val & other.m_val); } - BitSet operator^(BitSet other) const { return BitSet(m_val ^ other.m_val); } - BitSet operator~() const { return BitSet(~m_val); } - BitSet& operator|=(BitSet other) { return *this = *this | other; } - BitSet& operator&=(BitSet other) { return *this = *this & other; } - BitSet& operator^=(BitSet other) { return *this = *this ^ other; } - operator uint32_t() = delete; - operator bool() { return m_val != 0; } - - // Warning: Even though on modern CPUs this is a single fast instruction, - // Dolphin's official builds do not currently assume POPCNT support on x86, - // so slower explicit bit twiddling is generated. Still should generally - // be faster than a loop. - unsigned int Count() const { return CountSetBits(m_val); } - - Iterator begin() const { Iterator it(m_val, 0); return ++it; } - Iterator end() const { return Iterator(m_val, -1); } - - IntTy m_val; -}; - -} // Common - -typedef Common::BitSet BitSet8; -typedef Common::BitSet BitSet16; -typedef Common::BitSet BitSet32; -typedef Common::BitSet BitSet64; diff --git a/src/common/code_block.h b/src/common/code_block.h deleted file mode 100644 index b1c8eb55..00000000 --- a/src/common/code_block.h +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2013 Dolphin Emulator Project -// Licensed under GPLv2 -// Refer to the license.txt file included. - -#pragma once - -#include - -#include "common/common_types.h" -#include "common/memory_util.h" - -// Everything that needs to generate code should inherit from this. -// You get memory management for free, plus, you can use all emitter functions without -// having to prefix them with gen-> or something similar. -// Example implementation: -// class JIT : public CodeBlock {} -template class CodeBlock : public T -{ -private: - // A privately used function to set the executable RAM space to something invalid. - // For debugging usefulness it should be used to set the RAM to a host specific breakpoint instruction - virtual void PoisonMemory() = 0; - -protected: - u8 *region; - size_t region_size; - -public: - CodeBlock() : region(nullptr), region_size(0) {} - virtual ~CodeBlock() { if (region) FreeCodeSpace(); } - - CodeBlock(const CodeBlock&) = delete; - CodeBlock& operator=(const CodeBlock&) = delete; - - // Call this before you generate any code. - void AllocCodeSpace(int size) - { - region_size = size; - region = (u8*)AllocateExecutableMemory(region_size); - T::SetCodePtr(region); - } - - // Always clear code space with breakpoints, so that if someone accidentally executes - // uninitialized, it just breaks into the debugger. - void ClearCodeSpace() - { - PoisonMemory(); - ResetCodePtr(); - } - - // Call this when shutting down. Don't rely on the destructor, even though it'll do the job. 
- void FreeCodeSpace() - { -#ifdef __SYMBIAN32__ - ResetExecutableMemory(region); -#else - FreeMemoryPages(region, region_size); -#endif - region = nullptr; - region_size = 0; - } - - bool IsInSpace(const u8 *ptr) - { - return (ptr >= region) && (ptr < (region + region_size)); - } - - // Cannot currently be undone. Will write protect the entire code region. - // Start over if you need to change the code (call FreeCodeSpace(), AllocCodeSpace()). - void WriteProtect() - { - WriteProtectMemory(region, region_size, true); - } - - void ResetCodePtr() - { - T::SetCodePtr(region); - } - - size_t GetSpaceLeft() const - { - return region_size - (T::GetCodePtr() - region); - } - - u8 *GetBasePtr() { - return region; - } - - size_t GetOffset(const u8 *ptr) const { - return ptr - region; - } -}; diff --git a/src/common/iterator_util.h b/src/common/iterator_util.h new file mode 100644 index 00000000..ebce315a --- /dev/null +++ b/src/common/iterator_util.h @@ -0,0 +1,39 @@ +/* This file is part of the dynarmic project. + * Copyright (c) 2016 MerryMage + * This software may be used and distributed according to the terms of the GNU + * General Public License version 2 or any later version. + */ + +#pragma once + +#include + +namespace Dynarmic { +namespace Common { + +namespace detail { + +template +struct ReverseAdapter { + T& iterable; + + auto begin() { + using namespace std; + return rbegin(iterable); + } + + auto end() { + using namespace std; + return rend(iterable); + } +}; + +} // namespace detail + +template +detail::ReverseAdapter Reverse(T&& iterable) { + return detail::ReverseAdapter{iterable}; +} + +} // namespace Common +} // namespace Dynarmic diff --git a/src/common/memory_util.cpp b/src/common/memory_util.cpp deleted file mode 100644 index 55ea11c7..00000000 --- a/src/common/memory_util.cpp +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#include "common/assert.h" -#include "common/memory_util.h" - -#ifdef _WIN32 - #include - #include - #include "common/string_util.h" -#else - #include - #include - #include -#endif - -#if !defined(_WIN32) && defined(ARCHITECTURE_X64) && !defined(MAP_32BIT) -#include -#define PAGE_MASK (getpagesize() - 1) -#define round_page(x) ((((unsigned long)(x)) + PAGE_MASK) & ~(PAGE_MASK)) -#endif - -// Generic function to get last error message. -// Call directly after the command or use the error num. -// This function might change the error code. -const char* GetLastErrorMsg() -{ - static const size_t buff_size = 255; - -#ifdef _WIN32 - static thread_local char err_str[buff_size] = {}; - - FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, nullptr, GetLastError(), - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - err_str, buff_size, nullptr); -#else - static __thread char err_str[buff_size] = {}; - - // Thread safe (XSI-compliant) - strerror_r(errno, err_str, buff_size); -#endif - - return err_str; -} - - -// This is purposely not a full wrapper for virtualalloc/mmap, but it -// provides exactly the primitive operations that Dolphin needs. 
- -void* AllocateExecutableMemory(size_t size, bool low) -{ -#if defined(_WIN32) - void* ptr = VirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE); -#else - static char* map_hint = nullptr; -#if defined(ARCHITECTURE_X64) && !defined(MAP_32BIT) - // This OS has no flag to enforce allocation below the 4 GB boundary, - // but if we hint that we want a low address it is very likely we will - // get one. - // An older version of this code used MAP_FIXED, but that has the side - // effect of discarding already mapped pages that happen to be in the - // requested virtual memory range (such as the emulated RAM, sometimes). - if (low && (!map_hint)) - map_hint = (char*)round_page(512*1024*1024); /* 0.5 GB rounded up to the next page */ -#endif - void* ptr = mmap(map_hint, size, PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_ANON | MAP_PRIVATE -#if defined(ARCHITECTURE_X64) && defined(MAP_32BIT) - | (low ? MAP_32BIT : 0) -#endif - , -1, 0); -#endif /* defined(_WIN32) */ - -#ifdef _WIN32 - if (ptr == nullptr) - { -#else - if (ptr == MAP_FAILED) - { - ptr = nullptr; -#endif - ASSERT_MSG(false, "Failed to allocate executable memory"); - } -#if !defined(_WIN32) && defined(ARCHITECTURE_X64) && !defined(MAP_32BIT) - else - { - if (low) - { - map_hint += size; - map_hint = (char*)round_page(map_hint); /* round up to the next page */ - } - } -#endif - -#if EMU_ARCH_BITS == 64 - if ((u64)ptr >= 0x80000000 && low == true) - ASSERT_MSG(false, "Executable memory ended up above 2GB!"); -#endif - - return ptr; -} - -void* AllocateMemoryPages(size_t size) -{ -#ifdef _WIN32 - void* ptr = VirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_READWRITE); -#else - void* ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); - - if (ptr == MAP_FAILED) - ptr = nullptr; -#endif - - if (ptr == nullptr) - ASSERT_MSG(false, "Failed to allocate raw memory"); - - return ptr; -} - -void* AllocateAlignedMemory(size_t size,size_t alignment) -{ -#ifdef _WIN32 - void* ptr = _aligned_malloc(size,alignment); -#else - void* ptr = nullptr; -#ifdef ANDROID - ptr = memalign(alignment, size); -#else - if (posix_memalign(&ptr, alignment, size) != 0) - ASSERT_MSG(false, "Failed to allocate aligned memory"); -#endif -#endif - - if (ptr == nullptr) - ASSERT_MSG(false, "Failed to allocate aligned memory"); - - return ptr; -} - -void FreeMemoryPages(void* ptr, size_t size) -{ - if (ptr) - { -#ifdef _WIN32 - if (!VirtualFree(ptr, 0, MEM_RELEASE)) - ASSERT_MSG(false, "FreeMemoryPages failed!\n%s", GetLastErrorMsg()); -#else - munmap(ptr, size); -#endif - } -} - -void FreeAlignedMemory(void* ptr) -{ - if (ptr) - { -#ifdef _WIN32 - _aligned_free(ptr); -#else - free(ptr); -#endif - } -} - -void WriteProtectMemory(void* ptr, size_t size, bool allowExecute) -{ -#ifdef _WIN32 - DWORD oldValue; - if (!VirtualProtect(ptr, size, allowExecute ? PAGE_EXECUTE_READ : PAGE_READONLY, &oldValue)) - ASSERT_MSG(false, "WriteProtectMemory failed!\n%s", GetLastErrorMsg()); -#else - mprotect(ptr, size, allowExecute ? (PROT_READ | PROT_EXEC) : PROT_READ); -#endif -} - -void UnWriteProtectMemory(void* ptr, size_t size, bool allowExecute) -{ -#ifdef _WIN32 - DWORD oldValue; - if (!VirtualProtect(ptr, size, allowExecute ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE, &oldValue)) - ASSERT_MSG(false, "UnWriteProtectMemory failed!\n%s", GetLastErrorMsg()); -#else - mprotect(ptr, size, allowExecute ? 
(PROT_READ | PROT_WRITE | PROT_EXEC) : PROT_WRITE | PROT_READ); -#endif -} - -std::string MemUsage() -{ - return ""; -} diff --git a/src/common/memory_util.h b/src/common/memory_util.h deleted file mode 100644 index 9bf37c44..00000000 --- a/src/common/memory_util.h +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include -#include - -void* AllocateExecutableMemory(size_t size, bool low = true); -void* AllocateMemoryPages(size_t size); -void FreeMemoryPages(void* ptr, size_t size); -void* AllocateAlignedMemory(size_t size,size_t alignment); -void FreeAlignedMemory(void* ptr); -void WriteProtectMemory(void* ptr, size_t size, bool executable = false); -void UnWriteProtectMemory(void* ptr, size_t size, bool allowExecute = false); -std::string MemUsage(); - -inline int GetPageSize() { return 4096; } diff --git a/src/common/x64/abi.cpp b/src/common/x64/abi.cpp deleted file mode 100644 index 955eb86c..00000000 --- a/src/common/x64/abi.cpp +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright (C) 2003 Dolphin Project. - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, version 2.0 or later versions. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License 2.0 for more details. - -// A copy of the GPL 2.0 should have been included with the program. -// If not, see http://www.gnu.org/licenses/ - -// Official SVN repository and contact information can be found at -// http://code.google.com/p/dolphin-emu/ - -#include "abi.h" -#include "emitter.h" - -using namespace Gen; - -// Shared code between Win64 and Unix64 - -void XEmitter::ABI_CalculateFrameSize(BitSet32 mask, size_t rsp_alignment, size_t needed_frame_size, size_t* shadowp, size_t* subtractionp, size_t* xmm_offsetp) { - size_t shadow = 0; -#if defined(_WIN32) - shadow = 0x20; -#endif - - int count = (mask & ABI_ALL_GPRS).Count(); - rsp_alignment -= count * 8; - size_t subtraction = 0; - int fpr_count = (mask & ABI_ALL_FPRS).Count(); - if (fpr_count) { - // If we have any XMMs to save, we must align the stack here. - subtraction = rsp_alignment & 0xf; - } - subtraction += 16 * fpr_count; - size_t xmm_base_subtraction = subtraction; - subtraction += needed_frame_size; - subtraction += shadow; - // Final alignment. - rsp_alignment -= subtraction; - subtraction += rsp_alignment & 0xf; - - *shadowp = shadow; - *subtractionp = subtraction; - *xmm_offsetp = subtraction - xmm_base_subtraction; -} - -size_t XEmitter::ABI_PushRegistersAndAdjustStack(BitSet32 mask, size_t rsp_alignment, size_t needed_frame_size) { - size_t shadow, subtraction, xmm_offset; - ABI_CalculateFrameSize(mask, rsp_alignment, needed_frame_size, &shadow, &subtraction, &xmm_offset); - - for (int r : mask & ABI_ALL_GPRS) - PUSH((X64Reg)r); - - if (subtraction) - SUB(64, R(RSP), subtraction >= 0x80 ? 
Imm32((u32)subtraction) : Imm8((u8)subtraction)); - - for (int x : mask & ABI_ALL_FPRS) { - MOVAPD(MDisp(RSP, (int)xmm_offset), (X64Reg)(x - 16)); - xmm_offset += 16; - } - - return shadow; -} - -void XEmitter::ABI_PopRegistersAndAdjustStack(BitSet32 mask, size_t rsp_alignment, size_t needed_frame_size) { - size_t shadow, subtraction, xmm_offset; - ABI_CalculateFrameSize(mask, rsp_alignment, needed_frame_size, &shadow, &subtraction, &xmm_offset); - - for (int x : mask & ABI_ALL_FPRS) { - MOVAPD((X64Reg) (x - 16), MDisp(RSP, (int)xmm_offset)); - xmm_offset += 16; - } - - if (subtraction) - ADD(64, R(RSP), subtraction >= 0x80 ? Imm32((u32)subtraction) : Imm8((u8)subtraction)); - - for (int r = 15; r >= 0; r--) { - if (mask[r]) - POP((X64Reg)r); - } -} - -// Common functions -void XEmitter::ABI_CallFunction(const void *func) { - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionC16(const void *func, u16 param1) { - MOV(32, R(ABI_PARAM1), Imm32((u32)param1)); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionCC16(const void *func, u32 param1, u16 param2) { - MOV(32, R(ABI_PARAM1), Imm32(param1)); - MOV(32, R(ABI_PARAM2), Imm32((u32)param2)); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionC(const void *func, u32 param1) { - MOV(32, R(ABI_PARAM1), Imm32(param1)); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionCC(const void *func, u32 param1, u32 param2) { - MOV(32, R(ABI_PARAM1), Imm32(param1)); - MOV(32, R(ABI_PARAM2), Imm32(param2)); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionCCC(const void *func, u32 param1, u32 param2, u32 param3) { - MOV(32, R(ABI_PARAM1), Imm32(param1)); - MOV(32, R(ABI_PARAM2), Imm32(param2)); - MOV(32, R(ABI_PARAM3), Imm32(param3)); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionCCP(const void *func, u32 param1, u32 param2, void *param3) { - MOV(32, R(ABI_PARAM1), Imm32(param1)); - MOV(32, R(ABI_PARAM2), Imm32(param2)); - MOV(64, R(ABI_PARAM3), ImmPtr(param3)); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionCCCP(const void *func, u32 param1, u32 param2, u32 param3, void *param4) { - MOV(32, R(ABI_PARAM1), 
Imm32(param1)); - MOV(32, R(ABI_PARAM2), Imm32(param2)); - MOV(32, R(ABI_PARAM3), Imm32(param3)); - MOV(64, R(ABI_PARAM4), ImmPtr(param4)); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionP(const void *func, void *param1) { - MOV(64, R(ABI_PARAM1), ImmPtr(param1)); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionPA(const void *func, void *param1, const Gen::OpArg &arg2) { - MOV(64, R(ABI_PARAM1), ImmPtr(param1)); - if (!arg2.IsSimpleReg(ABI_PARAM2)) - MOV(32, R(ABI_PARAM2), arg2); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionPAA(const void *func, void *param1, const Gen::OpArg &arg2, const Gen::OpArg &arg3) { - MOV(64, R(ABI_PARAM1), ImmPtr(param1)); - if (!arg2.IsSimpleReg(ABI_PARAM2)) - MOV(32, R(ABI_PARAM2), arg2); - if (!arg3.IsSimpleReg(ABI_PARAM3)) - MOV(32, R(ABI_PARAM3), arg3); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionPPC(const void *func, void *param1, void *param2, u32 param3) { - MOV(64, R(ABI_PARAM1), ImmPtr(param1)); - MOV(64, R(ABI_PARAM2), ImmPtr(param2)); - MOV(32, R(ABI_PARAM3), Imm32(param3)); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -// Pass a register as a parameter. -void XEmitter::ABI_CallFunctionR(const void *func, X64Reg reg1) { - if (reg1 != ABI_PARAM1) - MOV(32, R(ABI_PARAM1), R(reg1)); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -// Pass two registers as parameters. 
-void XEmitter::ABI_CallFunctionRR(const void *func, X64Reg reg1, X64Reg reg2) { - if (reg2 != ABI_PARAM1) { - if (reg1 != ABI_PARAM1) - MOV(64, R(ABI_PARAM1), R(reg1)); - if (reg2 != ABI_PARAM2) - MOV(64, R(ABI_PARAM2), R(reg2)); - } else { - if (reg2 != ABI_PARAM2) - MOV(64, R(ABI_PARAM2), R(reg2)); - if (reg1 != ABI_PARAM1) - MOV(64, R(ABI_PARAM1), R(reg1)); - } - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionAC(const void *func, const Gen::OpArg &arg1, u32 param2) -{ - if (!arg1.IsSimpleReg(ABI_PARAM1)) - MOV(32, R(ABI_PARAM1), arg1); - MOV(32, R(ABI_PARAM2), Imm32(param2)); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionACC(const void *func, const Gen::OpArg &arg1, u32 param2, u32 param3) -{ - if (!arg1.IsSimpleReg(ABI_PARAM1)) - MOV(32, R(ABI_PARAM1), arg1); - MOV(32, R(ABI_PARAM2), Imm32(param2)); - MOV(64, R(ABI_PARAM3), Imm64(param3)); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionA(const void *func, const Gen::OpArg &arg1) -{ - if (!arg1.IsSimpleReg(ABI_PARAM1)) - MOV(32, R(ABI_PARAM1), arg1); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} - -void XEmitter::ABI_CallFunctionAA(const void *func, const Gen::OpArg &arg1, const Gen::OpArg &arg2) -{ - if (!arg1.IsSimpleReg(ABI_PARAM1)) - MOV(32, R(ABI_PARAM1), arg1); - if (!arg2.IsSimpleReg(ABI_PARAM2)) - MOV(32, R(ABI_PARAM2), arg2); - u64 distance = u64(func) - (u64(code) + 5); - if (distance >= 0x0000000080000000ULL - && distance < 0xFFFFFFFF80000000ULL) { - // Far call - MOV(64, R(RAX), ImmPtr(func)); - CALLptr(R(RAX)); - } else { - CALL(func); - } -} \ No newline at end of file diff --git a/src/common/x64/abi.h b/src/common/x64/abi.h deleted file mode 100644 index de6d62fd..00000000 --- a/src/common/x64/abi.h +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2008 Dolphin Emulator Project -// Licensed under GPLv2+ -// Refer to the license.txt file included. - -#pragma once - -#include "common/bit_set.h" -#include "emitter.h" - -// x64 ABI:s, and helpers to help follow them when JIT-ing code. -// All convensions return values in EAX (+ possibly EDX). - -// Windows 64-bit -// * 4-reg "fastcall" variant, very new-skool stack handling -// * Callee moves stack pointer, to make room for shadow regs for the biggest function _it itself calls_ -// * Parameters passed in RCX, RDX, ... further parameters are MOVed into the allocated stack space. 
-// Scratch: RAX RCX RDX R8 R9 R10 R11 -// Callee-save: RBX RSI RDI RBP R12 R13 R14 R15 -// Parameters: RCX RDX R8 R9, further MOV-ed - -// Linux 64-bit -// * 6-reg "fastcall" variant, old skool stack handling (parameters are pushed) -// Scratch: RAX RCX RDX RSI RDI R8 R9 R10 R11 -// Callee-save: RBX RBP R12 R13 R14 R15 -// Parameters: RDI RSI RDX RCX R8 R9 - -#define ABI_ALL_FPRS BitSet32(0xffff0000) -#define ABI_ALL_GPRS BitSet32(0x0000ffff) - -#ifdef _WIN32 // 64-bit Windows - the really exotic calling convention - -#define ABI_PARAM1 RCX -#define ABI_PARAM2 RDX -#define ABI_PARAM3 R8 -#define ABI_PARAM4 R9 - -// xmm0-xmm15 use the upper 16 bits in the functions that push/pop registers. -#define ABI_ALL_CALLER_SAVED \ - (BitSet32 { RAX, RCX, RDX, R8, R9, R10, R11, \ - XMM0+16, XMM1+16, XMM2+16, XMM3+16, XMM4+16, XMM5+16 }) -#else //64-bit Unix / OS X - -#define ABI_PARAM1 RDI -#define ABI_PARAM2 RSI -#define ABI_PARAM3 RDX -#define ABI_PARAM4 RCX -#define ABI_PARAM5 R8 -#define ABI_PARAM6 R9 - -// TODO: Avoid pushing all 16 XMM registers when possible. Most functions we call probably -// don't actually clobber them. -#define ABI_ALL_CALLER_SAVED \ - (BitSet32 { RAX, RCX, RDX, RDI, RSI, R8, R9, R10, R11 } | \ - ABI_ALL_FPRS) -#endif // WIN32 - -#define ABI_ALL_CALLEE_SAVED (~ABI_ALL_CALLER_SAVED) - -#define ABI_RETURN RAX diff --git a/src/common/x64/cpu_detect.cpp b/src/common/x64/cpu_detect.cpp deleted file mode 100644 index 2ff21cd7..00000000 --- a/src/common/x64/cpu_detect.cpp +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#include -#include -#include - -#include "common/common_types.h" - -#include "cpu_detect.h" - -namespace Common { - -#ifndef _MSC_VER - -#ifdef __FreeBSD__ -#include -#include -#endif - -static inline void __cpuidex(int info[4], int function_id, int subfunction_id) { -#ifdef __FreeBSD__ - // Despite the name, this is just do_cpuid() with ECX as second input. - cpuid_count((u_int)function_id, (u_int)subfunction_id, (u_int*)info); -#else - info[0] = function_id; // eax - info[2] = subfunction_id; // ecx - __asm__( - "cpuid" - : "=a" (info[0]), - "=b" (info[1]), - "=c" (info[2]), - "=d" (info[3]) - : "a" (function_id), - "c" (subfunction_id) - ); -#endif -} - -static inline void __cpuid(int info[4], int function_id) { - return __cpuidex(info, function_id, 0); -} - -#define _XCR_XFEATURE_ENABLED_MASK 0 -static u64 _xgetbv(u32 index) { - u32 eax, edx; - __asm__ __volatile__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(index)); - return ((u64)edx << 32) | eax; -} - -#endif // ifndef _MSC_VER - -// Detects the various CPU features -static CPUCaps Detect() { - CPUCaps caps = {}; - - caps.num_cores = std::thread::hardware_concurrency(); - - // Assumes the CPU supports the CPUID instruction. 
Those that don't would likely not support - // Citra at all anyway - - int cpu_id[4]; - memset(caps.brand_string, 0, sizeof(caps.brand_string)); - - // Detect CPU's CPUID capabilities and grab CPU string - __cpuid(cpu_id, 0x00000000); - u32 max_std_fn = cpu_id[0]; // EAX - - std::memcpy(&caps.brand_string[0], &cpu_id[1], sizeof(int)); - std::memcpy(&caps.brand_string[4], &cpu_id[3], sizeof(int)); - std::memcpy(&caps.brand_string[8], &cpu_id[2], sizeof(int)); - - __cpuid(cpu_id, 0x80000000); - - u32 max_ex_fn = cpu_id[0]; - if (!strcmp(caps.brand_string, "GenuineIntel")) - caps.vendor = CPUVendor::INTEL; - else if (!strcmp(caps.brand_string, "AuthenticAMD")) - caps.vendor = CPUVendor::AMD; - else - caps.vendor = CPUVendor::OTHER; - -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif - - // Set reasonable default brand string even if brand string not available - strncpy(caps.cpu_string, caps.brand_string, sizeof(caps.cpu_string)); - caps.cpu_string[sizeof(caps.cpu_string) - 1] = '\0'; - -#ifdef _MSC_VER -#pragma warning(pop) -#endif - - // Detect family and other miscellaneous features - if (max_std_fn >= 1) { - __cpuid(cpu_id, 0x00000001); - - if ((cpu_id[3] >> 25) & 1) caps.sse = true; - if ((cpu_id[3] >> 26) & 1) caps.sse2 = true; - if ((cpu_id[2]) & 1) caps.sse3 = true; - if ((cpu_id[2] >> 9) & 1) caps.ssse3 = true; - if ((cpu_id[2] >> 19) & 1) caps.sse4_1 = true; - if ((cpu_id[2] >> 20) & 1) caps.sse4_2 = true; - if ((cpu_id[2] >> 22) & 1) caps.movbe = true; - if ((cpu_id[2] >> 25) & 1) caps.aes = true; - - if ((cpu_id[3] >> 24) & 1) { - caps.fxsave_fxrstor = true; - } - - // AVX support requires 3 separate checks: - // - Is the AVX bit set in CPUID? - // - Is the XSAVE bit set in CPUID? - // - XGETBV result has the XCR bit set. 
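// Spelled out in isolation, those three checks read as the sketch below
// (an illustrative helper, not part of the patch; it reuses the _xgetbv
// wrapper and _XCR_XFEATURE_ENABLED_MASK constant defined earlier in this
// file, and mask 0x6 tests XCR0 bits 1 and 2, i.e. the OS context-switches
// both the SSE and AVX halves of the YMM state):

static bool OsAndCpuSupportAvx(const int leaf1[4]) {
    const bool has_avx     = ((leaf1[2] >> 28) & 1) != 0; // CPUID.1:ECX.AVX
    const bool has_osxsave = ((leaf1[2] >> 27) & 1) != 0; // CPUID.1:ECX.OSXSAVE
    if (!has_avx || !has_osxsave)
        return false;
    // XGETBV only executes once OSXSAVE is set; 0x6 = XCR0.SSE | XCR0.AVX.
    return (_xgetbv(_XCR_XFEATURE_ENABLED_MASK) & 0x6) == 0x6;
}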
- if (((cpu_id[2] >> 28) & 1) && ((cpu_id[2] >> 27) & 1)) { - if ((_xgetbv(_XCR_XFEATURE_ENABLED_MASK) & 0x6) == 0x6) { - caps.avx = true; - if ((cpu_id[2] >> 12) & 1) - caps.fma = true; - } - } - - if (max_std_fn >= 7) { - __cpuidex(cpu_id, 0x00000007, 0x00000000); - // Can't enable AVX2 unless the XSAVE/XGETBV checks above passed - if ((cpu_id[1] >> 5) & 1) - caps.avx2 = caps.avx; - if ((cpu_id[1] >> 3) & 1) - caps.bmi1 = true; - if ((cpu_id[1] >> 8) & 1) - caps.bmi2 = true; - } - } - - caps.flush_to_zero = caps.sse; - - if (max_ex_fn >= 0x80000004) { - // Extract CPU model string - __cpuid(cpu_id, 0x80000002); - std::memcpy(caps.cpu_string, cpu_id, sizeof(cpu_id)); - __cpuid(cpu_id, 0x80000003); - std::memcpy(caps.cpu_string + 16, cpu_id, sizeof(cpu_id)); - __cpuid(cpu_id, 0x80000004); - std::memcpy(caps.cpu_string + 32, cpu_id, sizeof(cpu_id)); - } - - if (max_ex_fn >= 0x80000001) { - // Check for more features - __cpuid(cpu_id, 0x80000001); - if (cpu_id[2] & 1) caps.lahf_sahf_64 = true; - if ((cpu_id[2] >> 5) & 1) caps.lzcnt = true; - if ((cpu_id[2] >> 16) & 1) caps.fma4 = true; - if ((cpu_id[3] >> 29) & 1) caps.long_mode = true; - } - - return caps; -} - -const CPUCaps& GetCPUCaps() { - static CPUCaps caps = Detect(); - return caps; -} - -std::string GetCPUCapsString() { - auto caps = GetCPUCaps(); - - std::string sum(caps.cpu_string); - sum += " ("; - sum += caps.brand_string; - sum += ")"; - - if (caps.sse) sum += ", SSE"; - if (caps.sse2) { - sum += ", SSE2"; - if (!caps.flush_to_zero) sum += " (without DAZ)"; - } - - if (caps.sse3) sum += ", SSE3"; - if (caps.ssse3) sum += ", SSSE3"; - if (caps.sse4_1) sum += ", SSE4.1"; - if (caps.sse4_2) sum += ", SSE4.2"; - if (caps.avx) sum += ", AVX"; - if (caps.avx2) sum += ", AVX2"; - if (caps.bmi1) sum += ", BMI1"; - if (caps.bmi2) sum += ", BMI2"; - if (caps.fma) sum += ", FMA"; - if (caps.aes) sum += ", AES"; - if (caps.movbe) sum += ", MOVBE"; - if (caps.long_mode) sum += ", 64-bit support"; - - return sum; -} - -} // namespace Common diff --git a/src/common/x64/cpu_detect.h b/src/common/x64/cpu_detect.h deleted file mode 100644 index 0af3a8ad..00000000 --- a/src/common/x64/cpu_detect.h +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include - -namespace Common { - -/// x86/x64 CPU vendors that may be detected by this module -enum class CPUVendor { - INTEL, - AMD, - OTHER, -}; - -/// x86/x64 CPU capabilities that may be detected by this module -struct CPUCaps { - CPUVendor vendor; - char cpu_string[0x21]; - char brand_string[0x41]; - int num_cores; - bool sse; - bool sse2; - bool sse3; - bool ssse3; - bool sse4_1; - bool sse4_2; - bool lzcnt; - bool avx; - bool avx2; - bool bmi1; - bool bmi2; - bool fma; - bool fma4; - bool aes; - - // Support for the FXSAVE and FXRSTOR instructions - bool fxsave_fxrstor; - - bool movbe; - - // This flag indicates that the hardware supports some mode in which denormal inputs and outputs - // are automatically set to (signed) zero. 
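// (On x86 this corresponds to the FTZ and DAZ control bits in MXCSR; the
// detection above simply keys the flag off SSE support.)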
- bool flush_to_zero; - - // Support for LAHF and SAHF instructions in 64-bit mode - bool lahf_sahf_64; - - bool long_mode; -}; - -/** - * Gets the supported capabilities of the host CPU - * @return Reference to a CPUCaps struct with the detected host CPU capabilities - */ -const CPUCaps& GetCPUCaps(); - -/** - * Gets a string summary of the name and supported capabilities of the host CPU - * @return String summary - */ -std::string GetCPUCapsString(); - -} // namespace Common diff --git a/src/common/x64/emitter.cpp b/src/common/x64/emitter.cpp deleted file mode 100644 index ef569667..00000000 --- a/src/common/x64/emitter.cpp +++ /dev/null @@ -1,2018 +0,0 @@ -// Copyright (C) 2003 Dolphin Project. - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, version 2.0 or later versions. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License 2.0 for more details. - -// A copy of the GPL 2.0 should have been included with the program. -// If not, see http://www.gnu.org/licenses/ - -// Official SVN repository and contact information can be found at -// http://code.google.com/p/dolphin-emu/ - -#include -#include - -#include "common/assert.h" -#include "common/memory_util.h" - -#include "abi.h" -#include "cpu_detect.h" -#include "emitter.h" - -namespace Gen -{ - -struct NormalOpDef -{ - u8 toRm8, toRm32, fromRm8, fromRm32, imm8, imm32, simm8, eaximm8, eaximm32, ext; -}; - -// 0xCC is code for invalid combination of immediates -static const NormalOpDef normalops[11] = -{ - {0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x83, 0x04, 0x05, 0}, //ADD - {0x10, 0x11, 0x12, 0x13, 0x80, 0x81, 0x83, 0x14, 0x15, 2}, //ADC - - {0x28, 0x29, 0x2A, 0x2B, 0x80, 0x81, 0x83, 0x2C, 0x2D, 5}, //SUB - {0x18, 0x19, 0x1A, 0x1B, 0x80, 0x81, 0x83, 0x1C, 0x1D, 3}, //SBB - - {0x20, 0x21, 0x22, 0x23, 0x80, 0x81, 0x83, 0x24, 0x25, 4}, //AND - {0x08, 0x09, 0x0A, 0x0B, 0x80, 0x81, 0x83, 0x0C, 0x0D, 1}, //OR - - {0x30, 0x31, 0x32, 0x33, 0x80, 0x81, 0x83, 0x34, 0x35, 6}, //XOR - {0x88, 0x89, 0x8A, 0x8B, 0xC6, 0xC7, 0xCC, 0xCC, 0xCC, 0}, //MOV - - {0x84, 0x85, 0x84, 0x85, 0xF6, 0xF7, 0xCC, 0xA8, 0xA9, 0}, //TEST (to == from) - {0x38, 0x39, 0x3A, 0x3B, 0x80, 0x81, 0x83, 0x3C, 0x3D, 7}, //CMP - - {0x86, 0x87, 0x86, 0x87, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 7}, //XCHG -}; - -enum NormalSSEOps -{ - sseCMP = 0xC2, - sseADD = 0x58, //ADD - sseSUB = 0x5C, //SUB - sseAND = 0x54, //AND - sseANDN = 0x55, //ANDN - sseOR = 0x56, - sseXOR = 0x57, - sseMUL = 0x59, //MUL - sseDIV = 0x5E, //DIV - sseMIN = 0x5D, //MIN - sseMAX = 0x5F, //MAX - sseCOMIS = 0x2F, //COMIS - sseUCOMIS = 0x2E, //UCOMIS - sseSQRT = 0x51, //SQRT - sseRSQRT = 0x52, //RSQRT (NO DOUBLE PRECISION!!!) 
- sseRCP = 0x53, //RCP - sseMOVAPfromRM = 0x28, //MOVAP from RM - sseMOVAPtoRM = 0x29, //MOVAP to RM - sseMOVUPfromRM = 0x10, //MOVUP from RM - sseMOVUPtoRM = 0x11, //MOVUP to RM - sseMOVLPfromRM= 0x12, - sseMOVLPtoRM = 0x13, - sseMOVHPfromRM= 0x16, - sseMOVHPtoRM = 0x17, - sseMOVHLPS = 0x12, - sseMOVLHPS = 0x16, - sseMOVDQfromRM = 0x6F, - sseMOVDQtoRM = 0x7F, - sseMASKMOVDQU = 0xF7, - sseLDDQU = 0xF0, - sseSHUF = 0xC6, - sseMOVNTDQ = 0xE7, - sseMOVNTP = 0x2B, - sseHADD = 0x7C, -}; - - -void XEmitter::SetCodePtr(u8 *ptr) -{ - code = ptr; -} - -const u8 *XEmitter::GetCodePtr() const -{ - return code; -} - -u8 *XEmitter::GetWritableCodePtr() -{ - return code; -} - -void XEmitter::Write8(u8 value) -{ - *code++ = value; -} - -void XEmitter::Write16(u16 value) -{ - std::memcpy(code, &value, sizeof(u16)); - code += sizeof(u16); -} - -void XEmitter::Write32(u32 value) -{ - std::memcpy(code, &value, sizeof(u32)); - code += sizeof(u32); -} - -void XEmitter::Write64(u64 value) -{ - std::memcpy(code, &value, sizeof(u64)); - code += sizeof(u64); -} - -void XEmitter::ReserveCodeSpace(int bytes) -{ - for (int i = 0; i < bytes; i++) - *code++ = 0xCC; -} - -const u8 *XEmitter::AlignCode4() -{ - int c = int((u64)code & 3); - if (c) - ReserveCodeSpace(4-c); - return code; -} - -const u8 *XEmitter::AlignCode16() -{ - int c = int((u64)code & 15); - if (c) - ReserveCodeSpace(16-c); - return code; -} - -const u8 *XEmitter::AlignCodePage() -{ - int c = int((u64)code & 4095); - if (c) - ReserveCodeSpace(4096-c); - return code; -} - -// This operation modifies flags; check to see the flags are locked. -// If the flags are locked, we should immediately and loudly fail before -// causing a subtle JIT bug. -void XEmitter::CheckFlags() -{ - ASSERT_MSG(!flags_locked, "Attempt to modify flags while flags locked!"); -} - -void XEmitter::WriteModRM(int mod, int reg, int rm) -{ - Write8((u8)((mod << 6) | ((reg & 7) << 3) | (rm & 7))); -} - -void XEmitter::WriteSIB(int scale, int index, int base) -{ - Write8((u8)((scale << 6) | ((index & 7) << 3) | (base & 7))); -} - -void OpArg::WriteRex(XEmitter *emit, int opBits, int bits, int customOp) const -{ - if (customOp == -1) customOp = operandReg; -#ifdef ARCHITECTURE_x86_64 - u8 op = 0x40; - // REX.W (whether operation is a 64-bit operation) - if (opBits == 64) op |= 8; - // REX.R (whether ModR/M reg field refers to R8-R15. - if (customOp & 8) op |= 4; - // REX.X (whether ModR/M SIB index field refers to R8-R15) - if (indexReg & 8) op |= 2; - // REX.B (whether ModR/M rm or SIB base or opcode reg field refers to R8-R15) - if (offsetOrBaseReg & 8) op |= 1; - // Write REX if wr have REX bits to write, or if the operation accesses - // SIL, DIL, BPL, or SPL. - if (op != 0x40 || - (scale == SCALE_NONE && bits == 8 && (offsetOrBaseReg & 0x10c) == 4) || - (opBits == 8 && (customOp & 0x10c) == 4)) - { - emit->Write8(op); - // Check the operation doesn't access AH, BH, CH, or DH. 
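// (Once a REX prefix is present, byte-register encodings 4-7 select
// SPL/BPL/SIL/DIL rather than AH/CH/DH/BH, so the high-byte registers become
// unreachable. The emitter marks AH..BH with flag bit 0x100; only the plain
// indices 4..7, the REX-only byte registers, satisfy (reg & 0x10c) == 4.
// A sketch of the two predicates the assertions below rely on, with
// illustrative names:)

static bool NeedsRexByteReg(int reg) { return (reg & 0x10c) == 4; } // SPL/BPL/SIL/DIL
static bool IsHighByteReg(int reg)   { return (reg & 0x100) != 0; } // AH/CH/DH/BH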
- DEBUG_ASSERT((offsetOrBaseReg & 0x100) == 0); - DEBUG_ASSERT((customOp & 0x100) == 0); - } -#else - DEBUG_ASSERT(opBits != 64); - DEBUG_ASSERT((customOp & 8) == 0 || customOp == -1); - DEBUG_ASSERT((indexReg & 8) == 0); - DEBUG_ASSERT((offsetOrBaseReg & 8) == 0); - DEBUG_ASSERT(opBits != 8 || (customOp & 0x10c) != 4 || customOp == -1); - DEBUG_ASSERT(scale == SCALE_ATREG || bits != 8 || (offsetOrBaseReg & 0x10c) != 4); -#endif -} - -void OpArg::WriteVex(XEmitter* emit, X64Reg regOp1, X64Reg regOp2, int L, int pp, int mmmmm, int W) const -{ - int R = !(regOp1 & 8); - int X = !(indexReg & 8); - int B = !(offsetOrBaseReg & 8); - - int vvvv = (regOp2 == X64Reg::INVALID_REG) ? 0xf : (regOp2 ^ 0xf); - - // do we need any VEX fields that only appear in the three-byte form? - if (X == 1 && B == 1 && W == 0 && mmmmm == 1) - { - u8 RvvvvLpp = (R << 7) | (vvvv << 3) | (L << 2) | pp; - emit->Write8(0xC5); - emit->Write8(RvvvvLpp); - } - else - { - u8 RXBmmmmm = (R << 7) | (X << 6) | (B << 5) | mmmmm; - u8 WvvvvLpp = (W << 7) | (vvvv << 3) | (L << 2) | pp; - emit->Write8(0xC4); - emit->Write8(RXBmmmmm); - emit->Write8(WvvvvLpp); - } -} - -void OpArg::WriteRest(XEmitter *emit, int extraBytes, X64Reg _operandReg, - bool warn_64bit_offset) const -{ - if (_operandReg == INVALID_REG) - _operandReg = (X64Reg)this->operandReg; - int mod = 0; - int ireg = indexReg; - bool SIB = false; - int _offsetOrBaseReg = this->offsetOrBaseReg; - - if (scale == SCALE_RIP) //Also, on 32-bit, just an immediate address - { - // Oh, RIP addressing. - _offsetOrBaseReg = 5; - emit->WriteModRM(0, _operandReg, _offsetOrBaseReg); - //TODO : add some checks -#ifdef ARCHITECTURE_x86_64 - u64 ripAddr = (u64)emit->GetCodePtr() + 4 + extraBytes; - s64 distance = (s64)offset - (s64)ripAddr; - ASSERT_MSG( - (distance < 0x80000000LL && - distance >= -0x80000000LL) || - !warn_64bit_offset, - "WriteRest: op out of range (0x%" PRIx64 " uses 0x%" PRIx64 ")", - ripAddr, offset); - s32 offs = (s32)distance; - emit->Write32((u32)offs); -#else - emit->Write32((u32)offset); -#endif - return; - } - - if (scale == 0) - { - // Oh, no memory, Just a reg. - mod = 3; //11 - } - else if (scale >= 1) - { - //Ah good, no scaling. - if (scale == SCALE_ATREG && !((_offsetOrBaseReg & 7) == 4 || (_offsetOrBaseReg & 7) == 5)) - { - //Okay, we're good. No SIB necessary. - int ioff = (int)offset; - if (ioff == 0) - { - mod = 0; - } - else if (ioff<-128 || ioff>127) - { - mod = 2; //32-bit displacement - } - else - { - mod = 1; //8-bit displacement - } - } - else if (scale >= SCALE_NOBASE_2 && scale <= SCALE_NOBASE_8) - { - SIB = true; - mod = 0; - _offsetOrBaseReg = 5; - } - else //if (scale != SCALE_ATREG) - { - if ((_offsetOrBaseReg & 7) == 4) //this would occupy the SIB encoding :( - { - //So we have to fake it with SIB encoding :( - SIB = true; - } - - if (scale >= SCALE_1 && scale < SCALE_ATREG) - { - SIB = true; - } - - if (scale == SCALE_ATREG && ((_offsetOrBaseReg & 7) == 4)) - { - SIB = true; - ireg = _offsetOrBaseReg; - } - - //Okay, we're fine. Just disp encoding. - //We need displacement. Which size? - int ioff = (int)(s64)offset; - if (ioff < -128 || ioff > 127) - { - mod = 2; //32-bit displacement - } - else - { - mod = 1; //8-bit displacement - } - } - } - - // Okay. Time to do the actual writing - // ModRM byte: - int oreg = _offsetOrBaseReg; - if (SIB) - oreg = 4; - - // TODO(ector): WTF is this if about? 
I don't remember writing it :-) - //if (RIP) - // oreg = 5; - - emit->WriteModRM(mod, _operandReg&7, oreg&7); - - if (SIB) - { - //SIB byte - int ss; - switch (scale) - { - case SCALE_NONE: _offsetOrBaseReg = 4; ss = 0; break; //RSP - case SCALE_1: ss = 0; break; - case SCALE_2: ss = 1; break; - case SCALE_4: ss = 2; break; - case SCALE_8: ss = 3; break; - case SCALE_NOBASE_2: ss = 1; break; - case SCALE_NOBASE_4: ss = 2; break; - case SCALE_NOBASE_8: ss = 3; break; - case SCALE_ATREG: ss = 0; break; - default: ASSERT_MSG(0, "Invalid scale for SIB byte"); ss = 0; break; - } - emit->Write8((u8)((ss << 6) | ((ireg&7)<<3) | (_offsetOrBaseReg&7))); - } - - if (mod == 1) //8-bit disp - { - emit->Write8((u8)(s8)(s32)offset); - } - else if (mod == 2 || (scale >= SCALE_NOBASE_2 && scale <= SCALE_NOBASE_8)) //32-bit disp - { - emit->Write32((u32)offset); - } -} - -// W = operand extended width (1 if 64-bit) -// R = register# upper bit -// X = scale amnt upper bit -// B = base register# upper bit -void XEmitter::Rex(int w, int r, int x, int b) -{ - w = w ? 1 : 0; - r = r ? 1 : 0; - x = x ? 1 : 0; - b = b ? 1 : 0; - u8 rx = (u8)(0x40 | (w << 3) | (r << 2) | (x << 1) | (b)); - if (rx != 0x40) - Write8(rx); -} - -void XEmitter::JMP(const u8* addr, bool force5Bytes) -{ - u64 fn = (u64)addr; - if (!force5Bytes) - { - s64 distance = (s64)(fn - ((u64)code + 2)); - ASSERT_MSG(distance >= -0x80 && distance < 0x80, - "Jump target too far away, needs force5Bytes = true"); - //8 bits will do - Write8(0xEB); - Write8((u8)(s8)distance); - } - else - { - s64 distance = (s64)(fn - ((u64)code + 5)); - - ASSERT_MSG( - distance >= -0x80000000LL && distance < 0x80000000LL, - "Jump target too far away, needs indirect register"); - Write8(0xE9); - Write32((u32)(s32)distance); - } -} - -void XEmitter::JMPptr(const OpArg& arg2) -{ - OpArg arg = arg2; - if (arg.IsImm()) ASSERT_MSG(0, "JMPptr - Imm argument"); - arg.operandReg = 4; - arg.WriteRex(this, 0, 0); - Write8(0xFF); - arg.WriteRest(this); -} - -//Can be used to trap other processors, before overwriting their code -// not used in dolphin -void XEmitter::JMPself() -{ - Write8(0xEB); - Write8(0xFE); -} - -void XEmitter::CALLptr(OpArg arg) -{ - if (arg.IsImm()) ASSERT_MSG(0, "CALLptr - Imm argument"); - arg.operandReg = 2; - arg.WriteRex(this, 0, 0); - Write8(0xFF); - arg.WriteRest(this); -} - -void XEmitter::CALL(const void* fnptr) -{ - u64 distance = u64(fnptr) - (u64(code) + 5); - ASSERT_MSG( - distance < 0x0000000080000000ULL || - distance >= 0xFFFFFFFF80000000ULL, - "CALL out of range (%p calls %p)", code, fnptr); - Write8(0xE8); - Write32(u32(distance)); -} - -FixupBranch XEmitter::CALL() -{ - FixupBranch branch; - branch.type = 1; - branch.ptr = code + 5; - - Write8(0xE8); - Write32(0); - - return branch; -} - -FixupBranch XEmitter::J(bool force5bytes) -{ - FixupBranch branch; - branch.type = force5bytes ? 1 : 0; - branch.ptr = code + (force5bytes ? 5 : 2); - if (!force5bytes) - { - //8 bits will do - Write8(0xEB); - Write8(0); - } - else - { - Write8(0xE9); - Write32(0); - } - return branch; -} - -FixupBranch XEmitter::J_CC(CCFlags conditionCode, bool force5bytes) -{ - FixupBranch branch; - branch.type = force5bytes ? 1 : 0; - branch.ptr = code + (force5bytes ? 
6 : 2); - if (!force5bytes) - { - //8 bits will do - Write8(0x70 + conditionCode); - Write8(0); - } - else - { - Write8(0x0F); - Write8(0x80 + conditionCode); - Write32(0); - } - return branch; -} - -void XEmitter::J_CC(CCFlags conditionCode, const u8* addr, bool force5bytes) -{ - u64 fn = (u64)addr; - s64 distance = (s64)(fn - ((u64)code + 2)); - if (distance < -0x80 || distance >= 0x80 || force5bytes) - { - distance = (s64)(fn - ((u64)code + 6)); - ASSERT_MSG( - distance >= -0x80000000LL && distance < 0x80000000LL, - "Jump target too far away, needs indirect register"); - Write8(0x0F); - Write8(0x80 + conditionCode); - Write32((u32)(s32)distance); - } - else - { - Write8(0x70 + conditionCode); - Write8((u8)(s8)distance); - } -} - -void XEmitter::SetJumpTarget(const FixupBranch& branch) -{ - if (branch.type == 0) - { - s64 distance = (s64)(code - branch.ptr); - ASSERT_MSG(distance >= -0x80 && distance < 0x80, "Jump target too far away, needs force5Bytes = true"); - branch.ptr[-1] = (u8)(s8)distance; - } - else if (branch.type == 1) - { - s64 distance = (s64)(code - branch.ptr); - ASSERT_MSG(distance >= -0x80000000LL && distance < 0x80000000LL, "Jump target too far away, needs indirect register"); - ((s32*)branch.ptr)[-1] = (s32)distance; - } -} - -void XEmitter::SetJumpTarget(const FixupBranch& branch, const u8* target) -{ - if (branch.type == 0) - { - s64 distance = (s64)(target - branch.ptr); - ASSERT_MSG(distance >= -0x80 && distance < 0x80, "Jump target too far away, needs force5Bytes = true"); - branch.ptr[-1] = (u8)(s8)distance; - } - else if (branch.type == 1) - { - s64 distance = (s64)(target - branch.ptr); - ASSERT_MSG(distance >= -0x80000000LL && distance < 0x80000000LL, "Jump target too far away, needs indirect register"); - ((s32*)branch.ptr)[-1] = (s32)distance; - } -} - -//Single byte opcodes -//There is no PUSHAD/POPAD in 64-bit mode. -void XEmitter::INT3() {Write8(0xCC);} -void XEmitter::RET() {Write8(0xC3);} -void XEmitter::RET_FAST() {Write8(0xF3); Write8(0xC3);} //two-byte return (rep ret) - recommended by AMD optimization manual for the case of jumping to a ret - -// The first sign of decadence: optimized NOPs. -void XEmitter::NOP(size_t size) -{ - DEBUG_ASSERT((int)size > 0); - while (true) - { - switch (size) - { - case 0: - return; - case 1: - Write8(0x90); - return; - case 2: - Write8(0x66); Write8(0x90); - return; - case 3: - Write8(0x0F); Write8(0x1F); Write8(0x00); - return; - case 4: - Write8(0x0F); Write8(0x1F); Write8(0x40); Write8(0x00); - return; - case 5: - Write8(0x0F); Write8(0x1F); Write8(0x44); Write8(0x00); - Write8(0x00); - return; - case 6: - Write8(0x66); Write8(0x0F); Write8(0x1F); Write8(0x44); - Write8(0x00); Write8(0x00); - return; - case 7: - Write8(0x0F); Write8(0x1F); Write8(0x80); Write8(0x00); - Write8(0x00); Write8(0x00); Write8(0x00); - return; - case 8: - Write8(0x0F); Write8(0x1F); Write8(0x84); Write8(0x00); - Write8(0x00); Write8(0x00); Write8(0x00); Write8(0x00); - return; - case 9: - Write8(0x66); Write8(0x0F); Write8(0x1F); Write8(0x84); - Write8(0x00); Write8(0x00); Write8(0x00); Write8(0x00); - Write8(0x00); - return; - case 10: - Write8(0x66); Write8(0x66); Write8(0x0F); Write8(0x1F); - Write8(0x84); Write8(0x00); Write8(0x00); Write8(0x00); - Write8(0x00); Write8(0x00); - return; - default: - // Even though x86 instructions are allowed to be up to 15 bytes long, - // AMD advises against using NOPs longer than 11 bytes because they - // carry a performance penalty on CPUs older than AMD family 16h. 
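// (Hence the default case below emits the longest recommended form, the
// 11-byte NOP 66 66 66 0F 1F 84 00 00 00 00 00, then subtracts 11 from
// size and loops.)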
- Write8(0x66); Write8(0x66); Write8(0x66); Write8(0x0F); - Write8(0x1F); Write8(0x84); Write8(0x00); Write8(0x00); - Write8(0x00); Write8(0x00); Write8(0x00); - size -= 11; - continue; - } - } -} - -void XEmitter::PAUSE() {Write8(0xF3); NOP();} //use in tight spinloops for energy saving on some cpu -void XEmitter::CLC() {CheckFlags(); Write8(0xF8);} //clear carry -void XEmitter::CMC() {CheckFlags(); Write8(0xF5);} //flip carry -void XEmitter::STC() {CheckFlags(); Write8(0xF9);} //set carry - -//TODO: xchg ah, al ??? -void XEmitter::XCHG_AHAL() -{ - Write8(0x86); - Write8(0xe0); - // alt. 86 c4 -} - -//These two can not be executed on early Intel 64-bit CPU:s, only on AMD! -void XEmitter::LAHF() {Write8(0x9F);} -void XEmitter::SAHF() {CheckFlags(); Write8(0x9E);} - -void XEmitter::PUSHF() {Write8(0x9C);} -void XEmitter::POPF() {CheckFlags(); Write8(0x9D);} - -void XEmitter::LFENCE() {Write8(0x0F); Write8(0xAE); Write8(0xE8);} -void XEmitter::MFENCE() {Write8(0x0F); Write8(0xAE); Write8(0xF0);} -void XEmitter::SFENCE() {Write8(0x0F); Write8(0xAE); Write8(0xF8);} - -void XEmitter::WriteSimple1Byte(int bits, u8 byte, X64Reg reg) -{ - if (bits == 16) - Write8(0x66); - Rex(bits == 64, 0, 0, (int)reg >> 3); - Write8(byte + ((int)reg & 7)); -} - -void XEmitter::WriteSimple2Byte(int bits, u8 byte1, u8 byte2, X64Reg reg) -{ - if (bits == 16) - Write8(0x66); - Rex(bits==64, 0, 0, (int)reg >> 3); - Write8(byte1); - Write8(byte2 + ((int)reg & 7)); -} - -void XEmitter::CWD(int bits) -{ - if (bits == 16) - Write8(0x66); - Rex(bits == 64, 0, 0, 0); - Write8(0x99); -} - -void XEmitter::CBW(int bits) -{ - if (bits == 8) - Write8(0x66); - Rex(bits == 32, 0, 0, 0); - Write8(0x98); -} - -//Simple opcodes - - -//push/pop do not need wide to be 64-bit -void XEmitter::PUSH(X64Reg reg) {WriteSimple1Byte(32, 0x50, reg);} -void XEmitter::POP(X64Reg reg) {WriteSimple1Byte(32, 0x58, reg);} - -void XEmitter::PUSH(int bits, const OpArg& reg) -{ - if (reg.IsSimpleReg()) - PUSH(reg.GetSimpleReg()); - else if (reg.IsImm()) - { - switch (reg.GetImmBits()) - { - case 8: - Write8(0x6A); - Write8((u8)(s8)reg.offset); - break; - case 16: - Write8(0x66); - Write8(0x68); - Write16((u16)(s16)(s32)reg.offset); - break; - case 32: - Write8(0x68); - Write32((u32)reg.offset); - break; - default: - ASSERT_MSG(0, "PUSH - Bad imm bits"); - break; - } - } - else - { - if (bits == 16) - Write8(0x66); - reg.WriteRex(this, bits, bits); - Write8(0xFF); - reg.WriteRest(this, 0, (X64Reg)6); - } -} - -void XEmitter::POP(int /*bits*/, const OpArg& reg) -{ - if (reg.IsSimpleReg()) - POP(reg.GetSimpleReg()); - else - ASSERT_MSG(0, "POP - Unsupported encoding"); -} - -void XEmitter::BSWAP(int bits, X64Reg reg) -{ - if (bits >= 32) - { - WriteSimple2Byte(bits, 0x0F, 0xC8, reg); - } - else if (bits == 16) - { - ROL(16, R(reg), Imm8(8)); - } - else if (bits == 8) - { - // Do nothing - can't bswap a single byte... - } - else - { - ASSERT_MSG(0, "BSWAP - Wrong number of bits"); - } -} - -// Undefined opcode - reserved -// If we ever need a way to always cause a non-breakpoint hard exception... 
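// (The encoding is 0F 0B; unlike a stray byte, UD2 is architecturally
// guaranteed to raise #UD on every x86, which makes it a reliable trap.)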
-void XEmitter::UD2() -{ - Write8(0x0F); - Write8(0x0B); -} - -void XEmitter::PREFETCH(PrefetchLevel level, OpArg arg) -{ - ASSERT_MSG(!arg.IsImm(), "PREFETCH - Imm argument"); - arg.operandReg = (u8)level; - arg.WriteRex(this, 0, 0); - Write8(0x0F); - Write8(0x18); - arg.WriteRest(this); -} - -void XEmitter::SETcc(CCFlags flag, OpArg dest) -{ - ASSERT_MSG(!dest.IsImm(), "SETcc - Imm argument"); - dest.operandReg = 0; - dest.WriteRex(this, 0, 8); - Write8(0x0F); - Write8(0x90 + (u8)flag); - dest.WriteRest(this); -} - -void XEmitter::CMOVcc(int bits, X64Reg dest, OpArg src, CCFlags flag) -{ - ASSERT_MSG(!src.IsImm(), "CMOVcc - Imm argument"); - ASSERT_MSG(bits != 8, "CMOVcc - 8 bits unsupported"); - if (bits == 16) - Write8(0x66); - src.operandReg = dest; - src.WriteRex(this, bits, bits); - Write8(0x0F); - Write8(0x40 + (u8)flag); - src.WriteRest(this); -} - -void XEmitter::WriteMulDivType(int bits, OpArg src, int ext) -{ - ASSERT_MSG(!src.IsImm(), "WriteMulDivType - Imm argument"); - CheckFlags(); - src.operandReg = ext; - if (bits == 16) - Write8(0x66); - src.WriteRex(this, bits, bits, 0); - if (bits == 8) - { - Write8(0xF6); - } - else - { - Write8(0xF7); - } - src.WriteRest(this); -} - -void XEmitter::MUL(int bits, const OpArg& src) {WriteMulDivType(bits, src, 4);} -void XEmitter::DIV(int bits, const OpArg& src) {WriteMulDivType(bits, src, 6);} -void XEmitter::IMUL(int bits, const OpArg& src) {WriteMulDivType(bits, src, 5);} -void XEmitter::IDIV(int bits, const OpArg& src) {WriteMulDivType(bits, src, 7);} -void XEmitter::NEG(int bits, const OpArg& src) {WriteMulDivType(bits, src, 3);} -void XEmitter::NOT(int bits, const OpArg& src) {WriteMulDivType(bits, src, 2);} - -void XEmitter::WriteBitSearchType(int bits, X64Reg dest, OpArg src, u8 byte2, bool rep) -{ - ASSERT_MSG(!src.IsImm(), "WriteBitSearchType - Imm argument"); - CheckFlags(); - src.operandReg = (u8)dest; - if (bits == 16) - Write8(0x66); - if (rep) - Write8(0xF3); - src.WriteRex(this, bits, bits); - Write8(0x0F); - Write8(byte2); - src.WriteRest(this); -} - -void XEmitter::MOVNTI(int bits, const OpArg& dest, X64Reg src) -{ - if (bits <= 16) - ASSERT_MSG(0, "MOVNTI - bits<=16"); - WriteBitSearchType(bits, src, dest, 0xC3); -} - -void XEmitter::BSF(int bits, X64Reg dest, const OpArg& src) {WriteBitSearchType(bits,dest,src,0xBC);} // Bottom bit to top bit -void XEmitter::BSR(int bits, X64Reg dest, const OpArg& src) {WriteBitSearchType(bits,dest,src,0xBD);} // Top bit to bottom bit - -void XEmitter::TZCNT(int bits, X64Reg dest, const OpArg& src) -{ - CheckFlags(); - if (!Common::GetCPUCaps().bmi1) - ASSERT_MSG(0, "Trying to use BMI1 on a system that doesn't support it. Bad programmer."); - WriteBitSearchType(bits, dest, src, 0xBC, true); -} -void XEmitter::LZCNT(int bits, X64Reg dest, const OpArg& src) -{ - CheckFlags(); - if (!Common::GetCPUCaps().lzcnt) - ASSERT_MSG(0, "Trying to use LZCNT on a system that doesn't support it. 
Bad programmer."); - WriteBitSearchType(bits, dest, src, 0xBD, true); -} - -void XEmitter::MOVSX(int dbits, int sbits, X64Reg dest, OpArg src) -{ - ASSERT_MSG(!src.IsImm(), "MOVSX - Imm argument"); - if (dbits == sbits) - { - MOV(dbits, R(dest), src); - return; - } - src.operandReg = (u8)dest; - if (dbits == 16) - Write8(0x66); - src.WriteRex(this, dbits, sbits); - if (sbits == 8) - { - Write8(0x0F); - Write8(0xBE); - } - else if (sbits == 16) - { - Write8(0x0F); - Write8(0xBF); - } - else if (sbits == 32 && dbits == 64) - { - Write8(0x63); - } - else - { - ASSERT(0); - } - src.WriteRest(this); -} - -void XEmitter::MOVZX(int dbits, int sbits, X64Reg dest, OpArg src) -{ - ASSERT_MSG(!src.IsImm(), "MOVZX - Imm argument"); - if (dbits == sbits) - { - MOV(dbits, R(dest), src); - return; - } - src.operandReg = (u8)dest; - if (dbits == 16) - Write8(0x66); - //the 32bit result is automatically zero extended to 64bit - src.WriteRex(this, dbits == 64 ? 32 : dbits, sbits); - if (sbits == 8) - { - Write8(0x0F); - Write8(0xB6); - } - else if (sbits == 16) - { - Write8(0x0F); - Write8(0xB7); - } - else if (sbits == 32 && dbits == 64) - { - Write8(0x8B); - } - else - { - ASSERT_MSG(0, "MOVZX - Invalid size"); - } - src.WriteRest(this); -} - -void XEmitter::MOVBE(int bits, const OpArg& dest, const OpArg& src) -{ - ASSERT_MSG(Common::GetCPUCaps().movbe, "Generating MOVBE on a system that does not support it."); - if (bits == 8) - { - MOV(bits, dest, src); - return; - } - - if (bits == 16) - Write8(0x66); - - if (dest.IsSimpleReg()) - { - ASSERT_MSG(!src.IsSimpleReg() && !src.IsImm(), "MOVBE: Loading from !mem"); - src.WriteRex(this, bits, bits, dest.GetSimpleReg()); - Write8(0x0F); Write8(0x38); Write8(0xF0); - src.WriteRest(this, 0, dest.GetSimpleReg()); - } - else if (src.IsSimpleReg()) - { - ASSERT_MSG(!dest.IsSimpleReg() && !dest.IsImm(), "MOVBE: Storing to !mem"); - dest.WriteRex(this, bits, bits, src.GetSimpleReg()); - Write8(0x0F); Write8(0x38); Write8(0xF1); - dest.WriteRest(this, 0, src.GetSimpleReg()); - } - else - { - ASSERT_MSG(0, "MOVBE: Not loading or storing to mem"); - } -} - - -void XEmitter::LEA(int bits, X64Reg dest, OpArg src) -{ - ASSERT_MSG(!src.IsImm(), "LEA - Imm argument"); - src.operandReg = (u8)dest; - if (bits == 16) - Write8(0x66); //TODO: performance warning - src.WriteRex(this, bits, bits); - Write8(0x8D); - src.WriteRest(this, 0, INVALID_REG, bits == 64); -} - -//shift can be either imm8 or cl -void XEmitter::WriteShift(int bits, OpArg dest, const OpArg& shift, int ext) -{ - CheckFlags(); - bool writeImm = false; - if (dest.IsImm()) - { - ASSERT_MSG(0, "WriteShift - can't shift imms"); - } - if ((shift.IsSimpleReg() && shift.GetSimpleReg() != ECX) || (shift.IsImm() && shift.GetImmBits() != 8)) - { - ASSERT_MSG(0, "WriteShift - illegal argument"); - } - dest.operandReg = ext; - if (bits == 16) - Write8(0x66); - dest.WriteRex(this, bits, bits, 0); - if (shift.GetImmBits() == 8) - { - //ok an imm - u8 imm = (u8)shift.offset; - if (imm == 1) - { - Write8(bits == 8 ? 0xD0 : 0xD1); - } - else - { - writeImm = true; - Write8(bits == 8 ? 0xC0 : 0xC1); - } - } - else - { - Write8(bits == 8 ? 0xD2 : 0xD3); - } - dest.WriteRest(this, writeImm ? 
1 : 0); - if (writeImm) - Write8((u8)shift.offset); -} - -// large rotates and shift are slower on intel than amd -// intel likes to rotate by 1, and the op is smaller too -void XEmitter::ROL(int bits, const OpArg& dest, const OpArg& shift) {WriteShift(bits, dest, shift, 0);} -void XEmitter::ROR(int bits, const OpArg& dest, const OpArg& shift) {WriteShift(bits, dest, shift, 1);} -void XEmitter::RCL(int bits, const OpArg& dest, const OpArg& shift) {WriteShift(bits, dest, shift, 2);} -void XEmitter::RCR(int bits, const OpArg& dest, const OpArg& shift) {WriteShift(bits, dest, shift, 3);} -void XEmitter::SHL(int bits, const OpArg& dest, const OpArg& shift) {WriteShift(bits, dest, shift, 4);} -void XEmitter::SHR(int bits, const OpArg& dest, const OpArg& shift) {WriteShift(bits, dest, shift, 5);} -void XEmitter::SAR(int bits, const OpArg& dest, const OpArg& shift) {WriteShift(bits, dest, shift, 7);} - -// index can be either imm8 or register, don't use memory destination because it's slow -void XEmitter::WriteBitTest(int bits, const OpArg& dest, const OpArg& index, int ext) -{ - CheckFlags(); - if (dest.IsImm()) - { - ASSERT_MSG(0, "WriteBitTest - can't test imms"); - } - if ((index.IsImm() && index.GetImmBits() != 8)) - { - ASSERT_MSG(0, "WriteBitTest - illegal argument"); - } - if (bits == 16) - Write8(0x66); - if (index.IsImm()) - { - dest.WriteRex(this, bits, bits); - Write8(0x0F); Write8(0xBA); - dest.WriteRest(this, 1, (X64Reg)ext); - Write8((u8)index.offset); - } - else - { - X64Reg operand = index.GetSimpleReg(); - dest.WriteRex(this, bits, bits, operand); - Write8(0x0F); Write8(0x83 + 8*ext); - dest.WriteRest(this, 1, operand); - } -} - -void XEmitter::BT(int bits, const OpArg& dest, const OpArg& index) {WriteBitTest(bits, dest, index, 4);} -void XEmitter::BTS(int bits, const OpArg& dest, const OpArg& index) {WriteBitTest(bits, dest, index, 5);} -void XEmitter::BTR(int bits, const OpArg& dest, const OpArg& index) {WriteBitTest(bits, dest, index, 6);} -void XEmitter::BTC(int bits, const OpArg& dest, const OpArg& index) {WriteBitTest(bits, dest, index, 7);} - -//shift can be either imm8 or cl -void XEmitter::SHRD(int bits, const OpArg& dest, const OpArg& src, const OpArg& shift) -{ - CheckFlags(); - if (dest.IsImm()) - { - ASSERT_MSG(0, "SHRD - can't use imms as destination"); - } - if (!src.IsSimpleReg()) - { - ASSERT_MSG(0, "SHRD - must use simple register as source"); - } - if ((shift.IsSimpleReg() && shift.GetSimpleReg() != ECX) || (shift.IsImm() && shift.GetImmBits() != 8)) - { - ASSERT_MSG(0, "SHRD - illegal shift"); - } - if (bits == 16) - Write8(0x66); - X64Reg operand = src.GetSimpleReg(); - dest.WriteRex(this, bits, bits, operand); - if (shift.GetImmBits() == 8) - { - Write8(0x0F); Write8(0xAC); - dest.WriteRest(this, 1, operand); - Write8((u8)shift.offset); - } - else - { - Write8(0x0F); Write8(0xAD); - dest.WriteRest(this, 0, operand); - } -} - -void XEmitter::SHLD(int bits, const OpArg& dest, const OpArg& src, const OpArg& shift) -{ - CheckFlags(); - if (dest.IsImm()) - { - ASSERT_MSG(0, "SHLD - can't use imms as destination"); - } - if (!src.IsSimpleReg()) - { - ASSERT_MSG(0, "SHLD - must use simple register as source"); - } - if ((shift.IsSimpleReg() && shift.GetSimpleReg() != ECX) || (shift.IsImm() && shift.GetImmBits() != 8)) - { - ASSERT_MSG(0, "SHLD - illegal shift"); - } - if (bits == 16) - Write8(0x66); - X64Reg operand = src.GetSimpleReg(); - dest.WriteRex(this, bits, bits, operand); - if (shift.GetImmBits() == 8) - { - Write8(0x0F); Write8(0xA4); - 
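// (0F A4 is SHLD r/m, r, imm8; the immediate shift count is the final byte,
// emitted after the ModRM/SIB bytes that WriteRest produces below.)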
dest.WriteRest(this, 1, operand); - Write8((u8)shift.offset); - } - else - { - Write8(0x0F); Write8(0xA5); - dest.WriteRest(this, 0, operand); - } -} - -void OpArg::WriteSingleByteOp(XEmitter *emit, u8 op, X64Reg _operandReg, int bits) -{ - if (bits == 16) - emit->Write8(0x66); - - this->operandReg = (u8)_operandReg; - WriteRex(emit, bits, bits); - emit->Write8(op); - WriteRest(emit); -} - -//operand can either be immediate or register -void OpArg::WriteNormalOp(XEmitter *emit, bool toRM, NormalOp op, const OpArg& operand, int bits) const -{ - X64Reg _operandReg; - if (IsImm()) - { - ASSERT_MSG(0, "WriteNormalOp - Imm argument, wrong order"); - } - - if (bits == 16) - emit->Write8(0x66); - - int immToWrite = 0; - - if (operand.IsImm()) - { - WriteRex(emit, bits, bits); - - if (!toRM) - { - ASSERT_MSG(0, "WriteNormalOp - Writing to Imm (!toRM)"); - } - - if (operand.scale == SCALE_IMM8 && bits == 8) - { - // op al, imm8 - if (!scale && offsetOrBaseReg == AL && normalops[op].eaximm8 != 0xCC) - { - emit->Write8(normalops[op].eaximm8); - emit->Write8((u8)operand.offset); - return; - } - // mov reg, imm8 - if (!scale && op == nrmMOV) - { - emit->Write8(0xB0 + (offsetOrBaseReg & 7)); - emit->Write8((u8)operand.offset); - return; - } - // op r/m8, imm8 - emit->Write8(normalops[op].imm8); - immToWrite = 8; - } - else if ((operand.scale == SCALE_IMM16 && bits == 16) || - (operand.scale == SCALE_IMM32 && bits == 32) || - (operand.scale == SCALE_IMM32 && bits == 64)) - { - // Try to save immediate size if we can, but first check to see - // if the instruction supports simm8. - // op r/m, imm8 - if (normalops[op].simm8 != 0xCC && - ((operand.scale == SCALE_IMM16 && (s16)operand.offset == (s8)operand.offset) || - (operand.scale == SCALE_IMM32 && (s32)operand.offset == (s8)operand.offset))) - { - emit->Write8(normalops[op].simm8); - immToWrite = 8; - } - else - { - // mov reg, imm - if (!scale && op == nrmMOV && bits != 64) - { - emit->Write8(0xB8 + (offsetOrBaseReg & 7)); - if (bits == 16) - emit->Write16((u16)operand.offset); - else - emit->Write32((u32)operand.offset); - return; - } - // op eax, imm - if (!scale && offsetOrBaseReg == EAX && normalops[op].eaximm32 != 0xCC) - { - emit->Write8(normalops[op].eaximm32); - if (bits == 16) - emit->Write16((u16)operand.offset); - else - emit->Write32((u32)operand.offset); - return; - } - // op r/m, imm - emit->Write8(normalops[op].imm32); - immToWrite = bits == 16 ? 16 : 32; - } - } - else if ((operand.scale == SCALE_IMM8 && bits == 16) || - (operand.scale == SCALE_IMM8 && bits == 32) || - (operand.scale == SCALE_IMM8 && bits == 64)) - { - // op r/m, imm8 - emit->Write8(normalops[op].simm8); - immToWrite = 8; - } - else if (operand.scale == SCALE_IMM64 && bits == 64) - { - if (scale) - { - ASSERT_MSG(0, "WriteNormalOp - MOV with 64-bit imm requres register destination"); - } - // mov reg64, imm64 - else if (op == nrmMOV) - { - emit->Write8(0xB8 + (offsetOrBaseReg & 7)); - emit->Write64((u64)operand.offset); - return; - } - ASSERT_MSG(0, "WriteNormalOp - Only MOV can take 64-bit imm"); - } - else - { - ASSERT_MSG(0, "WriteNormalOp - Unhandled case"); - } - _operandReg = (X64Reg)normalops[op].ext; //pass extension in REG of ModRM - } - else - { - _operandReg = (X64Reg)operand.offsetOrBaseReg; - WriteRex(emit, bits, bits, _operandReg); - // op r/m, reg - if (toRM) - { - emit->Write8(bits == 8 ? normalops[op].toRm8 : normalops[op].toRm32); - } - // op reg, r/m - else - { - emit->Write8(bits == 8 ? 
normalops[op].fromRm8 : normalops[op].fromRm32); - } - } - WriteRest(emit, immToWrite >> 3, _operandReg); - switch (immToWrite) - { - case 0: - break; - case 8: - emit->Write8((u8)operand.offset); - break; - case 16: - emit->Write16((u16)operand.offset); - break; - case 32: - emit->Write32((u32)operand.offset); - break; - default: - ASSERT_MSG(0, "WriteNormalOp - Unhandled case"); - } -} - -void XEmitter::WriteNormalOp(XEmitter *emit, int bits, NormalOp op, const OpArg& a1, const OpArg& a2) -{ - if (a1.IsImm()) - { - //Booh! Can't write to an imm - ASSERT_MSG(0, "WriteNormalOp - a1 cannot be imm"); - return; - } - if (a2.IsImm()) - { - a1.WriteNormalOp(emit, true, op, a2, bits); - } - else - { - if (a1.IsSimpleReg()) - { - a2.WriteNormalOp(emit, false, op, a1, bits); - } - else - { - ASSERT_MSG(a2.IsSimpleReg() || a2.IsImm(), "WriteNormalOp - a1 and a2 cannot both be memory"); - a1.WriteNormalOp(emit, true, op, a2, bits); - } - } -} - -void XEmitter::ADD (int bits, const OpArg& a1, const OpArg& a2) {CheckFlags(); WriteNormalOp(this, bits, nrmADD, a1, a2);} -void XEmitter::ADC (int bits, const OpArg& a1, const OpArg& a2) {CheckFlags(); WriteNormalOp(this, bits, nrmADC, a1, a2);} -void XEmitter::SUB (int bits, const OpArg& a1, const OpArg& a2) {CheckFlags(); WriteNormalOp(this, bits, nrmSUB, a1, a2);} -void XEmitter::SBB (int bits, const OpArg& a1, const OpArg& a2) {CheckFlags(); WriteNormalOp(this, bits, nrmSBB, a1, a2);} -void XEmitter::AND (int bits, const OpArg& a1, const OpArg& a2) {CheckFlags(); WriteNormalOp(this, bits, nrmAND, a1, a2);} -void XEmitter::OR (int bits, const OpArg& a1, const OpArg& a2) {CheckFlags(); WriteNormalOp(this, bits, nrmOR , a1, a2);} -void XEmitter::XOR (int bits, const OpArg& a1, const OpArg& a2) {CheckFlags(); WriteNormalOp(this, bits, nrmXOR, a1, a2);} -void XEmitter::MOV (int bits, const OpArg& a1, const OpArg& a2) -{ - if (a1.IsSimpleReg() && a2.IsSimpleReg() && a1.GetSimpleReg() == a2.GetSimpleReg()) - ASSERT_MSG(false, "Redundant MOV @ %p - bug in JIT?", code); - WriteNormalOp(this, bits, nrmMOV, a1, a2); -} -void XEmitter::TEST(int bits, const OpArg& a1, const OpArg& a2) {CheckFlags(); WriteNormalOp(this, bits, nrmTEST, a1, a2);} -void XEmitter::CMP (int bits, const OpArg& a1, const OpArg& a2) {CheckFlags(); WriteNormalOp(this, bits, nrmCMP, a1, a2);} -void XEmitter::XCHG(int bits, const OpArg& a1, const OpArg& a2) {WriteNormalOp(this, bits, nrmXCHG, a1, a2);} - -void XEmitter::IMUL(int bits, X64Reg regOp, const OpArg& a1, const OpArg& a2) -{ - CheckFlags(); - if (bits == 8) - { - ASSERT_MSG(0, "IMUL - illegal bit size!"); - return; - } - - if (a1.IsImm()) - { - ASSERT_MSG(0, "IMUL - second arg cannot be imm!"); - return; - } - - if (!a2.IsImm()) - { - ASSERT_MSG(0, "IMUL - third arg must be imm!"); - return; - } - - if (bits == 16) - Write8(0x66); - a1.WriteRex(this, bits, bits, regOp); - - if (a2.GetImmBits() == 8 || - (a2.GetImmBits() == 16 && (s8)a2.offset == (s16)a2.offset) || - (a2.GetImmBits() == 32 && (s8)a2.offset == (s32)a2.offset)) - { - Write8(0x6B); - a1.WriteRest(this, 1, regOp); - Write8((u8)a2.offset); - } - else - { - Write8(0x69); - if (a2.GetImmBits() == 16 && bits == 16) - { - a1.WriteRest(this, 2, regOp); - Write16((u16)a2.offset); - } - else if (a2.GetImmBits() == 32 && (bits == 32 || bits == 64)) - { - a1.WriteRest(this, 4, regOp); - Write32((u32)a2.offset); - } - else - { - ASSERT_MSG(0, "IMUL - unhandled case!"); - } - } -} - -void XEmitter::IMUL(int bits, X64Reg regOp, const OpArg& a) -{ - CheckFlags(); - if (bits == 8) - { - 
ASSERT_MSG(0, "IMUL - illegal bit size!"); - return; - } - - if (a.IsImm()) - { - IMUL(bits, regOp, R(regOp), a) ; - return; - } - - if (bits == 16) - Write8(0x66); - a.WriteRex(this, bits, bits, regOp); - Write8(0x0F); - Write8(0xAF); - a.WriteRest(this, 0, regOp); -} - - -void XEmitter::WriteSSEOp(u8 opPrefix, u16 op, X64Reg regOp, OpArg arg, int extrabytes) -{ - WriteSSEOp_opBits(32, opPrefix, op, regOp, arg, extrabytes); -} - -void XEmitter::WriteSSEOp_opBits(int opBits, u8 opPrefix, u16 op, X64Reg regOp, OpArg arg, int extrabytes) -{ - ASSERT(opBits == 32 || opBits == 64); - if (opPrefix) - Write8(opPrefix); - arg.operandReg = regOp; - arg.WriteRex(this, opBits, 0); - Write8(0x0F); - if (op > 0xFF) - Write8((op >> 8) & 0xFF); - Write8(op & 0xFF); - arg.WriteRest(this, extrabytes); -} - -void XEmitter::WriteAVXOp(u8 opPrefix, u16 op, X64Reg regOp, const OpArg& arg, int extrabytes) -{ - WriteAVXOp(opPrefix, op, regOp, INVALID_REG, arg, extrabytes); -} - -static int GetVEXmmmmm(u16 op) -{ - // Currently, only 0x38 and 0x3A are used as secondary escape byte. - if ((op >> 8) == 0x3A) - return 3; - if ((op >> 8) == 0x38) - return 2; - - return 1; -} - -static int GetVEXpp(u8 opPrefix) -{ - if (opPrefix == 0x66) - return 1; - if (opPrefix == 0xF3) - return 2; - if (opPrefix == 0xF2) - return 3; - - return 0; -} - -void XEmitter::WriteAVXOp(u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int extrabytes) -{ - if (!Common::GetCPUCaps().avx) - ASSERT_MSG(0, "Trying to use AVX on a system that doesn't support it. Bad programmer."); - int mmmmm = GetVEXmmmmm(op); - int pp = GetVEXpp(opPrefix); - // FIXME: we currently don't support 256-bit instructions, and "size" is not the vector size here - arg.WriteVex(this, regOp1, regOp2, 0, pp, mmmmm); - Write8(op & 0xFF); - arg.WriteRest(this, extrabytes, regOp1); -} - -// Like the above, but more general; covers GPR-based VEX operations, like BMI1/2 -void XEmitter::WriteVEXOp(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int extrabytes) -{ - if (size != 32 && size != 64) - ASSERT_MSG(0, "VEX GPR instructions only support 32-bit and 64-bit modes!"); - int mmmmm = GetVEXmmmmm(op); - int pp = GetVEXpp(opPrefix); - arg.WriteVex(this, regOp1, regOp2, 0, pp, mmmmm, size == 64); - Write8(op & 0xFF); - arg.WriteRest(this, extrabytes, regOp1); -} - -void XEmitter::WriteBMI1Op(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int extrabytes) -{ - CheckFlags(); - if (!Common::GetCPUCaps().bmi1) - ASSERT_MSG(0, "Trying to use BMI1 on a system that doesn't support it. Bad programmer."); - WriteVEXOp(size, opPrefix, op, regOp1, regOp2, arg, extrabytes); -} - -void XEmitter::WriteBMI2Op(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int extrabytes) -{ - CheckFlags(); - if (!Common::GetCPUCaps().bmi2) - ASSERT_MSG(0, "Trying to use BMI2 on a system that doesn't support it. 
Bad programmer."); - WriteVEXOp(size, opPrefix, op, regOp1, regOp2, arg, extrabytes); -} - -void XEmitter::MOVD_xmm(X64Reg dest, const OpArg &arg) {WriteSSEOp(0x66, 0x6E, dest, arg, 0);} -void XEmitter::MOVD_xmm(const OpArg &arg, X64Reg src) {WriteSSEOp(0x66, 0x7E, src, arg, 0);} - -void XEmitter::MOVQ_xmm(X64Reg dest, OpArg arg) -{ -#ifdef ARCHITECTURE_x86_64 - // Alternate encoding - // This does not display correctly in MSVC's debugger, it thinks it's a MOVD - arg.operandReg = dest; - Write8(0x66); - arg.WriteRex(this, 64, 0); - Write8(0x0f); - Write8(0x6E); - arg.WriteRest(this, 0); -#else - arg.operandReg = dest; - Write8(0xF3); - Write8(0x0f); - Write8(0x7E); - arg.WriteRest(this, 0); -#endif -} - -void XEmitter::MOVQ_xmm(OpArg arg, X64Reg src) -{ - if (src > 7 || arg.IsSimpleReg()) - { - // Alternate encoding - // This does not display correctly in MSVC's debugger, it thinks it's a MOVD - arg.operandReg = src; - Write8(0x66); - arg.WriteRex(this, 64, 0); - Write8(0x0f); - Write8(0x7E); - arg.WriteRest(this, 0); - } - else - { - arg.operandReg = src; - arg.WriteRex(this, 0, 0); - Write8(0x66); - Write8(0x0f); - Write8(0xD6); - arg.WriteRest(this, 0); - } -} - -void XEmitter::WriteMXCSR(OpArg arg, int ext) -{ - if (arg.IsImm() || arg.IsSimpleReg()) - ASSERT_MSG(0, "MXCSR - invalid operand"); - - arg.operandReg = ext; - arg.WriteRex(this, 0, 0); - Write8(0x0F); - Write8(0xAE); - arg.WriteRest(this); -} - -void XEmitter::STMXCSR(const OpArg& memloc) {WriteMXCSR(memloc, 3);} -void XEmitter::LDMXCSR(const OpArg& memloc) {WriteMXCSR(memloc, 2);} - -void XEmitter::MOVNTDQ(const OpArg& arg, X64Reg regOp) {WriteSSEOp(0x66, sseMOVNTDQ, regOp, arg);} -void XEmitter::MOVNTPS(const OpArg& arg, X64Reg regOp) {WriteSSEOp(0x00, sseMOVNTP, regOp, arg);} -void XEmitter::MOVNTPD(const OpArg& arg, X64Reg regOp) {WriteSSEOp(0x66, sseMOVNTP, regOp, arg);} - -void XEmitter::ADDSS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, sseADD, regOp, arg);} -void XEmitter::ADDSD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF2, sseADD, regOp, arg);} -void XEmitter::SUBSS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, sseSUB, regOp, arg);} -void XEmitter::SUBSD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF2, sseSUB, regOp, arg);} -void XEmitter::CMPSS(X64Reg regOp, const OpArg& arg, u8 compare) {WriteSSEOp(0xF3, sseCMP, regOp, arg, 1); Write8(compare);} -void XEmitter::CMPSD(X64Reg regOp, const OpArg& arg, u8 compare) {WriteSSEOp(0xF2, sseCMP, regOp, arg, 1); Write8(compare);} -void XEmitter::MULSS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, sseMUL, regOp, arg);} -void XEmitter::MULSD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF2, sseMUL, regOp, arg);} -void XEmitter::DIVSS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, sseDIV, regOp, arg);} -void XEmitter::DIVSD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF2, sseDIV, regOp, arg);} -void XEmitter::MINSS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, sseMIN, regOp, arg);} -void XEmitter::MINSD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF2, sseMIN, regOp, arg);} -void XEmitter::MAXSS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, sseMAX, regOp, arg);} -void XEmitter::MAXSD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF2, sseMAX, regOp, arg);} -void XEmitter::SQRTSS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, sseSQRT, regOp, arg);} -void XEmitter::SQRTSD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF2, sseSQRT, regOp, arg);} -void XEmitter::RCPSS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, sseRCP, regOp, arg);} -void 
XEmitter::RSQRTSS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, sseRSQRT, regOp, arg);} - -void XEmitter::ADDPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseADD, regOp, arg);} -void XEmitter::ADDPD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseADD, regOp, arg);} -void XEmitter::SUBPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseSUB, regOp, arg);} -void XEmitter::SUBPD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseSUB, regOp, arg);} -void XEmitter::CMPPS(X64Reg regOp, const OpArg& arg, u8 compare) {WriteSSEOp(0x00, sseCMP, regOp, arg, 1); Write8(compare);} -void XEmitter::CMPPD(X64Reg regOp, const OpArg& arg, u8 compare) {WriteSSEOp(0x66, sseCMP, regOp, arg, 1); Write8(compare);} -void XEmitter::ANDPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseAND, regOp, arg);} -void XEmitter::ANDPD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseAND, regOp, arg);} -void XEmitter::ANDNPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseANDN, regOp, arg);} -void XEmitter::ANDNPD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseANDN, regOp, arg);} -void XEmitter::ORPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseOR, regOp, arg);} -void XEmitter::ORPD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseOR, regOp, arg);} -void XEmitter::XORPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseXOR, regOp, arg);} -void XEmitter::XORPD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseXOR, regOp, arg);} -void XEmitter::MULPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseMUL, regOp, arg);} -void XEmitter::MULPD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseMUL, regOp, arg);} -void XEmitter::DIVPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseDIV, regOp, arg);} -void XEmitter::DIVPD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseDIV, regOp, arg);} -void XEmitter::MINPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseMIN, regOp, arg);} -void XEmitter::MINPD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseMIN, regOp, arg);} -void XEmitter::MAXPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseMAX, regOp, arg);} -void XEmitter::MAXPD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseMAX, regOp, arg);} -void XEmitter::SQRTPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseSQRT, regOp, arg);} -void XEmitter::SQRTPD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseSQRT, regOp, arg);} -void XEmitter::RCPPS(X64Reg regOp, const OpArg& arg) { WriteSSEOp(0x00, sseRCP, regOp, arg); } -void XEmitter::RSQRTPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseRSQRT, regOp, arg);} -void XEmitter::SHUFPS(X64Reg regOp, const OpArg& arg, u8 shuffle) {WriteSSEOp(0x00, sseSHUF, regOp, arg,1); Write8(shuffle);} -void XEmitter::SHUFPD(X64Reg regOp, const OpArg& arg, u8 shuffle) {WriteSSEOp(0x66, sseSHUF, regOp, arg,1); Write8(shuffle);} - -void XEmitter::HADDPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF2, sseHADD, regOp, arg);} - -void XEmitter::COMISS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseCOMIS, regOp, arg);} //weird that these should be packed -void XEmitter::COMISD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseCOMIS, regOp, arg);} //ordered -void XEmitter::UCOMISS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseUCOMIS, regOp, arg);} //unordered -void XEmitter::UCOMISD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseUCOMIS, regOp, arg);} - -void XEmitter::MOVAPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseMOVAPfromRM, regOp, arg);} -void XEmitter::MOVAPD(X64Reg 
regOp, const OpArg& arg) {WriteSSEOp(0x66, sseMOVAPfromRM, regOp, arg);} -void XEmitter::MOVAPS(const OpArg& arg, X64Reg regOp) {WriteSSEOp(0x00, sseMOVAPtoRM, regOp, arg);} -void XEmitter::MOVAPD(const OpArg& arg, X64Reg regOp) {WriteSSEOp(0x66, sseMOVAPtoRM, regOp, arg);} - -void XEmitter::MOVUPS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, sseMOVUPfromRM, regOp, arg);} -void XEmitter::MOVUPD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseMOVUPfromRM, regOp, arg);} -void XEmitter::MOVUPS(const OpArg& arg, X64Reg regOp) {WriteSSEOp(0x00, sseMOVUPtoRM, regOp, arg);} -void XEmitter::MOVUPD(const OpArg& arg, X64Reg regOp) {WriteSSEOp(0x66, sseMOVUPtoRM, regOp, arg);} - -void XEmitter::MOVDQA(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, sseMOVDQfromRM, regOp, arg);} -void XEmitter::MOVDQA(const OpArg& arg, X64Reg regOp) {WriteSSEOp(0x66, sseMOVDQtoRM, regOp, arg);} -void XEmitter::MOVDQU(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, sseMOVDQfromRM, regOp, arg);} -void XEmitter::MOVDQU(const OpArg& arg, X64Reg regOp) {WriteSSEOp(0xF3, sseMOVDQtoRM, regOp, arg);} - -void XEmitter::MOVSS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, sseMOVUPfromRM, regOp, arg);} -void XEmitter::MOVSD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF2, sseMOVUPfromRM, regOp, arg);} -void XEmitter::MOVSS(const OpArg& arg, X64Reg regOp) {WriteSSEOp(0xF3, sseMOVUPtoRM, regOp, arg);} -void XEmitter::MOVSD(const OpArg& arg, X64Reg regOp) {WriteSSEOp(0xF2, sseMOVUPtoRM, regOp, arg);} - -void XEmitter::MOVLPS(X64Reg regOp, const OpArg& arg) { WriteSSEOp(0x00, sseMOVLPfromRM, regOp, arg); } -void XEmitter::MOVLPD(X64Reg regOp, const OpArg& arg) { WriteSSEOp(0x66, sseMOVLPfromRM, regOp, arg); } -void XEmitter::MOVLPS(const OpArg& arg, X64Reg regOp) { WriteSSEOp(0x00, sseMOVLPtoRM, regOp, arg); } -void XEmitter::MOVLPD(const OpArg& arg, X64Reg regOp) { WriteSSEOp(0x66, sseMOVLPtoRM, regOp, arg); } - -void XEmitter::MOVHPS(X64Reg regOp, const OpArg& arg) { WriteSSEOp(0x00, sseMOVHPfromRM, regOp, arg); } -void XEmitter::MOVHPD(X64Reg regOp, const OpArg& arg) { WriteSSEOp(0x66, sseMOVHPfromRM, regOp, arg); } -void XEmitter::MOVHPS(const OpArg& arg, X64Reg regOp) { WriteSSEOp(0x00, sseMOVHPtoRM, regOp, arg); } -void XEmitter::MOVHPD(const OpArg& arg, X64Reg regOp) { WriteSSEOp(0x66, sseMOVHPtoRM, regOp, arg); } - -void XEmitter::MOVHLPS(X64Reg regOp1, X64Reg regOp2) {WriteSSEOp(0x00, sseMOVHLPS, regOp1, R(regOp2));} -void XEmitter::MOVLHPS(X64Reg regOp1, X64Reg regOp2) {WriteSSEOp(0x00, sseMOVLHPS, regOp1, R(regOp2));} - -void XEmitter::CVTPS2PD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, 0x5A, regOp, arg);} -void XEmitter::CVTPD2PS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, 0x5A, regOp, arg);} - -void XEmitter::CVTSD2SS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF2, 0x5A, regOp, arg);} -void XEmitter::CVTSS2SD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, 0x5A, regOp, arg);} -void XEmitter::CVTSD2SI(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF2, 0x2D, regOp, arg);} -void XEmitter::CVTSS2SI(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, 0x2D, regOp, arg);} -void XEmitter::CVTSI2SD(int opBits, X64Reg regOp, const OpArg& arg) {WriteSSEOp_opBits(opBits, 0xF2, 0x2A, regOp, arg);} -void XEmitter::CVTSI2SS(int opBits, X64Reg regOp, const OpArg& arg) {WriteSSEOp_opBits(opBits, 0xF3, 0x2A, regOp, arg);} - -void XEmitter::CVTDQ2PD(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, 0xE6, regOp, arg);} -void XEmitter::CVTDQ2PS(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x00, 0x5B, regOp, 
-void XEmitter::CVTPD2DQ(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF2, 0xE6, regOp, arg);}
-void XEmitter::CVTPS2DQ(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, 0x5B, regOp, arg);}
-
-void XEmitter::CVTTSD2SI(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF2, 0x2C, regOp, arg);}
-void XEmitter::CVTTSS2SI(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, 0x2C, regOp, arg);}
-void XEmitter::CVTTPS2DQ(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0xF3, 0x5B, regOp, arg);}
-void XEmitter::CVTTPD2DQ(X64Reg regOp, const OpArg& arg) {WriteSSEOp(0x66, 0xE6, regOp, arg);}
-
-void XEmitter::MASKMOVDQU(X64Reg dest, X64Reg src) {WriteSSEOp(0x66, sseMASKMOVDQU, dest, R(src));}
-
-void XEmitter::MOVMSKPS(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x00, 0x50, dest, arg);}
-void XEmitter::MOVMSKPD(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0x50, dest, arg);}
-
-void XEmitter::LDDQU(X64Reg dest, const OpArg& arg) {WriteSSEOp(0xF2, sseLDDQU, dest, arg);} // For integer data only
-
-// THESE TWO ARE UNTESTED.
-void XEmitter::UNPCKLPS(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x00, 0x14, dest, arg);}
-void XEmitter::UNPCKHPS(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x00, 0x15, dest, arg);}
-
-void XEmitter::UNPCKLPD(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0x14, dest, arg);}
-void XEmitter::UNPCKHPD(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0x15, dest, arg);}
-
-void XEmitter::MOVDDUP(X64Reg regOp, const OpArg& arg)
-{
-    if (Common::GetCPUCaps().sse3)
-    {
-        WriteSSEOp(0xF2, 0x12, regOp, arg); //SSE3 movddup
-    }
-    else
-    {
-        // Simulate this instruction with SSE2 instructions
-        if (!arg.IsSimpleReg(regOp))
-            MOVSD(regOp, arg);
-        UNPCKLPD(regOp, R(regOp));
-    }
-}
-
-//There are a few more left
-
-// Also some integer instructions are missing
-void XEmitter::PACKSSDW(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0x6B, dest, arg);}
-void XEmitter::PACKSSWB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0x63, dest, arg);}
-void XEmitter::PACKUSWB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0x67, dest, arg);}
-
-void XEmitter::PUNPCKLBW(X64Reg dest, const OpArg &arg) {WriteSSEOp(0x66, 0x60, dest, arg);}
-void XEmitter::PUNPCKLWD(X64Reg dest, const OpArg &arg) {WriteSSEOp(0x66, 0x61, dest, arg);}
-void XEmitter::PUNPCKLDQ(X64Reg dest, const OpArg &arg) {WriteSSEOp(0x66, 0x62, dest, arg);}
-void XEmitter::PUNPCKLQDQ(X64Reg dest, const OpArg &arg) {WriteSSEOp(0x66, 0x6C, dest, arg);}
-
-void XEmitter::PSRLW(X64Reg reg, int shift)
-{
-    WriteSSEOp(0x66, 0x71, (X64Reg)2, R(reg));
-    Write8(shift);
-}
-
-void XEmitter::PSRLD(X64Reg reg, int shift)
-{
-    WriteSSEOp(0x66, 0x72, (X64Reg)2, R(reg));
-    Write8(shift);
-}
-
-void XEmitter::PSRLQ(X64Reg reg, int shift)
-{
-    WriteSSEOp(0x66, 0x73, (X64Reg)2, R(reg));
-    Write8(shift);
-}
-
-void XEmitter::PSRLQ(X64Reg reg, const OpArg& arg)
-{
-    WriteSSEOp(0x66, 0xd3, reg, arg);
-}
-
-void XEmitter::PSRLDQ(X64Reg reg, int shift) {
-    WriteSSEOp(0x66, 0x73, (X64Reg)3, R(reg));
-    Write8(shift);
-}
-
-void XEmitter::PSLLW(X64Reg reg, int shift)
-{
-    WriteSSEOp(0x66, 0x71, (X64Reg)6, R(reg));
-    Write8(shift);
-}
-
-void XEmitter::PSLLD(X64Reg reg, int shift)
-{
-    WriteSSEOp(0x66, 0x72, (X64Reg)6, R(reg));
-    Write8(shift);
-}
-
-void XEmitter::PSLLQ(X64Reg reg, int shift)
-{
-    WriteSSEOp(0x66, 0x73, (X64Reg)6, R(reg));
-    Write8(shift);
-}
-
-void XEmitter::PSLLDQ(X64Reg reg, int shift) {
-    WriteSSEOp(0x66, 0x73, (X64Reg)7, R(reg));
-    Write8(shift);
-}
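The MOVDDUP fallback deleted above is worth keeping in mind for the xbyak port: on pre-SSE3 hosts the broadcast has to be synthesised from MOVSD plus UNPCKLPD. A minimal sketch of the same dispatch written against xbyak, covering the memory-operand case of the deleted XEmitter::MOVDDUP; the EmitMovddup helper and the cached Cpu object are illustrative, not part of this patch:

    #include <xbyak/xbyak.h>
    #include <xbyak/xbyak_util.h>

    // Hypothetical helper mirroring the deleted XEmitter::MOVDDUP.
    static void EmitMovddup(Xbyak::CodeGenerator& code, const Xbyak::util::Cpu& cpu,
                            const Xbyak::Xmm& dst, const Xbyak::Address& src) {
        if (cpu.has(Xbyak::util::Cpu::tSSE3)) {
            code.movddup(dst, src);  // single instruction on SSE3 hosts
        } else {
            code.movsd(dst, src);    // load the low 64 bits
            code.unpcklpd(dst, dst); // duplicate them into the high half
        }
    }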
-void XEmitter::PSRAW(X64Reg reg, int shift)
-{
-    WriteSSEOp(0x66, 0x71, (X64Reg)4, R(reg));
-    Write8(shift);
-}
-
-void XEmitter::PSRAD(X64Reg reg, int shift)
-{
-    WriteSSEOp(0x66, 0x72, (X64Reg)4, R(reg));
-    Write8(shift);
-}
-
-void XEmitter::WriteSSSE3Op(u8 opPrefix, u16 op, X64Reg regOp, const OpArg& arg, int extrabytes)
-{
-    if (!Common::GetCPUCaps().ssse3)
-        ASSERT_MSG(0, "Trying to use SSSE3 on a system that doesn't support it. Bad programmer.");
-    WriteSSEOp(opPrefix, op, regOp, arg, extrabytes);
-}
-
-void XEmitter::WriteSSE41Op(u8 opPrefix, u16 op, X64Reg regOp, const OpArg& arg, int extrabytes)
-{
-    if (!Common::GetCPUCaps().sse4_1)
-        ASSERT_MSG(0, "Trying to use SSE4.1 on a system that doesn't support it. Bad programmer.");
-    WriteSSEOp(opPrefix, op, regOp, arg, extrabytes);
-}
-
-void XEmitter::PSHUFB(X64Reg dest, const OpArg& arg) {WriteSSSE3Op(0x66, 0x3800, dest, arg);}
-void XEmitter::PTEST(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3817, dest, arg);}
-void XEmitter::PACKUSDW(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x382b, dest, arg);}
-void XEmitter::DPPS(X64Reg dest, const OpArg& arg, u8 mask) {WriteSSE41Op(0x66, 0x3A40, dest, arg, 1); Write8(mask);}
-
-void XEmitter::PMINSB(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3838, dest, arg);}
-void XEmitter::PMINSD(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3839, dest, arg);}
-void XEmitter::PMINUW(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x383a, dest, arg);}
-void XEmitter::PMINUD(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x383b, dest, arg);}
-void XEmitter::PMAXSB(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x383c, dest, arg);}
-void XEmitter::PMAXSD(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x383d, dest, arg);}
-void XEmitter::PMAXUW(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x383e, dest, arg);}
-void XEmitter::PMAXUD(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x383f, dest, arg);}
-
-void XEmitter::PMOVSXBW(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3820, dest, arg);}
-void XEmitter::PMOVSXBD(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3821, dest, arg);}
-void XEmitter::PMOVSXBQ(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3822, dest, arg);}
-void XEmitter::PMOVSXWD(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3823, dest, arg);}
-void XEmitter::PMOVSXWQ(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3824, dest, arg);}
-void XEmitter::PMOVSXDQ(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3825, dest, arg);}
-void XEmitter::PMOVZXBW(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3830, dest, arg);}
-void XEmitter::PMOVZXBD(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3831, dest, arg);}
-void XEmitter::PMOVZXBQ(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3832, dest, arg);}
-void XEmitter::PMOVZXWD(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3833, dest, arg);}
-void XEmitter::PMOVZXWQ(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3834, dest, arg);}
-void XEmitter::PMOVZXDQ(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3835, dest, arg);}
-
-void XEmitter::PBLENDVB(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3810, dest, arg);}
-void XEmitter::BLENDVPS(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3814, dest, arg);}
-void XEmitter::BLENDVPD(X64Reg dest, const OpArg& arg) {WriteSSE41Op(0x66, 0x3815, dest, arg);}
-void XEmitter::BLENDPS(X64Reg dest, const OpArg& arg, u8 blend) { WriteSSE41Op(0x66, 0x3A0C, dest, arg, 1); Write8(blend); }
-void XEmitter::BLENDPD(X64Reg dest, const OpArg& arg, u8 blend) { WriteSSE41Op(0x66, 0x3A0D, dest, arg, 1); Write8(blend); }
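Both WriteSSSE3Op and WriteSSE41Op above guard every emission with a runtime capability assert via Common::GetCPUCaps(), which this patch also deletes. With xbyak the equivalent information comes from xbyak_util; a sketch of a one-time probe (variable names illustrative):

    #include <xbyak/xbyak_util.h>

    // Probe host capabilities once at startup instead of asserting per call.
    const Xbyak::util::Cpu cpu;
    const bool have_ssse3 = cpu.has(Xbyak::util::Cpu::tSSSE3);
    const bool have_sse41 = cpu.has(Xbyak::util::Cpu::tSSE41);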
-
-void XEmitter::ROUNDSS(X64Reg dest, const OpArg& arg, u8 mode) {WriteSSE41Op(0x66, 0x3A0A, dest, arg, 1); Write8(mode);}
-void XEmitter::ROUNDSD(X64Reg dest, const OpArg& arg, u8 mode) {WriteSSE41Op(0x66, 0x3A0B, dest, arg, 1); Write8(mode);}
-void XEmitter::ROUNDPS(X64Reg dest, const OpArg& arg, u8 mode) {WriteSSE41Op(0x66, 0x3A08, dest, arg, 1); Write8(mode);}
-void XEmitter::ROUNDPD(X64Reg dest, const OpArg& arg, u8 mode) {WriteSSE41Op(0x66, 0x3A09, dest, arg, 1); Write8(mode);}
-
-void XEmitter::PAND(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xDB, dest, arg);}
-void XEmitter::PANDN(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xDF, dest, arg);}
-void XEmitter::PXOR(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xEF, dest, arg);}
-void XEmitter::POR(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xEB, dest, arg);}
-
-void XEmitter::PADDB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xFC, dest, arg);}
-void XEmitter::PADDW(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xFD, dest, arg);}
-void XEmitter::PADDD(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xFE, dest, arg);}
-void XEmitter::PADDQ(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xD4, dest, arg);}
-
-void XEmitter::PADDSB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xEC, dest, arg);}
-void XEmitter::PADDSW(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xED, dest, arg);}
-void XEmitter::PADDUSB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xDC, dest, arg);}
-void XEmitter::PADDUSW(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xDD, dest, arg);}
-
-void XEmitter::PSUBB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xF8, dest, arg);}
-void XEmitter::PSUBW(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xF9, dest, arg);}
-void XEmitter::PSUBD(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xFA, dest, arg);}
-void XEmitter::PSUBQ(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xFB, dest, arg);}
-
-void XEmitter::PSUBSB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xE8, dest, arg);}
-void XEmitter::PSUBSW(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xE9, dest, arg);}
-void XEmitter::PSUBUSB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xD8, dest, arg);}
-void XEmitter::PSUBUSW(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xD9, dest, arg);}
-
-void XEmitter::PAVGB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xE0, dest, arg);}
-void XEmitter::PAVGW(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xE3, dest, arg);}
-
-void XEmitter::PCMPEQB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0x74, dest, arg);}
-void XEmitter::PCMPEQW(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0x75, dest, arg);}
-void XEmitter::PCMPEQD(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0x76, dest, arg);}
-
-void XEmitter::PCMPGTB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0x64, dest, arg);}
-void XEmitter::PCMPGTW(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0x65, dest, arg);}
-void XEmitter::PCMPGTD(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0x66, dest, arg);}
-
-void XEmitter::PEXTRW(X64Reg dest, const OpArg& arg, u8 subreg) {WriteSSEOp(0x66, 0xC5, dest, arg, 1); Write8(subreg);}
-void XEmitter::PINSRW(X64Reg dest, const OpArg& arg, u8 subreg) {WriteSSEOp(0x66, 0xC4, dest, arg, 1); Write8(subreg);}
-
-void XEmitter::PMADDWD(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xF5, dest, arg); }
-void XEmitter::PSADBW(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xF6, dest, arg);}
-
-void XEmitter::PMAXSW(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xEE, dest, arg); }
-void XEmitter::PMAXUB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xDE, dest, arg); }
-void XEmitter::PMINSW(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xEA, dest, arg); }
-void XEmitter::PMINUB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xDA, dest, arg); }
-
-void XEmitter::PMOVMSKB(X64Reg dest, const OpArg& arg) {WriteSSEOp(0x66, 0xD7, dest, arg); }
-void XEmitter::PSHUFD(X64Reg regOp, const OpArg& arg, u8 shuffle) {WriteSSEOp(0x66, 0x70, regOp, arg, 1); Write8(shuffle);}
-void XEmitter::PSHUFLW(X64Reg regOp, const OpArg& arg, u8 shuffle) {WriteSSEOp(0xF2, 0x70, regOp, arg, 1); Write8(shuffle);}
-void XEmitter::PSHUFHW(X64Reg regOp, const OpArg& arg, u8 shuffle) {WriteSSEOp(0xF3, 0x70, regOp, arg, 1); Write8(shuffle);}
-
-// VEX
-void XEmitter::VADDSD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) {WriteAVXOp(0xF2, sseADD, regOp1, regOp2, arg);}
-void XEmitter::VSUBSD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) {WriteAVXOp(0xF2, sseSUB, regOp1, regOp2, arg);}
-void XEmitter::VMULSD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) {WriteAVXOp(0xF2, sseMUL, regOp1, regOp2, arg);}
-void XEmitter::VDIVSD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) {WriteAVXOp(0xF2, sseDIV, regOp1, regOp2, arg);}
-void XEmitter::VADDPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) {WriteAVXOp(0x66, sseADD, regOp1, regOp2, arg);}
-void XEmitter::VSUBPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) {WriteAVXOp(0x66, sseSUB, regOp1, regOp2, arg);}
-void XEmitter::VMULPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) {WriteAVXOp(0x66, sseMUL, regOp1, regOp2, arg);}
-void XEmitter::VDIVPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) {WriteAVXOp(0x66, sseDIV, regOp1, regOp2, arg);}
-void XEmitter::VSQRTSD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) {WriteAVXOp(0xF2, sseSQRT, regOp1, regOp2, arg);}
-void XEmitter::VSHUFPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg, u8 shuffle) {WriteAVXOp(0x66, sseSHUF, regOp1, regOp2, arg, 1); Write8(shuffle);}
-void XEmitter::VUNPCKLPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg){WriteAVXOp(0x66, 0x14, regOp1, regOp2, arg);}
-void XEmitter::VUNPCKHPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg){WriteAVXOp(0x66, 0x15, regOp1, regOp2, arg);}
-
-void XEmitter::VANDPS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x00, sseAND, regOp1, regOp2, arg); }
-void XEmitter::VANDPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, sseAND, regOp1, regOp2, arg); }
-void XEmitter::VANDNPS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x00, sseANDN, regOp1, regOp2, arg); }
-void XEmitter::VANDNPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, sseANDN, regOp1, regOp2, arg); }
-void XEmitter::VORPS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x00, sseOR, regOp1, regOp2, arg); }
-void XEmitter::VORPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, sseOR, regOp1, regOp2, arg); }
-void XEmitter::VXORPS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x00, sseXOR, regOp1, regOp2, arg); }
-void XEmitter::VXORPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, sseXOR, regOp1, regOp2, arg); }
-
-void XEmitter::VPAND(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0xDB, regOp1, regOp2, arg); }
-void XEmitter::VPANDN(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0xDF, regOp1, regOp2, arg); }
-void XEmitter::VPOR(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0xEB, regOp1, regOp2, arg); }
-void XEmitter::VPXOR(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0xEF, regOp1, regOp2, arg); }
-
-void XEmitter::VFMADD132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x3898, regOp1, regOp2, arg); }
-void XEmitter::VFMADD213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38A8, regOp1, regOp2, arg); }
-void XEmitter::VFMADD231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38B8, regOp1, regOp2, arg); }
-void XEmitter::VFMADD132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x3898, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMADD213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38A8, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMADD231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38B8, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMADD132SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x3899, regOp1, regOp2, arg); }
-void XEmitter::VFMADD213SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38A9, regOp1, regOp2, arg); }
-void XEmitter::VFMADD231SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38B9, regOp1, regOp2, arg); }
-void XEmitter::VFMADD132SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x3899, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMADD213SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38A9, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMADD231SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38B9, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMSUB132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x389A, regOp1, regOp2, arg); }
-void XEmitter::VFMSUB213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38AA, regOp1, regOp2, arg); }
-void XEmitter::VFMSUB231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38BA, regOp1, regOp2, arg); }
-void XEmitter::VFMSUB132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x389A, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMSUB213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38AA, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMSUB231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38BA, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMSUB132SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x389B, regOp1, regOp2, arg); }
-void XEmitter::VFMSUB213SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38AB, regOp1, regOp2, arg); }
-void XEmitter::VFMSUB231SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38BB, regOp1, regOp2, arg); }
-void XEmitter::VFMSUB132SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x389B, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMSUB213SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38AB, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMSUB231SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38BB, regOp1, regOp2, arg, 1); }
-void XEmitter::VFNMADD132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x389C, regOp1, regOp2, arg); }
-void XEmitter::VFNMADD213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38AC, regOp1, regOp2, arg); }
-void XEmitter::VFNMADD231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38BC, regOp1, regOp2, arg); }
-void XEmitter::VFNMADD132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x389C, regOp1, regOp2, arg, 1); }
-void XEmitter::VFNMADD213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38AC, regOp1, regOp2, arg, 1); }
-void XEmitter::VFNMADD231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38BC, regOp1, regOp2, arg, 1); }
-void XEmitter::VFNMADD132SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x389D, regOp1, regOp2, arg); }
-void XEmitter::VFNMADD213SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38AD, regOp1, regOp2, arg); }
-void XEmitter::VFNMADD231SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38BD, regOp1, regOp2, arg); }
-void XEmitter::VFNMADD132SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x389D, regOp1, regOp2, arg, 1); }
-void XEmitter::VFNMADD213SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38AD, regOp1, regOp2, arg, 1); }
-void XEmitter::VFNMADD231SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38BD, regOp1, regOp2, arg, 1); }
-void XEmitter::VFNMSUB132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x389E, regOp1, regOp2, arg); }
-void XEmitter::VFNMSUB213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38AE, regOp1, regOp2, arg); }
-void XEmitter::VFNMSUB231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38BE, regOp1, regOp2, arg); }
-void XEmitter::VFNMSUB132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x389E, regOp1, regOp2, arg, 1); }
-void XEmitter::VFNMSUB213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38AE, regOp1, regOp2, arg, 1); }
-void XEmitter::VFNMSUB231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38BE, regOp1, regOp2, arg, 1); }
-void XEmitter::VFNMSUB132SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x389F, regOp1, regOp2, arg); }
-void XEmitter::VFNMSUB213SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38AF, regOp1, regOp2, arg); }
-void XEmitter::VFNMSUB231SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38BF, regOp1, regOp2, arg); }
-void XEmitter::VFNMSUB132SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x389F, regOp1, regOp2, arg, 1); }
-void XEmitter::VFNMSUB213SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38AF, regOp1, regOp2, arg, 1); }
-void XEmitter::VFNMSUB231SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38BF, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMADDSUB132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x3896, regOp1, regOp2, arg); }
-void XEmitter::VFMADDSUB213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38A6, regOp1, regOp2, arg); }
-void XEmitter::VFMADDSUB231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38B6, regOp1, regOp2, arg); }
-void XEmitter::VFMADDSUB132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x3896, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMADDSUB213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38A6, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMADDSUB231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38B6, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMSUBADD132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x3897, regOp1, regOp2, arg); }
-void XEmitter::VFMSUBADD213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38A7, regOp1, regOp2, arg); }
-void XEmitter::VFMSUBADD231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38B7, regOp1, regOp2, arg); }
-void XEmitter::VFMSUBADD132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x3897, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMSUBADD213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38A7, regOp1, regOp2, arg, 1); }
-void XEmitter::VFMSUBADD231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg) { WriteAVXOp(0x66, 0x38B7, regOp1, regOp2, arg, 1); }
-
-void XEmitter::SARX(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2) {WriteBMI2Op(bits, 0xF3, 0x38F7, regOp1, regOp2, arg);}
-void XEmitter::SHLX(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2) {WriteBMI2Op(bits, 0x66, 0x38F7, regOp1, regOp2, arg);}
-void XEmitter::SHRX(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2) {WriteBMI2Op(bits, 0xF2, 0x38F7, regOp1, regOp2, arg);}
-void XEmitter::RORX(int bits, X64Reg regOp, const OpArg& arg, u8 rotate) {WriteBMI2Op(bits, 0xF2, 0x3AF0, regOp, INVALID_REG, arg, 1); Write8(rotate);}
-void XEmitter::PEXT(int bits, X64Reg regOp1, X64Reg regOp2, const OpArg& arg) {WriteBMI2Op(bits, 0xF3, 0x38F5, regOp1, regOp2, arg);}
-void XEmitter::PDEP(int bits, X64Reg regOp1, X64Reg regOp2, const OpArg& arg) {WriteBMI2Op(bits, 0xF2, 0x38F5, regOp1, regOp2, arg);}
-void XEmitter::MULX(int bits, X64Reg regOp1, X64Reg regOp2, const OpArg& arg) {WriteBMI2Op(bits, 0xF2, 0x38F6, regOp2, regOp1, arg);}
-void XEmitter::BZHI(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2) {WriteBMI2Op(bits, 0x00, 0x38F5, regOp1, regOp2, arg);}
-void XEmitter::BLSR(int bits, X64Reg regOp, const OpArg& arg) {WriteBMI1Op(bits, 0x00, 0x38F3, (X64Reg)0x1, regOp, arg);}
-void XEmitter::BLSMSK(int bits, X64Reg regOp, const OpArg& arg) {WriteBMI1Op(bits, 0x00, 0x38F3, (X64Reg)0x2, regOp, arg);}
-void XEmitter::BLSI(int bits, X64Reg regOp, const OpArg& arg) {WriteBMI1Op(bits, 0x00, 0x38F3, (X64Reg)0x3, regOp, arg);}
-void XEmitter::BEXTR(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2){WriteBMI1Op(bits, 0x00, 0x38F7, regOp1, regOp2, arg);}
-void XEmitter::ANDN(int bits, X64Reg regOp1, X64Reg regOp2, const OpArg& arg) {WriteBMI1Op(bits, 0x00, 0x38F2, regOp1, regOp2, arg);}
-
-// Prefixes
-
-void XEmitter::LOCK() { Write8(0xF0); }
-void XEmitter::REP() { Write8(0xF3); }
-void XEmitter::REPNE() { Write8(0xF2); }
-void XEmitter::FSOverride() { Write8(0x64); }
-void XEmitter::GSOverride() { Write8(0x65); }
-
-void XEmitter::FWAIT()
-{
-    Write8(0x9B);
-}
-
-// TODO: make this more generic
-void XEmitter::WriteFloatLoadStore(int bits, FloatOp op, FloatOp op_80b, const OpArg& arg)
-{
-    int mf = 0;
-    ASSERT_MSG(!(bits == 80 && op_80b == floatINVALID), "WriteFloatLoadStore: 80 bits not supported for this instruction");
-    switch (bits)
-    {
-    case 32: mf = 0; break;
-    case 64: mf = 4; break;
-    case 80: mf = 2; break;
-    default: ASSERT_MSG(0, "WriteFloatLoadStore: invalid bits (should be 32/64/80)");
-    }
-    Write8(0xd9 | mf);
-    // x87 instructions use the reg field of the ModR/M byte as opcode:
-    if (bits == 80)
-        op = op_80b;
-    arg.WriteRest(this, 0, (X64Reg) op);
-}
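WriteFloatLoadStore folds the operand size into the x87 escape byte (0xd9 | mf, giving D9/DD/DB for the 32/64/80-bit forms) and smuggles the opcode through the ModR/M reg field. xbyak spells the same encodings as separate mnemonics; inside a Xbyak::CodeGenerator subclass the deleted FLD/FSTP calls would look roughly like this (a sketch, assuming xbyak's x87 support):

    fld(dword[rax]);  // 32-bit load,          D9 /0
    fld(qword[rax]);  // 64-bit load,          DD /0
    fstp(qword[rbx]); // 64-bit store-and-pop, DD /3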
-}
-
-void XEmitter::FLD(int bits, const OpArg& src) {WriteFloatLoadStore(bits, floatLD, floatLD80, src);}
-void XEmitter::FST(int bits, const OpArg& dest) {WriteFloatLoadStore(bits, floatST, floatINVALID, dest);}
-void XEmitter::FSTP(int bits, const OpArg& dest) {WriteFloatLoadStore(bits, floatSTP, floatSTP80, dest);}
-void XEmitter::FNSTSW_AX() { Write8(0xDF); Write8(0xE0); }
-
-void XEmitter::RDTSC() { Write8(0x0F); Write8(0x31); }
-
-void XCodeBlock::PoisonMemory() {
-    // x86/64: 0xCC = breakpoint
-    memset(region, 0xCC, region_size);
-}
-
-}
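PoisonMemory fills the whole region with 0xCC so that any stray jump into unemitted or stale code hits an int3 breakpoint immediately. The same trick applies to whatever buffer backs the xbyak-based code block; a minimal sketch, where region/region_size stand in for the replacement class's members:

    #include <cstdint>
    #include <cstring>

    // Fill a JIT buffer with int3 so execution of stale code faults at once.
    void PoisonMemory(std::uint8_t* region, std::size_t region_size) {
        std::memset(region, 0xCC, region_size); // 0xCC = int3 breakpoint
    }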
diff --git a/src/common/x64/emitter.h b/src/common/x64/emitter.h
deleted file mode 100644
index baa30695..00000000
--- a/src/common/x64/emitter.h
+++ /dev/null
@@ -1,1057 +0,0 @@
-// Copyright (C) 2003 Dolphin Project.
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, version 2.0 or later versions.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License 2.0 for more details.
-
-// A copy of the GPL 2.0 should have been included with the program.
-// If not, see http://www.gnu.org/licenses/
-
-// Official SVN repository and contact information can be found at
-// http://code.google.com/p/dolphin-emu/
-
-#pragma once
-
-#include <functional>
-
-#include "common/assert.h"
-#include "common/bit_set.h"
-#include "common/common_types.h"
-#include "common/code_block.h"
-
-#if defined(ARCHITECTURE_x86_64) && !defined(_ARCH_64)
-#define _ARCH_64
-#endif
-
-#ifdef _ARCH_64
-#define PTRBITS 64
-#else
-#define PTRBITS 32
-#endif
-
-namespace Gen
-{
-
-enum X64Reg
-{
-    EAX = 0, EBX = 3, ECX = 1, EDX = 2,
-    ESI = 6, EDI = 7, EBP = 5, ESP = 4,
-
-    RAX = 0, RBX = 3, RCX = 1, RDX = 2,
-    RSI = 6, RDI = 7, RBP = 5, RSP = 4,
-    R8 = 8, R9 = 9, R10 = 10,R11 = 11,
-    R12 = 12,R13 = 13,R14 = 14,R15 = 15,
-
-    AL = 0, BL = 3, CL = 1, DL = 2,
-    SIL = 6, DIL = 7, BPL = 5, SPL = 4,
-    AH = 0x104, BH = 0x107, CH = 0x105, DH = 0x106,
-
-    AX = 0, BX = 3, CX = 1, DX = 2,
-    SI = 6, DI = 7, BP = 5, SP = 4,
-
-    XMM0=0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
-    XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15,
-
-    YMM0=0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
-    YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15,
-
-    INVALID_REG = 0xFFFFFFFF
-};
-
-enum CCFlags
-{
-    CC_O = 0,
-    CC_NO = 1,
-    CC_B = 2, CC_C = 2, CC_NAE = 2,
-    CC_NB = 3, CC_NC = 3, CC_AE = 3,
-    CC_Z = 4, CC_E = 4,
-    CC_NZ = 5, CC_NE = 5,
-    CC_BE = 6, CC_NA = 6,
-    CC_NBE = 7, CC_A = 7,
-    CC_S = 8,
-    CC_NS = 9,
-    CC_P = 0xA, CC_PE = 0xA,
-    CC_NP = 0xB, CC_PO = 0xB,
-    CC_L = 0xC, CC_NGE = 0xC,
-    CC_NL = 0xD, CC_GE = 0xD,
-    CC_LE = 0xE, CC_NG = 0xE,
-    CC_NLE = 0xF, CC_G = 0xF
-};
-
-enum
-{
-    NUMGPRs = 16,
-    NUMXMMs = 16,
-};
-
-enum
-{
-    SCALE_NONE = 0,
-    SCALE_1 = 1,
-    SCALE_2 = 2,
-    SCALE_4 = 4,
-    SCALE_8 = 8,
-    SCALE_ATREG = 16,
-    //SCALE_NOBASE_1 is not supported and can be replaced with SCALE_ATREG
-    SCALE_NOBASE_2 = 34,
-    SCALE_NOBASE_4 = 36,
-    SCALE_NOBASE_8 = 40,
-    SCALE_RIP = 0xFF,
-    SCALE_IMM8 = 0xF0,
-    SCALE_IMM16 = 0xF1,
-    SCALE_IMM32 = 0xF2,
-    SCALE_IMM64 = 0xF3,
-};
-
-enum NormalOp {
-    nrmADD,
-    nrmADC,
-    nrmSUB,
-    nrmSBB,
-    nrmAND,
-    nrmOR ,
-    nrmXOR,
-    nrmMOV,
-    nrmTEST,
-    nrmCMP,
-    nrmXCHG,
-};
-
-enum {
-    CMP_EQ = 0,
-    CMP_LT = 1,
-    CMP_LE = 2,
-    CMP_UNORD = 3,
-    CMP_NEQ = 4,
-    CMP_NLT = 5,
-    CMP_NLE = 6,
-    CMP_ORD = 7,
-};
-
-enum FloatOp {
-    floatLD = 0,
-    floatST = 2,
-    floatSTP = 3,
-    floatLD80 = 5,
-    floatSTP80 = 7,
-
-    floatINVALID = -1,
-};
-
-enum FloatRound {
-    FROUND_NEAREST = 0,
-    FROUND_FLOOR = 1,
-    FROUND_CEIL = 2,
-    FROUND_ZERO = 3,
-    FROUND_MXCSR = 4,
-
-    FROUND_RAISE_PRECISION = 0,
-    FROUND_IGNORE_PRECISION = 8,
-};
-
-class XEmitter;
-
-// RIP addressing does not benefit from micro op fusion on Core arch
-struct OpArg
-{
-    friend class XEmitter;
-
-    constexpr OpArg() = default; // dummy op arg, used for storage
-    constexpr OpArg(u64 offset_, int scale_, X64Reg rmReg = RAX, X64Reg scaledReg = RAX)
-        : scale(static_cast<u8>(scale_))
-        , offsetOrBaseReg(static_cast<u16>(rmReg))
-        , indexReg(static_cast<u16>(scaledReg))
-        , offset(offset_)
-    {
-    }
-
-    constexpr bool operator==(const OpArg &b) const
-    {
-        return operandReg == b.operandReg &&
-               scale == b.scale &&
-               offsetOrBaseReg == b.offsetOrBaseReg &&
-               indexReg == b.indexReg &&
-               offset == b.offset;
-    }
-
-    void WriteRex(XEmitter *emit, int opBits, int bits, int customOp = -1) const;
-    void WriteVex(XEmitter* emit, X64Reg regOp1, X64Reg regOp2, int L, int pp, int mmmmm, int W = 0) const;
-    void WriteRest(XEmitter *emit, int extraBytes=0, X64Reg operandReg=INVALID_REG, bool warn_64bit_offset = true) const;
-    void WriteSingleByteOp(XEmitter *emit, u8 op, X64Reg operandReg, int bits);
-    void WriteNormalOp(XEmitter *emit, bool toRM, NormalOp op, const OpArg &operand, int bits) const;
-
-    constexpr bool IsImm() const { return scale == SCALE_IMM8 || scale == SCALE_IMM16 || scale == SCALE_IMM32 || scale == SCALE_IMM64; }
-    constexpr bool IsSimpleReg() const { return scale == SCALE_NONE; }
-    constexpr bool IsSimpleReg(X64Reg reg) const
-    {
-        return IsSimpleReg() && GetSimpleReg() == reg;
-    }
-
-    int GetImmBits() const
-    {
-        switch (scale)
-        {
-        case SCALE_IMM8: return 8;
-        case SCALE_IMM16: return 16;
-        case SCALE_IMM32: return 32;
-        case SCALE_IMM64: return 64;
-        default: return -1;
-        }
-    }
-
-    void SetImmBits(int bits) {
-        switch (bits)
-        {
-        case 8: scale = SCALE_IMM8; break;
-        case 16: scale = SCALE_IMM16; break;
-        case 32: scale = SCALE_IMM32; break;
-        case 64: scale = SCALE_IMM64; break;
-        }
-    }
-
-    constexpr X64Reg GetSimpleReg() const
-    {
-        return scale == SCALE_NONE
-            ? static_cast<X64Reg>(offsetOrBaseReg)
-            : INVALID_REG;
-    }
-
-    constexpr u32 GetImmValue() const {
-        return static_cast<u32>(offset);
-    }
-
-    // For loops.
-    void IncreaseOffset(int sz) {
-        offset += sz;
-    }
-
-private:
-    u8 scale = 0;
-    u16 offsetOrBaseReg = 0;
-    u16 indexReg = 0;
-    u64 offset = 0; // use RIP-relative as much as possible - 64-bit immediates are not available.
-    u16 operandReg = 0;
-};
-
-template <typename T>
-inline OpArg M(const T *ptr) { return OpArg(reinterpret_cast<u64>(ptr), static_cast<int>(SCALE_RIP)); }
-constexpr OpArg R(X64Reg value) { return OpArg(0, SCALE_NONE, value); }
-constexpr OpArg MatR(X64Reg value) { return OpArg(0, SCALE_ATREG, value); }
-
-constexpr OpArg MDisp(X64Reg value, int offset)
-{
-    return OpArg(static_cast<u32>(offset), SCALE_ATREG, value);
-}
-
-constexpr OpArg MComplex(X64Reg base, X64Reg scaled, int scale, int offset)
-{
-    return OpArg(offset, scale, base, scaled);
-}
-
-constexpr OpArg MScaled(X64Reg scaled, int scale, int offset)
-{
-    return scale == SCALE_1
-        ? OpArg(offset, SCALE_ATREG, scaled)
-        : OpArg(offset, scale | 0x20, RAX, scaled);
-}
-
-constexpr OpArg MRegSum(X64Reg base, X64Reg offset)
-{
-    return MComplex(base, offset, 1, 0);
-}
-
-constexpr OpArg Imm8 (u8 imm)  { return OpArg(imm, SCALE_IMM8); }
-constexpr OpArg Imm16(u16 imm) { return OpArg(imm, SCALE_IMM16); } //rarely used
-constexpr OpArg Imm32(u32 imm) { return OpArg(imm, SCALE_IMM32); }
-constexpr OpArg Imm64(u64 imm) { return OpArg(imm, SCALE_IMM64); }
-constexpr OpArg UImmAuto(u32 imm) {
-    return OpArg(imm, imm >= 128 ? SCALE_IMM32 : SCALE_IMM8);
-}
-constexpr OpArg SImmAuto(s32 imm) {
-    return OpArg(imm, (imm >= 128 || imm < -128) ? SCALE_IMM32 : SCALE_IMM8);
-}
-
-template <typename T>
-OpArg ImmPtr(const T* imm)
-{
-#ifdef _ARCH_64
-    return Imm64(reinterpret_cast<u64>(imm));
-#else
-    return Imm32(reinterpret_cast<u32>(imm));
-#endif
-}
-
-inline u32 PtrOffset(const void* ptr, const void* base)
-{
-#ifdef _ARCH_64
-    s64 distance = (s64)ptr-(s64)base;
-    if (distance >= 0x80000000LL ||
-        distance < -0x80000000LL)
-    {
-        ASSERT_MSG(0, "pointer offset out of range");
-        return 0;
-    }
-
-    return (u32)distance;
-#else
-    return (u32)ptr-(u32)base;
-#endif
-}
-
-//usage: int a[]; ARRAY_OFFSET(a,10)
-#define ARRAY_OFFSET(array,index) ((u32)((u64)&(array)[index]-(u64)&(array)[0]))
-//usage: struct {int e;} s; STRUCT_OFFSET(s,e)
-#define STRUCT_OFFSET(str,elem) ((u32)((u64)&(str).elem-(u64)&(str)))
-
-struct FixupBranch
-{
-    u8 *ptr;
-    int type; //0 = 8bit 1 = 32bit
-};
-
-enum SSECompare
-{
-    EQ = 0,
-    LT,
-    LE,
-    UNORD,
-    NEQ,
-    NLT,
-    NLE,
-    ORD,
-};
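The OpArg helpers above are the old emitter's addressing-mode vocabulary; xbyak replaces them with operator-overloaded operands. A rough correspondence, written as it would appear inside a Xbyak::CodeGenerator subclass (data_label is an illustrative Xbyak::Label):

    mov(rax, ptr[rbx]);                // MatR(RBX)
    mov(rax, ptr[rbx + 16]);           // MDisp(RBX, 16)
    mov(rax, ptr[rbx + rcx * 4 + 16]); // MComplex(RBX, RCX, SCALE_4, 16)
    mov(rax, ptr[rip + data_label]);   // M(ptr): RIP-relative, here via a label
    mov(rax, 42);                      // Imm32/Imm64: width inferred from the value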
-
-class XEmitter
-{
-    friend struct OpArg;  // for Write8 etc
-private:
-    u8 *code;
-    bool flags_locked;
-
-    void CheckFlags();
-
-    void Rex(int w, int r, int x, int b);
-    void WriteSimple1Byte(int bits, u8 byte, X64Reg reg);
-    void WriteSimple2Byte(int bits, u8 byte1, u8 byte2, X64Reg reg);
-    void WriteMulDivType(int bits, OpArg src, int ext);
-    void WriteBitSearchType(int bits, X64Reg dest, OpArg src, u8 byte2, bool rep = false);
-    void WriteShift(int bits, OpArg dest, const OpArg& shift, int ext);
-    void WriteBitTest(int bits, const OpArg& dest, const OpArg& index, int ext);
-    void WriteMXCSR(OpArg arg, int ext);
-    void WriteSSEOp(u8 opPrefix, u16 op, X64Reg regOp, OpArg arg, int extrabytes = 0);
-    void WriteSSEOp_opBits(int opBits, u8 opPrefix, u16 op, X64Reg regOp, OpArg arg, int extrabytes = 0);
-    void WriteSSSE3Op(u8 opPrefix, u16 op, X64Reg regOp, const OpArg& arg, int extrabytes = 0);
-    void WriteSSE41Op(u8 opPrefix, u16 op, X64Reg regOp, const OpArg& arg, int extrabytes = 0);
-    void WriteAVXOp(u8 opPrefix, u16 op, X64Reg regOp, const OpArg& arg, int extrabytes = 0);
-    void WriteAVXOp(u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int extrabytes = 0);
-    void WriteVEXOp(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int extrabytes = 0);
-    void WriteBMI1Op(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int extrabytes = 0);
-    void WriteBMI2Op(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int extrabytes = 0);
-    void WriteFloatLoadStore(int bits, FloatOp op, FloatOp op_80b, const OpArg& arg);
-    void WriteNormalOp(XEmitter *emit, int bits, NormalOp op, const OpArg& a1, const OpArg& a2);
-
-    void ABI_CalculateFrameSize(BitSet32 mask, size_t rsp_alignment, size_t needed_frame_size, size_t* shadowp, size_t* subtractionp, size_t* xmm_offsetp);
-
-protected:
-    void Write8(u8 value);
-    void Write16(u16 value);
-    void Write32(u32 value);
-    void Write64(u64 value);
-
-public:
-    XEmitter() { code = nullptr; flags_locked = false; }
-    XEmitter(u8 *code_ptr) { code = code_ptr; flags_locked = false; }
-    virtual ~XEmitter() {}
-
-    void WriteModRM(int mod, int reg, int rm);
-    void WriteSIB(int scale, int index, int base);
-
-    void SetCodePtr(u8 *ptr);
-    void ReserveCodeSpace(int bytes);
-    const u8 *AlignCode4();
-    const u8 *AlignCode16();
-    const u8 *AlignCodePage();
-    const u8 *GetCodePtr() const;
-    u8 *GetWritableCodePtr();
-
-    void LockFlags() { flags_locked = true; }
-    void UnlockFlags() { flags_locked = false; }
-
-    // Looking for one of these? It's BANNED!! Some instructions are slow on modern CPU
-    // INC, DEC, LOOP, LOOPNE, LOOPE, ENTER, LEAVE, XCHG, XLAT, REP MOVSB/MOVSD, REP SCASD + other string instr.,
-    // INC and DEC are slow on Intel Core, but not on AMD. They create a
-    // false flag dependency because they only update a subset of the flags.
-    // XCHG is SLOW and should be avoided.
-
-    // Debug breakpoint
-    void INT3();
-
-    // Do nothing
-    void NOP(size_t count = 1);
-
-    // Save energy in wait-loops on P4 only. Probably not too useful.
-    void PAUSE();
-
-    // Flag control
-    void STC();
-    void CLC();
-    void CMC();
-
-    // These two can not be executed in 64-bit mode on early Intel 64-bit CPU:s, only on Core2 and AMD!
-    void LAHF(); // 3 cycle vector path
-    void SAHF(); // direct path fast
-
-
-    // Stack control
-    void PUSH(X64Reg reg);
-    void POP(X64Reg reg);
-    void PUSH(int bits, const OpArg& reg);
-    void POP(int bits, const OpArg& reg);
-    void PUSHF();
-    void POPF();
-
-    // Flow control
-    void RET();
-    void RET_FAST();
-    void UD2();
-    FixupBranch J(bool force5bytes = false);
-
-    void JMP(const u8* addr, bool force5Bytes = false);
-    void JMPptr(const OpArg& arg);
-    void JMPself(); //infinite loop!
-#ifdef CALL
-#undef CALL
-#endif
-    void CALL(const void* fnptr);
-    FixupBranch CALL();
-    void CALLptr(OpArg arg);
-
-    FixupBranch J_CC(CCFlags conditionCode, bool force5bytes = false);
-    void J_CC(CCFlags conditionCode, const u8* addr, bool force5Bytes = false);
-
-    void SetJumpTarget(const FixupBranch& branch);
-    void SetJumpTarget(const FixupBranch& branch, const u8* target);
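The FixupBranch/SetJumpTarget pair records a branch site and patches it once the target is known. xbyak handles the same forward references with labels; a sketch inside a CodeGenerator subclass:

    Xbyak::Label skip;
    cmp(eax, 0);
    je(skip);    // forward branch, fixed up automatically
    add(eax, 1);
    L(skip);     // plays the role of SetJumpTarget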
-
-    void SETcc(CCFlags flag, OpArg dest);
-    // Note: CMOV brings small if any benefit on current cpus.
-    void CMOVcc(int bits, X64Reg dest, OpArg src, CCFlags flag);
-
-    // Fences
-    void LFENCE();
-    void MFENCE();
-    void SFENCE();
-
-    // Bit scan
-    void BSF(int bits, X64Reg dest, const OpArg& src); // Bottom bit to top bit
-    void BSR(int bits, X64Reg dest, const OpArg& src); // Top bit to bottom bit
-
-    // Cache control
-    enum PrefetchLevel
-    {
-        PF_NTA, //Non-temporal (data used once and only once)
-        PF_T0,  //All cache levels
-        PF_T1,  //Levels 2+ (aliased to T0 on AMD)
-        PF_T2,  //Levels 3+ (aliased to T0 on AMD)
-    };
-    void PREFETCH(PrefetchLevel level, OpArg arg);
-    void MOVNTI(int bits, const OpArg& dest, X64Reg src);
-    void MOVNTDQ(const OpArg& arg, X64Reg regOp);
-    void MOVNTPS(const OpArg& arg, X64Reg regOp);
-    void MOVNTPD(const OpArg& arg, X64Reg regOp);
-
-    // Multiplication / division
-    void MUL(int bits, const OpArg& src); //UNSIGNED
-    void IMUL(int bits, const OpArg& src); //SIGNED
-    void IMUL(int bits, X64Reg regOp, const OpArg& src);
-    void IMUL(int bits, X64Reg regOp, const OpArg& src, const OpArg& imm);
-    void DIV(int bits, const OpArg& src);
-    void IDIV(int bits, const OpArg& src);
-
-    // Shift
-    void ROL(int bits, const OpArg& dest, const OpArg& shift);
-    void ROR(int bits, const OpArg& dest, const OpArg& shift);
-    void RCL(int bits, const OpArg& dest, const OpArg& shift);
-    void RCR(int bits, const OpArg& dest, const OpArg& shift);
-    void SHL(int bits, const OpArg& dest, const OpArg& shift);
-    void SHR(int bits, const OpArg& dest, const OpArg& shift);
-    void SAR(int bits, const OpArg& dest, const OpArg& shift);
-
-    // Bit Test
-    void BT(int bits, const OpArg& dest, const OpArg& index);
-    void BTS(int bits, const OpArg& dest, const OpArg& index);
-    void BTR(int bits, const OpArg& dest, const OpArg& index);
-    void BTC(int bits, const OpArg& dest, const OpArg& index);
-
-    // Double-Precision Shift
-    void SHRD(int bits, const OpArg& dest, const OpArg& src, const OpArg& shift);
-    void SHLD(int bits, const OpArg& dest, const OpArg& src, const OpArg& shift);
-
-    // Extend EAX into EDX in various ways
-    void CWD(int bits = 16);
-    void CDQ() {CWD(32);}
-    void CQO() {CWD(64);}
-    void CBW(int bits = 8);
-    void CWDE() {CBW(16);}
-    void CDQE() {CBW(32);}
-
-    // Load effective address
-    void LEA(int bits, X64Reg dest, OpArg src);
-
-    // Integer arithmetic
-    void NEG(int bits, const OpArg& src);
-    void ADD(int bits, const OpArg& a1, const OpArg& a2);
-    void ADC(int bits, const OpArg& a1, const OpArg& a2);
-    void SUB(int bits, const OpArg& a1, const OpArg& a2);
-    void SBB(int bits, const OpArg& a1, const OpArg& a2);
-    void AND(int bits, const OpArg& a1, const OpArg& a2);
-    void CMP(int bits, const OpArg& a1, const OpArg& a2);
-
-    // Bit operations
-    void NOT (int bits, const OpArg& src);
-    void OR(int bits, const OpArg& a1, const OpArg& a2);
-    void XOR(int bits, const OpArg& a1, const OpArg& a2);
-    void MOV(int bits, const OpArg& a1, const OpArg& a2);
-    void TEST(int bits, const OpArg& a1, const OpArg& a2);
-
-    // Are these useful at all? Consider removing.
-    void XCHG(int bits, const OpArg& a1, const OpArg& a2);
-    void XCHG_AHAL();
-
-    // Byte swapping (32 and 64-bit only).
-    void BSWAP(int bits, X64Reg reg);
-
-    // Sign/zero extension
-    void MOVSX(int dbits, int sbits, X64Reg dest, OpArg src); //automatically uses MOVSXD if necessary
-    void MOVZX(int dbits, int sbits, X64Reg dest, OpArg src);
-
-    // Available only on Atom or >= Haswell so far. Test with GetCPUCaps().movbe.
-    void MOVBE(int dbits, const OpArg& dest, const OpArg& src);
-
-    // Available only on AMD >= Phenom or Intel >= Haswell
-    void LZCNT(int bits, X64Reg dest, const OpArg& src);
-    // Note: this one is actually part of BMI1
-    void TZCNT(int bits, X64Reg dest, const OpArg& src);
-
-    // WARNING - These two take 11-13 cycles and are VectorPath! (AMD64)
-    void STMXCSR(const OpArg& memloc);
-    void LDMXCSR(const OpArg& memloc);
-
-    // Prefixes
-    void LOCK();
-    void REP();
-    void REPNE();
-    void FSOverride();
-    void GSOverride();
-
-    // x87
-    enum x87StatusWordBits {
-        x87_InvalidOperation = 0x1,
-        x87_DenormalizedOperand = 0x2,
-        x87_DivisionByZero = 0x4,
-        x87_Overflow = 0x8,
-        x87_Underflow = 0x10,
-        x87_Precision = 0x20,
-        x87_StackFault = 0x40,
-        x87_ErrorSummary = 0x80,
-        x87_C0 = 0x100,
-        x87_C1 = 0x200,
-        x87_C2 = 0x400,
-        x87_TopOfStack = 0x2000 | 0x1000 | 0x800,
-        x87_C3 = 0x4000,
-        x87_FPUBusy = 0x8000,
-    };
-
-    void FLD(int bits, const OpArg& src);
-    void FST(int bits, const OpArg& dest);
-    void FSTP(int bits, const OpArg& dest);
-    void FNSTSW_AX();
-    void FWAIT();
-
-    // SSE/SSE2: Floating point arithmetic
-    void ADDSS(X64Reg regOp, const OpArg& arg);
-    void ADDSD(X64Reg regOp, const OpArg& arg);
-    void SUBSS(X64Reg regOp, const OpArg& arg);
-    void SUBSD(X64Reg regOp, const OpArg& arg);
-    void MULSS(X64Reg regOp, const OpArg& arg);
-    void MULSD(X64Reg regOp, const OpArg& arg);
-    void DIVSS(X64Reg regOp, const OpArg& arg);
-    void DIVSD(X64Reg regOp, const OpArg& arg);
-    void MINSS(X64Reg regOp, const OpArg& arg);
-    void MINSD(X64Reg regOp, const OpArg& arg);
-    void MAXSS(X64Reg regOp, const OpArg& arg);
-    void MAXSD(X64Reg regOp, const OpArg& arg);
-    void SQRTSS(X64Reg regOp, const OpArg& arg);
-    void SQRTSD(X64Reg regOp, const OpArg& arg);
-    void RCPSS(X64Reg regOp, const OpArg& arg);
-    void RSQRTSS(X64Reg regOp, const OpArg& arg);
-
-    // SSE/SSE2: Floating point bitwise (yes)
-    void CMPSS(X64Reg regOp, const OpArg& arg, u8 compare);
-    void CMPSD(X64Reg regOp, const OpArg& arg, u8 compare);
-
-    void CMPEQSS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_EQ); }
-    void CMPLTSS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_LT); }
-    void CMPLESS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_LE); }
-    void CMPUNORDSS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_UNORD); }
-    void CMPNEQSS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_NEQ); }
-    void CMPNLTSS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_NLT); }
-    void CMPORDSS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_ORD); }
-
-    void CMPEQSD(X64Reg regOp, const OpArg& arg) { CMPSD(regOp, arg, CMP_EQ); }
-    void CMPLTSD(X64Reg regOp, const OpArg& arg) { CMPSD(regOp, arg, CMP_LT); }
-    void CMPLESD(X64Reg regOp, const OpArg& arg) { CMPSD(regOp, arg, CMP_LE); }
-    void CMPUNORDSD(X64Reg regOp, const OpArg& arg) { CMPSD(regOp, arg, CMP_UNORD); }
-    void CMPNEQSD(X64Reg regOp, const OpArg& arg) { CMPSD(regOp, arg, CMP_NEQ); }
-    void CMPNLTSD(X64Reg regOp, const OpArg& arg) { CMPSD(regOp, arg, CMP_NLT); }
-    void CMPORDSD(X64Reg regOp, const OpArg& arg) { CMPSD(regOp, arg, CMP_ORD); }
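The CMPEQSS/CMPLTSS/... wrappers are all CMPSS with one of the CMP_* predicate immediates (CMP_EQ = 0, CMP_LT = 1, and so on). xbyak takes the predicate byte directly:

    cmpss(xmm0, xmm1, 0); // predicate 0 = EQ, the old CMPEQSS
    cmpss(xmm0, xmm1, 1); // predicate 1 = LT, the old CMPLTSS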
-
-    // SSE/SSE2: Floating point packed arithmetic (x4 for float, x2 for double)
-    void ADDPS(X64Reg regOp, const OpArg& arg);
-    void ADDPD(X64Reg regOp, const OpArg& arg);
-    void SUBPS(X64Reg regOp, const OpArg& arg);
-    void SUBPD(X64Reg regOp, const OpArg& arg);
-    void CMPPS(X64Reg regOp, const OpArg& arg, u8 compare);
-    void CMPPD(X64Reg regOp, const OpArg& arg, u8 compare);
-    void MULPS(X64Reg regOp, const OpArg& arg);
-    void MULPD(X64Reg regOp, const OpArg& arg);
-    void DIVPS(X64Reg regOp, const OpArg& arg);
-    void DIVPD(X64Reg regOp, const OpArg& arg);
-    void MINPS(X64Reg regOp, const OpArg& arg);
-    void MINPD(X64Reg regOp, const OpArg& arg);
-    void MAXPS(X64Reg regOp, const OpArg& arg);
-    void MAXPD(X64Reg regOp, const OpArg& arg);
-    void SQRTPS(X64Reg regOp, const OpArg& arg);
-    void SQRTPD(X64Reg regOp, const OpArg& arg);
-    void RCPPS(X64Reg regOp, const OpArg& arg);
-    void RSQRTPS(X64Reg regOp, const OpArg& arg);
-
-    // SSE/SSE2: Floating point packed bitwise (x4 for float, x2 for double)
-    void ANDPS(X64Reg regOp, const OpArg& arg);
-    void ANDPD(X64Reg regOp, const OpArg& arg);
-    void ANDNPS(X64Reg regOp, const OpArg& arg);
-    void ANDNPD(X64Reg regOp, const OpArg& arg);
-    void ORPS(X64Reg regOp, const OpArg& arg);
-    void ORPD(X64Reg regOp, const OpArg& arg);
-    void XORPS(X64Reg regOp, const OpArg& arg);
-    void XORPD(X64Reg regOp, const OpArg& arg);
-
-    // SSE/SSE2: Shuffle components. These are tricky - see Intel documentation.
-    void SHUFPS(X64Reg regOp, const OpArg& arg, u8 shuffle);
-    void SHUFPD(X64Reg regOp, const OpArg& arg, u8 shuffle);
-
-    // SSE/SSE2: Useful alternative to shuffle in some cases.
-    void MOVDDUP(X64Reg regOp, const OpArg& arg);
-
-    // SSE3: Horizontal operations in SIMD registers. Very slow! shufps-based code beats it handily on Ivy.
-    void HADDPS(X64Reg dest, const OpArg& src);
-
-    // SSE4: Further horizontal operations - dot products. These are weirdly flexible, the arg contains both a read mask and a write "mask".
-    void DPPS(X64Reg dest, const OpArg& src, u8 arg);
-
-    void UNPCKLPS(X64Reg dest, const OpArg& src);
-    void UNPCKHPS(X64Reg dest, const OpArg& src);
-    void UNPCKLPD(X64Reg dest, const OpArg& src);
-    void UNPCKHPD(X64Reg dest, const OpArg& src);
-
-    // SSE/SSE2: Compares.
-    void COMISS(X64Reg regOp, const OpArg& arg);
-    void COMISD(X64Reg regOp, const OpArg& arg);
-    void UCOMISS(X64Reg regOp, const OpArg& arg);
-    void UCOMISD(X64Reg regOp, const OpArg& arg);
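COMISS and UCOMISS differ only in whether a quiet NaN raises an invalid-operation exception; both set ZF, PF and CF from the compare, so NaNs are detected through PF. The usual pattern, in xbyak spelling (labels illustrative):

    Xbyak::Label nan_case, greater;
    ucomiss(xmm0, xmm1);
    jp(nan_case); // PF set -> unordered, at least one operand is NaN
    ja(greater);  // CF == 0 && ZF == 0 -> xmm0 > xmm1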
-
-    // SSE/SSE2: Moves. Use the right data type for your data, in most cases.
-    void MOVAPS(X64Reg regOp, const OpArg& arg);
-    void MOVAPD(X64Reg regOp, const OpArg& arg);
-    void MOVAPS(const OpArg& arg, X64Reg regOp);
-    void MOVAPD(const OpArg& arg, X64Reg regOp);
-
-    void MOVUPS(X64Reg regOp, const OpArg& arg);
-    void MOVUPD(X64Reg regOp, const OpArg& arg);
-    void MOVUPS(const OpArg& arg, X64Reg regOp);
-    void MOVUPD(const OpArg& arg, X64Reg regOp);
-
-    void MOVDQA(X64Reg regOp, const OpArg& arg);
-    void MOVDQA(const OpArg& arg, X64Reg regOp);
-    void MOVDQU(X64Reg regOp, const OpArg& arg);
-    void MOVDQU(const OpArg& arg, X64Reg regOp);
-
-    void MOVSS(X64Reg regOp, const OpArg& arg);
-    void MOVSD(X64Reg regOp, const OpArg& arg);
-    void MOVSS(const OpArg& arg, X64Reg regOp);
-    void MOVSD(const OpArg& arg, X64Reg regOp);
-
-    void MOVLPS(X64Reg regOp, const OpArg& arg);
-    void MOVLPD(X64Reg regOp, const OpArg& arg);
-    void MOVLPS(const OpArg& arg, X64Reg regOp);
-    void MOVLPD(const OpArg& arg, X64Reg regOp);
-
-    void MOVHPS(X64Reg regOp, const OpArg& arg);
-    void MOVHPD(X64Reg regOp, const OpArg& arg);
-    void MOVHPS(const OpArg& arg, X64Reg regOp);
-    void MOVHPD(const OpArg& arg, X64Reg regOp);
-
-    void MOVHLPS(X64Reg regOp1, X64Reg regOp2);
-    void MOVLHPS(X64Reg regOp1, X64Reg regOp2);
-
-    void MOVD_xmm(X64Reg dest, const OpArg& arg);
-    void MOVQ_xmm(X64Reg dest, OpArg arg);
-    void MOVD_xmm(const OpArg& arg, X64Reg src);
-    void MOVQ_xmm(OpArg arg, X64Reg src);
-
-    // SSE/SSE2: Generates a mask from the high bits of the components of the packed register in question.
-    void MOVMSKPS(X64Reg dest, const OpArg& arg);
-    void MOVMSKPD(X64Reg dest, const OpArg& arg);
-
-    // SSE2: Selective byte store, mask in src register. EDI/RDI specifies store address. This is a weird one.
-    void MASKMOVDQU(X64Reg dest, X64Reg src);
-    void LDDQU(X64Reg dest, const OpArg& src);
-
-    // SSE/SSE2: Data type conversions.
-    void CVTPS2PD(X64Reg dest, const OpArg& src);
-    void CVTPD2PS(X64Reg dest, const OpArg& src);
-    void CVTSS2SD(X64Reg dest, const OpArg& src);
-    void CVTSI2SS(int opBits, X64Reg dest, const OpArg& src);
-    void CVTSD2SS(X64Reg dest, const OpArg& src);
-    void CVTSI2SD(int opBits, X64Reg dest, const OpArg& src);
-    void CVTDQ2PD(X64Reg regOp, const OpArg& arg);
-    void CVTPD2DQ(X64Reg regOp, const OpArg& arg);
-    void CVTDQ2PS(X64Reg regOp, const OpArg& arg);
-    void CVTPS2DQ(X64Reg regOp, const OpArg& arg);
-
-    void CVTTPS2DQ(X64Reg regOp, const OpArg& arg);
-    void CVTTPD2DQ(X64Reg regOp, const OpArg& arg);
-
-    // Destinations are X64 regs (rax, rbx, ...) for these instructions.
-    void CVTSS2SI(X64Reg xregdest, const OpArg& src);
-    void CVTSD2SI(X64Reg xregdest, const OpArg& src);
-    void CVTTSS2SI(X64Reg xregdest, const OpArg& arg);
-    void CVTTSD2SI(X64Reg xregdest, const OpArg& arg);
-
-    // SSE2: Packed integer instructions
-    void PACKSSDW(X64Reg dest, const OpArg& arg);
-    void PACKSSWB(X64Reg dest, const OpArg& arg);
-    void PACKUSDW(X64Reg dest, const OpArg& arg);
-    void PACKUSWB(X64Reg dest, const OpArg& arg);
-
-    void PUNPCKLBW(X64Reg dest, const OpArg &arg);
-    void PUNPCKLWD(X64Reg dest, const OpArg &arg);
-    void PUNPCKLDQ(X64Reg dest, const OpArg &arg);
-    void PUNPCKLQDQ(X64Reg dest, const OpArg &arg);
-
-    void PTEST(X64Reg dest, const OpArg& arg);
-    void PAND(X64Reg dest, const OpArg& arg);
-    void PANDN(X64Reg dest, const OpArg& arg);
-    void PXOR(X64Reg dest, const OpArg& arg);
-    void POR(X64Reg dest, const OpArg& arg);
-
-    void PADDB(X64Reg dest, const OpArg& arg);
-    void PADDW(X64Reg dest, const OpArg& arg);
-    void PADDD(X64Reg dest, const OpArg& arg);
-    void PADDQ(X64Reg dest, const OpArg& arg);
-
-    void PADDSB(X64Reg dest, const OpArg& arg);
-    void PADDSW(X64Reg dest, const OpArg& arg);
-    void PADDUSB(X64Reg dest, const OpArg& arg);
-    void PADDUSW(X64Reg dest, const OpArg& arg);
-
-    void PSUBB(X64Reg dest, const OpArg& arg);
-    void PSUBW(X64Reg dest, const OpArg& arg);
-    void PSUBD(X64Reg dest, const OpArg& arg);
-    void PSUBQ(X64Reg dest, const OpArg& arg);
-
-    void PSUBSB(X64Reg dest, const OpArg& arg);
-    void PSUBSW(X64Reg dest, const OpArg& arg);
-    void PSUBUSB(X64Reg dest, const OpArg& arg);
-    void PSUBUSW(X64Reg dest, const OpArg& arg);
-
-    void PAVGB(X64Reg dest, const OpArg& arg);
-    void PAVGW(X64Reg dest, const OpArg& arg);
-
-    void PCMPEQB(X64Reg dest, const OpArg& arg);
-    void PCMPEQW(X64Reg dest, const OpArg& arg);
-    void PCMPEQD(X64Reg dest, const OpArg& arg);
-
-    void PCMPGTB(X64Reg dest, const OpArg& arg);
-    void PCMPGTW(X64Reg dest, const OpArg& arg);
-    void PCMPGTD(X64Reg dest, const OpArg& arg);
-
-    void PEXTRW(X64Reg dest, const OpArg& arg, u8 subreg);
-    void PINSRW(X64Reg dest, const OpArg& arg, u8 subreg);
-
-    void PMADDWD(X64Reg dest, const OpArg& arg);
-    void PSADBW(X64Reg dest, const OpArg& arg);
-
-    void PMAXSW(X64Reg dest, const OpArg& arg);
-    void PMAXUB(X64Reg dest, const OpArg& arg);
-    void PMINSW(X64Reg dest, const OpArg& arg);
-    void PMINUB(X64Reg dest, const OpArg& arg);
-    // SSE4: More MAX/MIN instructions.
-    void PMINSB(X64Reg dest, const OpArg& arg);
-    void PMINSD(X64Reg dest, const OpArg& arg);
-    void PMINUW(X64Reg dest, const OpArg& arg);
-    void PMINUD(X64Reg dest, const OpArg& arg);
-    void PMAXSB(X64Reg dest, const OpArg& arg);
-    void PMAXSD(X64Reg dest, const OpArg& arg);
-    void PMAXUW(X64Reg dest, const OpArg& arg);
-    void PMAXUD(X64Reg dest, const OpArg& arg);
-
-    void PMOVMSKB(X64Reg dest, const OpArg& arg);
-    void PSHUFD(X64Reg dest, const OpArg& arg, u8 shuffle);
-    void PSHUFB(X64Reg dest, const OpArg& arg);
-
-    void PSHUFLW(X64Reg dest, const OpArg& arg, u8 shuffle);
-    void PSHUFHW(X64Reg dest, const OpArg& arg, u8 shuffle);
-
-    void PSRLW(X64Reg reg, int shift);
-    void PSRLD(X64Reg reg, int shift);
-    void PSRLQ(X64Reg reg, int shift);
-    void PSRLQ(X64Reg reg, const OpArg& arg);
-    void PSRLDQ(X64Reg reg, int shift);
-
-    void PSLLW(X64Reg reg, int shift);
-    void PSLLD(X64Reg reg, int shift);
-    void PSLLQ(X64Reg reg, int shift);
-    void PSLLDQ(X64Reg reg, int shift);
-
-    void PSRAW(X64Reg reg, int shift);
-    void PSRAD(X64Reg reg, int shift);
-
-    // SSE4: data type conversions
-    void PMOVSXBW(X64Reg dest, const OpArg& arg);
-    void PMOVSXBD(X64Reg dest, const OpArg& arg);
-    void PMOVSXBQ(X64Reg dest, const OpArg& arg);
-    void PMOVSXWD(X64Reg dest, const OpArg& arg);
-    void PMOVSXWQ(X64Reg dest, const OpArg& arg);
-    void PMOVSXDQ(X64Reg dest, const OpArg& arg);
-    void PMOVZXBW(X64Reg dest, const OpArg& arg);
-    void PMOVZXBD(X64Reg dest, const OpArg& arg);
-    void PMOVZXBQ(X64Reg dest, const OpArg& arg);
-    void PMOVZXWD(X64Reg dest, const OpArg& arg);
-    void PMOVZXWQ(X64Reg dest, const OpArg& arg);
-    void PMOVZXDQ(X64Reg dest, const OpArg& arg);
-
-    // SSE4: variable blend instructions (xmm0 implicit argument)
-    void PBLENDVB(X64Reg dest, const OpArg& arg);
-    void BLENDVPS(X64Reg dest, const OpArg& arg);
-    void BLENDVPD(X64Reg dest, const OpArg& arg);
-    void BLENDPS(X64Reg dest, const OpArg& arg, u8 blend);
-    void BLENDPD(X64Reg dest, const OpArg& arg, u8 blend);
-
-    // SSE4: rounding (see FloatRound for mode or use ROUNDNEARSS, etc. helpers.)
-    void ROUNDSS(X64Reg dest, const OpArg& arg, u8 mode);
-    void ROUNDSD(X64Reg dest, const OpArg& arg, u8 mode);
-    void ROUNDPS(X64Reg dest, const OpArg& arg, u8 mode);
-    void ROUNDPD(X64Reg dest, const OpArg& arg, u8 mode);
-
-    void ROUNDNEARSS(X64Reg dest, const OpArg& arg) { ROUNDSS(dest, arg, FROUND_NEAREST); }
-    void ROUNDFLOORSS(X64Reg dest, const OpArg& arg) { ROUNDSS(dest, arg, FROUND_FLOOR); }
-    void ROUNDCEILSS(X64Reg dest, const OpArg& arg) { ROUNDSS(dest, arg, FROUND_CEIL); }
-    void ROUNDZEROSS(X64Reg dest, const OpArg& arg) { ROUNDSS(dest, arg, FROUND_ZERO); }
-
-    void ROUNDNEARSD(X64Reg dest, const OpArg& arg) { ROUNDSD(dest, arg, FROUND_NEAREST); }
-    void ROUNDFLOORSD(X64Reg dest, const OpArg& arg) { ROUNDSD(dest, arg, FROUND_FLOOR); }
-    void ROUNDCEILSD(X64Reg dest, const OpArg& arg) { ROUNDSD(dest, arg, FROUND_CEIL); }
-    void ROUNDZEROSD(X64Reg dest, const OpArg& arg) { ROUNDSD(dest, arg, FROUND_ZERO); }
-
-    void ROUNDNEARPS(X64Reg dest, const OpArg& arg) { ROUNDPS(dest, arg, FROUND_NEAREST); }
-    void ROUNDFLOORPS(X64Reg dest, const OpArg& arg) { ROUNDPS(dest, arg, FROUND_FLOOR); }
-    void ROUNDCEILPS(X64Reg dest, const OpArg& arg) { ROUNDPS(dest, arg, FROUND_CEIL); }
-    void ROUNDZEROPS(X64Reg dest, const OpArg& arg) { ROUNDPS(dest, arg, FROUND_ZERO); }
-
-    void ROUNDNEARPD(X64Reg dest, const OpArg& arg) { ROUNDPD(dest, arg, FROUND_NEAREST); }
-    void ROUNDFLOORPD(X64Reg dest, const OpArg& arg) { ROUNDPD(dest, arg, FROUND_FLOOR); }
-    void ROUNDCEILPD(X64Reg dest, const OpArg& arg) { ROUNDPD(dest, arg, FROUND_CEIL); }
-    void ROUNDZEROPD(X64Reg dest, const OpArg& arg) { ROUNDPD(dest, arg, FROUND_ZERO); }
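ROUNDSS's mode byte packs the FROUND_* values above: bits 0-1 pick the rounding mode, bit 2 (FROUND_MXCSR) defers to MXCSR.RC, and bit 3 (FROUND_IGNORE_PRECISION) masks the precision exception. In xbyak the immediate is passed as-is:

    roundss(xmm0, xmm1, 3);     // FROUND_ZERO: truncate toward zero
    roundsd(xmm0, xmm1, 0 | 8); // round-to-nearest, precision exception ignored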
-    void VFMADD132SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMADD213SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMADD231SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMADD132SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMADD213SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMADD231SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUB132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUB213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUB231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUB132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUB213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUB231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUB132SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUB213SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUB231SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUB132SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUB213SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUB231SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMADD132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMADD213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMADD231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMADD132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMADD213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMADD231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMADD132SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMADD213SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMADD231SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMADD132SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMADD213SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMADD231SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMSUB132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMSUB213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMSUB231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMSUB132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMSUB213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMSUB231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMSUB132SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMSUB213SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMSUB231SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMSUB132SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMSUB213SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFNMSUB231SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMADDSUB132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMADDSUB213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMADDSUB231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMADDSUB132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMADDSUB213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMADDSUB231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUBADD132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUBADD213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUBADD231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUBADD132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUBADD213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void VFMSUBADD231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-
-    // VEX GPR instructions
-    void SARX(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2);
-    void SHLX(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2);
-    void SHRX(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2);
-    void RORX(int bits, X64Reg regOp, const OpArg& arg, u8 rotate);
-    void PEXT(int bits, X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void PDEP(int bits, X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void MULX(int bits, X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-    void BZHI(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2);
-    void BLSR(int bits, X64Reg regOp, const OpArg& arg);
-    void BLSMSK(int bits, X64Reg regOp, const OpArg& arg);
-    void BLSI(int bits, X64Reg regOp, const OpArg& arg);
-    void BEXTR(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2);
-    void ANDN(int bits, X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
-
-    void RDTSC();
-
-    // Utility functions
-    // The difference between this and CALL is that this aligns the stack
-    // where appropriate.
-    void ABI_CallFunction(const void* func);
-    template <typename T>
-    void ABI_CallFunction(T (*func)()) {
-        ABI_CallFunction((const void*)func);
-    }
-
-    void ABI_CallFunction(const u8* func) {
-        ABI_CallFunction((const void*)func);
-    }
-    void ABI_CallFunctionC16(const void* func, u16 param1);
-    void ABI_CallFunctionCC16(const void* func, u32 param1, u16 param2);
-
-
-    // These only support u32 parameters, but that's enough for a lot of uses.
-    // These will destroy the 1 or 2 first "parameter regs".
-    void ABI_CallFunctionC(const void* func, u32 param1);
-    void ABI_CallFunctionCC(const void* func, u32 param1, u32 param2);
-    void ABI_CallFunctionCCC(const void* func, u32 param1, u32 param2, u32 param3);
-    void ABI_CallFunctionCCP(const void* func, u32 param1, u32 param2, void* param3);
-    void ABI_CallFunctionCCCP(const void* func, u32 param1, u32 param2, u32 param3, void* param4);
-    void ABI_CallFunctionP(const void* func, void* param1);
-    void ABI_CallFunctionPA(const void* func, void* param1, const OpArg& arg2);
-    void ABI_CallFunctionPAA(const void* func, void* param1, const OpArg& arg2, const OpArg& arg3);
-    void ABI_CallFunctionPPC(const void* func, void* param1, void* param2, u32 param3);
-    void ABI_CallFunctionAC(const void* func, const OpArg& arg1, u32 param2);
-    void ABI_CallFunctionACC(const void* func, const OpArg& arg1, u32 param2, u32 param3);
-    void ABI_CallFunctionA(const void* func, const OpArg& arg1);
-    void ABI_CallFunctionAA(const void* func, const OpArg& arg1, const OpArg& arg2);
-
-    // Pass a register as a parameter.
-    void ABI_CallFunctionR(const void* func, X64Reg reg1);
-    void ABI_CallFunctionRR(const void* func, X64Reg reg1, X64Reg reg2);
-
-    template <typename Tr, typename T1>
-    void ABI_CallFunctionC(Tr (*func)(T1), u32 param1) {
-        ABI_CallFunctionC((const void*)func, param1);
-    }
-
-    /**
-     * Saves specified registers and adjusts the stack to be 16-byte aligned as required by the ABI
-     *
-     * @param mask Registers to push on the stack (high 16 bits are XMMs, low 16 bits are GPRs)
-     * @param rsp_alignment Current alignment of the stack pointer, must be 0 or 8
-     * @param needed_frame_size Additional space needed, e.g., for function arguments passed on the stack
-     * @return Size of the shadow space, i.e., offset of the frame
-     */
-    size_t ABI_PushRegistersAndAdjustStack(BitSet32 mask, size_t rsp_alignment, size_t needed_frame_size = 0);
-
-    /**
-     * Restores specified registers and adjusts the stack to its original alignment, i.e., the alignment before
-     * the matching PushRegistersAndAdjustStack.
-     *
-     * @param mask Registers to restores from the stack (high 16 bits are XMMs, low 16 bits are GPRs)
-     * @param rsp_alignment Original alignment before the matching PushRegistersAndAdjustStack, must be 0 or 8
-     * @param needed_frame_size Additional space that was needed
-     * @warning Stack must be currently 16-byte aligned
-     */
-    void ABI_PopRegistersAndAdjustStack(BitSet32 mask, size_t rsp_alignment, size_t needed_frame_size = 0);
-
-    #ifdef _M_IX86
-    static int ABI_GetNumXMMRegs() { return 8; }
-    #else
-    static int ABI_GetNumXMMRegs() { return 16; }
-    #endif
-}; // class XEmitter
-
-
-// Everything that needs to generate X86 code should inherit from this.
-// You get memory management for free, plus, you can use all the MOV etc functions without
-// having to prefix them with gen-> or something similar.
-
-class XCodeBlock : public CodeBlock {
-public:
-    void PoisonMemory() override;
-};
-
-} // namespace
diff --git a/src/frontend/disassembler/disassembler_thumb.cpp b/src/frontend/disassembler/disassembler_thumb.cpp
index f7b1c05b..d9938b06 100644
--- a/src/frontend/disassembler/disassembler_thumb.cpp
+++ b/src/frontend/disassembler/disassembler_thumb.cpp
@@ -22,11 +22,11 @@ public:
     }
 
     std::string thumb16_LSR_imm(Imm5 imm5, Reg m, Reg d) {
-        return Common::StringFromFormat("lsrs %s, %s, #%u", RegToString(d), RegToString(m), imm5);
+        return Common::StringFromFormat("lsrs %s, %s, #%u", RegToString(d), RegToString(m), imm5 != 0 ? imm5 : 32);
     }
 
     std::string thumb16_ASR_imm(Imm5 imm5, Reg m, Reg d) {
-        return Common::StringFromFormat("asrs %s, %s, #%u", RegToString(d), RegToString(m), imm5);
+        return Common::StringFromFormat("asrs %s, %s, #%u", RegToString(d), RegToString(m), imm5 != 0 ? imm5 : 32);
     }
 
     std::string thumb16_ADD_reg_t1(Reg m, Reg n, Reg d) {
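
A note on the disassembler hunk above: in the Thumb-1 encoding of LSR/ASR by
immediate, an imm5 field of 0 denotes a shift amount of 32, so the old format
arguments printed "#0" for what is actually a shift by 32. A minimal sketch of
the decode rule; the helper name is hypothetical and not part of this patch:

    #include <cstdint>

    // Thumb-1 LSR/ASR immediate shifts: an encoded imm5 of 0 means "shift by 32".
    static std::uint32_t ThumbShiftAmount(std::uint32_t imm5) {
        return imm5 != 0 ? imm5 : 32;
    }
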

From d04b9eaa81ccb11d946594b4d148bccb5c91633d Mon Sep 17 00:00:00 2001
From: MerryMage
Date: Thu, 25 Aug 2016 02:59:42 +0100
Subject: [PATCH 3/3] backend_x64/block_of_code: Reset labels when ClearCache() is called

---
 src/backend_x64/block_of_code.cpp | 28 ++++++++--------
 src/backend_x64/block_of_code.h   | 54 ++++++++++++++++---------------
 2 files changed, 43 insertions(+), 39 deletions(-)

diff --git a/src/backend_x64/block_of_code.cpp b/src/backend_x64/block_of_code.cpp
index 1d276989..1e0eaa16 100644
--- a/src/backend_x64/block_of_code.cpp
+++ b/src/backend_x64/block_of_code.cpp
@@ -21,6 +21,8 @@ BlockOfCode::BlockOfCode() : Xbyak::CodeGenerator(128 * 1024 * 1024) {
 }
 
 void BlockOfCode::ClearCache(bool poison_memory) {
+    consts.~Consts();
+    new (&consts) Consts();
     reset();
     GenConstants();
     GenRunCode();
@@ -42,49 +44,49 @@ void BlockOfCode::ReturnFromRunCode(bool MXCSR_switch) {
 
 void BlockOfCode::GenConstants() {
     align();
-    L(const_FloatNegativeZero32);
+    L(consts.FloatNegativeZero32);
     dd(0x80000000u);
 
     align();
-    L(const_FloatNaN32);
+    L(consts.FloatNaN32);
     dd(0x7fc00000u);
 
     align();
-    L(const_FloatNonSignMask32);
+    L(consts.FloatNonSignMask32);
     dq(0x7fffffffu);
 
     align();
-    L(const_FloatNegativeZero64);
+    L(consts.FloatNegativeZero64);
     dq(0x8000000000000000u);
 
     align();
-    L(const_FloatNaN64);
+    L(consts.FloatNaN64);
     dq(0x7ff8000000000000u);
 
     align();
-    L(const_FloatNonSignMask64);
+    L(consts.FloatNonSignMask64);
     dq(0x7fffffffffffffffu);
 
     align();
-    L(const_FloatPenultimatePositiveDenormal64);
+    L(consts.FloatPenultimatePositiveDenormal64);
     dq(0x000ffffffffffffeu);
 
     align();
-    L(const_FloatMinS32);
+    L(consts.FloatMinS32);
     dq(0xc1e0000000000000u); // -2147483648 as a double
 
     align();
-    L(const_FloatMaxS32);
+    L(consts.FloatMaxS32);
     dq(0x41dfffffffc00000u); // 2147483647 as a double
 
     align();
-    L(const_FloatPositiveZero32);
-    L(const_FloatPositiveZero64);
-    L(const_FloatMinU32);
+    L(consts.FloatPositiveZero32);
+    L(consts.FloatPositiveZero64);
+    L(consts.FloatMinU32);
     dq(0x0000000000000000u); // 0 as a double
 
     align();
-    L(const_FloatMaxU32);
+    L(consts.FloatMaxU32);
     dq(0x41efffffffe00000u); // 4294967295 as a double
 
     align();
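
A note on the ClearCache() hunk above: Xbyak::Label objects remember where they
were bound, so after reset() discards the generated code the old labels would be
stale. Destroying the consts member and placement-new-constructing it re-runs the
labels' default constructors, returning them to an unbound state before
GenConstants() binds them again. A minimal standalone sketch of that idiom,
assuming only that Xbyak::Label is default-constructible; the struct and names
are illustrative, not from this patch:

    #include <new>
    #include <xbyak.h>

    struct Labels {
        Xbyak::Label zero, nan;
    };

    struct CodeCache {
        Labels labels;

        void Clear() {
            // Re-run the default constructors in place so every label is
            // unbound again, without reallocating the enclosing object.
            labels.~Labels();
            new (&labels) Labels();
        }
    };
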
diff --git a/src/backend_x64/block_of_code.h b/src/backend_x64/block_of_code.h
index 49314a5c..f15df9bb 100644
--- a/src/backend_x64/block_of_code.h
+++ b/src/backend_x64/block_of_code.h
@@ -36,43 +36,43 @@ public:
     void CallFunction(const void* fn);
 
     Xbyak::Address MFloatPositiveZero32() {
-        return xword[rip + const_FloatPositiveZero32];
+        return xword[rip + consts.FloatPositiveZero32];
     }
     Xbyak::Address MFloatNegativeZero32() {
-        return xword[rip + const_FloatNegativeZero32];
+        return xword[rip + consts.FloatNegativeZero32];
     }
     Xbyak::Address MFloatNaN32() {
-        return xword[rip + const_FloatNaN32];
+        return xword[rip + consts.FloatNaN32];
     }
     Xbyak::Address MFloatNonSignMask32() {
-        return xword[rip + const_FloatNonSignMask32];
+        return xword[rip + consts.FloatNonSignMask32];
     }
     Xbyak::Address MFloatPositiveZero64() {
-        return xword[rip + const_FloatPositiveZero64];
+        return xword[rip + consts.FloatPositiveZero64];
     }
     Xbyak::Address MFloatNegativeZero64() {
-        return xword[rip + const_FloatNegativeZero64];
+        return xword[rip + consts.FloatNegativeZero64];
     }
     Xbyak::Address MFloatNaN64() {
-        return xword[rip + const_FloatNaN64];
+        return xword[rip + consts.FloatNaN64];
     }
     Xbyak::Address MFloatNonSignMask64() {
-        return xword[rip + const_FloatNonSignMask64];
+        return xword[rip + consts.FloatNonSignMask64];
     }
     Xbyak::Address MFloatPenultimatePositiveDenormal64() {
-        return xword[rip + const_FloatPenultimatePositiveDenormal64];
+        return xword[rip + consts.FloatPenultimatePositiveDenormal64];
     }
     Xbyak::Address MFloatMinS32() {
-        return xword[rip + const_FloatMinS32];
+        return xword[rip + consts.FloatMinS32];
     }
     Xbyak::Address MFloatMaxS32() {
-        return xword[rip + const_FloatMaxS32];
+        return xword[rip + consts.FloatMaxS32];
     }
     Xbyak::Address MFloatMinU32() {
-        return xword[rip + const_FloatMinU32];
+        return xword[rip + consts.FloatMinU32];
     }
     Xbyak::Address MFloatMaxU32() {
-        return xword[rip + const_FloatMaxU32];
+        return xword[rip + consts.FloatMaxU32];
     }
 
     const void* GetReturnFromRunCodeAddress() const {
@@ -104,19 +104,21 @@ public:
 #endif
 
 private:
-    Xbyak::Label const_FloatPositiveZero32;
-    Xbyak::Label const_FloatNegativeZero32;
-    Xbyak::Label const_FloatNaN32;
-    Xbyak::Label const_FloatNonSignMask32;
-    Xbyak::Label const_FloatPositiveZero64;
-    Xbyak::Label const_FloatNegativeZero64;
-    Xbyak::Label const_FloatNaN64;
-    Xbyak::Label const_FloatNonSignMask64;
-    Xbyak::Label const_FloatPenultimatePositiveDenormal64;
-    Xbyak::Label const_FloatMinS32;
-    Xbyak::Label const_FloatMaxS32;
-    Xbyak::Label const_FloatMinU32;
-    Xbyak::Label const_FloatMaxU32;
+    struct Consts {
+        Xbyak::Label FloatPositiveZero32;
+        Xbyak::Label FloatNegativeZero32;
+        Xbyak::Label FloatNaN32;
+        Xbyak::Label FloatNonSignMask32;
+        Xbyak::Label FloatPositiveZero64;
+        Xbyak::Label FloatNegativeZero64;
+        Xbyak::Label FloatNaN64;
+        Xbyak::Label FloatNonSignMask64;
+        Xbyak::Label FloatPenultimatePositiveDenormal64;
+        Xbyak::Label FloatMinS32;
+        Xbyak::Label FloatMaxS32;
+        Xbyak::Label FloatMinU32;
+        Xbyak::Label FloatMaxU32;
+    } consts;
 
     void GenConstants();
 
     using RunCodeFuncType = void(*)(JitState*, CodePtr);
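
For context on the M* accessors above: GenConstants() emits the floating-point
constants into the code block itself, and these accessors read them back with
RIP-relative addressing through the labels, so no runtime relocation is needed.
A rough self-contained sketch of the same xbyak idiom, assuming xbyak is on the
include path as configured in this series' CMake; the class and label names are
illustrative, not taken from this patch:

    #include <xbyak.h>

    class ConstPool : public Xbyak::CodeGenerator {
    public:
        ConstPool() {
            // Emit the data first, marked by a label...
            align();
            L(negative_zero64);
            dq(0x8000000000000000u);
            // ...then code that loads it RIP-relatively.
            align();
            entry = getCurr();
            movsd(xmm0, qword[rip + negative_zero64]); // xmm0 = -0.0
            ret();
        }
        const void* entry;
    private:
        Xbyak::Label negative_zero64;
    };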