BackendX64: Merge Routines into BlockOfCode
parent 0f412247ed
commit aba705f6b9
9 changed files with 89 additions and 90 deletions
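For orientation, here is the merged class's public surface as it can be pieced together from the `block_of_code.{h,cpp}` hunks below. This is an abridged reconstruction for illustration, not the verbatim header; member visibility and the full list of constant accessors are assumptions.

```cpp
// Abridged sketch reconstructed from the hunks in this commit; not copied
// verbatim from block_of_code.h. Visibility of Gen* members is assumed.
class BlockOfCode final : public Gen::XCodeBlock {
public:
    BlockOfCode();                        // allocates 128 MiB of code space, then ClearCache(false)

    void ClearCache(bool poison_memory);  // poison (ClearCodeSpace) or rewind (ResetCodePtr) the buffer,
                                          // then regenerate the constant pool and the run-code thunk

    size_t RunCode(JitState* jit_state, CodePtr basic_block, size_t cycles_to_run) const;
    void ReturnFromRunCode();             // emits the epilogue formerly produced by Routines::GenReturnFromRunCode

    Gen::OpArg MFloatNegativeZero32() const;  // one of several accessors for constants laid down by GenConstants()
    // ... further MFloat* accessors elided ...

private:
    void GenConstants();
    void GenRunCode();
};
```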
@@ -30,7 +30,7 @@ memory location and memory reader callback and returns a basic block of IR.
 * The IR can be found under `src/frontend/ir/`.
 * Optimization is not implemented yet.
 * Emission is done by `EmitX64` which can be found in `src/backend_x64/emit_x64.{h,cpp}`.
-* Execution is performed by calling `Routines::RunCode` in `src/backend_x64/routines.{h,cpp}`.
+* Execution is performed by calling `BlockOfCode::RunCode` in `src/backend_x64/routines.{h,cpp}`.

 ## Decoder

@@ -1,11 +1,11 @@
 include_directories(.)

 set(SRCS
+    backend_x64/block_of_code.cpp
     backend_x64/emit_x64.cpp
     backend_x64/interface_x64.cpp
     backend_x64/jitstate.cpp
     backend_x64/reg_alloc.cpp
-    backend_x64/routines.cpp
     common/memory_pool.cpp
     common/memory_util.cpp
     common/string_util.cpp

@@ -34,10 +34,10 @@ set(SRCS
 )

 set(HEADERS
+    backend_x64/block_of_code.h
     backend_x64/emit_x64.h
     backend_x64/jitstate.h
     backend_x64/reg_alloc.h
-    backend_x64/routines.h
     common/assert.h
     common/bit_set.h
     common/bit_util.h

@@ -6,8 +6,8 @@

 #include <limits>

+#include "backend_x64/block_of_code.h"
 #include "backend_x64/jitstate.h"
-#include "backend_x64/routines.h"
 #include "common/x64/abi.h"

 using namespace Gen;

@@ -15,14 +15,23 @@ using namespace Gen;
 namespace Dynarmic {
 namespace BackendX64 {

-Routines::Routines() {
-    AllocCodeSpace(1024);
+BlockOfCode::BlockOfCode() {
+    AllocCodeSpace(128 * 1024 * 1024);
+    ClearCache(false);
+}
+
+void BlockOfCode::ClearCache(bool poison_memory) {
+    if (poison_memory) {
+        ClearCodeSpace();
+    } else {
+        ResetCodePtr();
+    }

     GenConstants();
     GenRunCode();
 }

-size_t Routines::RunCode(JitState* jit_state, CodePtr basic_block, size_t cycles_to_run) const {
+size_t BlockOfCode::RunCode(JitState* jit_state, CodePtr basic_block, size_t cycles_to_run) const {
     constexpr size_t max_cycles_to_run = static_cast<size_t>(std::numeric_limits<decltype(jit_state->cycles_remaining)>::max());
     ASSERT(cycles_to_run <= max_cycles_to_run);

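A note on the function above: `RunCode` asserts that `cycles_to_run` fits in `JitState::cycles_remaining`, and the next hunk shows it reporting back how many cycles were actually consumed. A minimal hypothetical call site, with names following the `Jit::Impl` wiring further down in this diff:

```cpp
// Hypothetical call site, mirroring Jit::Impl::Execute later in this diff.
// The return value is cycles_to_run - jit_state.cycles_remaining, i.e. the
// number of cycles the emitted code actually accounted for.
size_t cycles_spent = block_of_code.RunCode(&jit_state, code_ptr, /*cycles_to_run=*/1000);
```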
@@ -31,7 +40,16 @@ size_t Routines::RunCode(JitState* jit_state, CodePtr basic_block, size_t cycles
     return cycles_to_run - jit_state->cycles_remaining; // Return number of cycles actually run.
 }

-void Routines::GenConstants() {
+void BlockOfCode::ReturnFromRunCode() {
+    STMXCSR(MDisp(R15, offsetof(JitState, guest_MXCSR)));
+    LDMXCSR(MDisp(R15, offsetof(JitState, save_host_MXCSR)));
+    MOV(64, R(RSP), MDisp(R15, offsetof(JitState, save_host_RSP)));
+
+    ABI_PopRegistersAndAdjustStack(ABI_ALL_CALLEE_SAVED, 8);
+    RET();
+}
+
+void BlockOfCode::GenConstants() {
     const_FloatNegativeZero32 = AlignCode16();
     Write32(0x80000000u);
     const_FloatNaN32 = AlignCode16();

@@ -49,8 +67,8 @@ void Routines::GenConstants() {
     AlignCode16();
 }

-void Routines::GenRunCode() {
-    run_code = reinterpret_cast<RunCodeFuncType>(const_cast<u8*>(this->GetCodePtr()));
+void BlockOfCode::GenRunCode() {
+    run_code = reinterpret_cast<RunCodeFuncType>(const_cast<u8*>(GetCodePtr()));

     // This serves two purposes:
     // 1. It saves all the registers we as a callee need to save.

@@ -66,14 +84,5 @@ void Routines::GenRunCode() {
     JMPptr(R(ABI_PARAM2));
 }

-void Routines::GenReturnFromRunCode(XEmitter* code) const {
-    code->STMXCSR(MDisp(R15, offsetof(JitState, guest_MXCSR)));
-    code->LDMXCSR(MDisp(R15, offsetof(JitState, save_host_MXCSR)));
-    code->MOV(64, R(RSP), MDisp(R15, offsetof(JitState, save_host_RSP)));
-
-    code->ABI_PopRegistersAndAdjustStack(ABI_ALL_CALLEE_SAVED, 8);
-    code->RET();
-}
-
 } // namespace BackendX64
 } // namespace Dynarmic

@@ -13,12 +13,15 @@
 namespace Dynarmic {
 namespace BackendX64 {

-class Routines final : private Gen::XCodeBlock {
+class BlockOfCode final : public Gen::XCodeBlock {
 public:
-    Routines();
+    BlockOfCode();
+
+    void ClearCache(bool poison_memory);
+
     size_t RunCode(JitState* jit_state, CodePtr basic_block, size_t cycles_to_run) const;
-    void GenReturnFromRunCode(Gen::XEmitter* code) const;
+    void ReturnFromRunCode();

     Gen::OpArg MFloatNegativeZero32() const {
         return Gen::M(const_FloatNegativeZero32);
     }

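The `MFloat*` accessors above pair with `GenConstants()` from the .cpp hunks: constants are written straight into the executable buffer and each accessor hands back a `Gen::M()` memory operand pointing at them, so emitter code can reference them without a separate `Routines` object. A hedged sketch of that pattern follows; the set of constants beyond `const_FloatNegativeZero32` is inferred from names used elsewhere in this commit, not copied from the header.

```cpp
// Sketch of the constant-pool pattern used by BlockOfCode. Field names other
// than const_FloatNegativeZero32 are assumptions drawn from this diff.
void BlockOfCode::GenConstants() {
    const_FloatNegativeZero32 = AlignCode16();  // remember where this constant lives
    Write32(0x80000000u);                       // emit its bytes into the code buffer
    // ... further constants (NaN patterns, sign masks, ...) follow ...
    AlignCode16();
}

Gen::OpArg BlockOfCode::MFloatNegativeZero32() const {
    return Gen::M(const_FloatNegativeZero32);   // a memory operand usable directly by emitted instructions
}

// A consumer inside EmitX64, as seen later in this diff:
//     code->PXOR(result, code->MFloatNegativeZero32());
```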
@@ -1030,7 +1030,7 @@ void EmitX64::EmitByteReverseDual(IR::Block&, IR::Inst* inst) {
     code->BSWAP(64, result);
 }

-static void DenormalsAreZero32(XEmitter* code, X64Reg xmm_value, X64Reg gpr_scratch) {
+static void DenormalsAreZero32(BlockOfCode* code, X64Reg xmm_value, X64Reg gpr_scratch) {
     // We need to report back whether we've found a denormal on input.
     // SSE doesn't do this for us when SSE's DAZ is enabled.
     code->MOVD_xmm(R(gpr_scratch), xmm_value);

@@ -1043,18 +1043,18 @@ static void DenormalsAreZero32(XEmitter* code, X64Reg xmm_value, X64Reg gpr_scra
     code->SetJumpTarget(fixup);
 }

-static void DenormalsAreZero64(XEmitter* code, Routines* routines, X64Reg xmm_value, X64Reg gpr_scratch) {
+static void DenormalsAreZero64(BlockOfCode* code, X64Reg xmm_value, X64Reg gpr_scratch) {
     code->MOVQ_xmm(R(gpr_scratch), xmm_value);
-    code->AND(64, R(gpr_scratch), routines->MFloatNonSignMask64());
+    code->AND(64, R(gpr_scratch), code->MFloatNonSignMask64());
     code->SUB(64, R(gpr_scratch), Imm32(1));
-    code->CMP(64, R(gpr_scratch), routines->MFloatPenultimatePositiveDenormal64());
+    code->CMP(64, R(gpr_scratch), code->MFloatPenultimatePositiveDenormal64());
     auto fixup = code->J_CC(CC_A);
     code->PXOR(xmm_value, R(xmm_value));
     code->MOV(32, MDisp(R15, offsetof(JitState, FPSCR_IDC)), Imm32(1 << 7));
     code->SetJumpTarget(fixup);
 }

-static void FlushToZero32(XEmitter* code, X64Reg xmm_value, X64Reg gpr_scratch) {
+static void FlushToZero32(BlockOfCode* code, X64Reg xmm_value, X64Reg gpr_scratch) {
     code->MOVD_xmm(R(gpr_scratch), xmm_value);
     code->AND(32, R(gpr_scratch), Imm32(0x7FFFFFFF));
     code->SUB(32, R(gpr_scratch), Imm32(1));

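The AND / SUB 1 / CMP / JA sequence in these helpers is an unsigned range test for nonzero denormals: clear the sign bit, subtract one, and compare against the "penultimate positive denormal" so that a zero magnitude wraps around and fails the test. A plain-C++ restatement of the same check for binary32; the constant here is inferred from the pattern and naming, not taken from this diff.

```cpp
#include <cstdint>

// Illustrative restatement of the emitted AND/SUB/CMP/JA sequence for binary32.
// 0x007FFFFF is the largest positive denormal bit pattern, so 0x007FFFFE is the
// "penultimate positive denormal" that the 64-bit helper's constant is named after.
bool IsNonZeroDenormal32(std::uint32_t bits) {
    std::uint32_t magnitude = bits & 0x7FFFFFFFu; // drop the sign bit
    return magnitude - 1u <= 0x007FFFFEu;         // true for 0x00000001..0x007FFFFF; zero wraps and fails
}
```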
@@ -1065,32 +1065,32 @@ static void FlushToZero32(XEmitter* code, X64Reg xmm_value, X64Reg gpr_scratch)
     code->SetJumpTarget(fixup);
 }

-static void FlushToZero64(XEmitter* code, Routines* routines, X64Reg xmm_value, X64Reg gpr_scratch) {
+static void FlushToZero64(BlockOfCode* code, X64Reg xmm_value, X64Reg gpr_scratch) {
     code->MOVQ_xmm(R(gpr_scratch), xmm_value);
-    code->AND(64, R(gpr_scratch), routines->MFloatNonSignMask64());
+    code->AND(64, R(gpr_scratch), code->MFloatNonSignMask64());
     code->SUB(64, R(gpr_scratch), Imm32(1));
-    code->CMP(64, R(gpr_scratch), routines->MFloatPenultimatePositiveDenormal64());
+    code->CMP(64, R(gpr_scratch), code->MFloatPenultimatePositiveDenormal64());
     auto fixup = code->J_CC(CC_A);
     code->PXOR(xmm_value, R(xmm_value));
     code->MOV(32, MDisp(R15, offsetof(JitState, FPSCR_UFC)), Imm32(1 << 3));
     code->SetJumpTarget(fixup);
 }

-static void DefaultNaN32(XEmitter* code, Routines* routines, X64Reg xmm_value) {
+static void DefaultNaN32(BlockOfCode* code, X64Reg xmm_value) {
     code->UCOMISS(xmm_value, R(xmm_value));
     auto fixup = code->J_CC(CC_NP);
-    code->MOVAPS(xmm_value, routines->MFloatNaN32());
+    code->MOVAPS(xmm_value, code->MFloatNaN32());
     code->SetJumpTarget(fixup);
 }

-static void DefaultNaN64(XEmitter* code, Routines* routines, X64Reg xmm_value) {
+static void DefaultNaN64(BlockOfCode* code, X64Reg xmm_value) {
     code->UCOMISD(xmm_value, R(xmm_value));
     auto fixup = code->J_CC(CC_NP);
-    code->MOVAPS(xmm_value, routines->MFloatNaN64());
+    code->MOVAPS(xmm_value, code->MFloatNaN64());
     code->SetJumpTarget(fixup);
 }

-static void FPThreeOp32(XEmitter* code, Routines* routines, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (XEmitter::*fn)(X64Reg, const OpArg&)) {
+static void FPThreeOp32(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (XEmitter::*fn)(X64Reg, const OpArg&)) {
     IR::Value a = inst->GetArg(0);
     IR::Value b = inst->GetArg(1);

@@ -1107,11 +1107,11 @@ static void FPThreeOp32(XEmitter* code, Routines* routines, RegAlloc& reg_alloc,
         FlushToZero32(code, result, gpr_scratch);
     }
     if (block.location.FPSCR_DN()) {
-        DefaultNaN32(code, routines, result);
+        DefaultNaN32(code, result);
     }
 }

-static void FPThreeOp64(XEmitter* code, Routines* routines, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (XEmitter::*fn)(X64Reg, const OpArg&)) {
+static void FPThreeOp64(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (XEmitter::*fn)(X64Reg, const OpArg&)) {
     IR::Value a = inst->GetArg(0);
     IR::Value b = inst->GetArg(1);

@@ -1120,19 +1120,19 @@ static void FPThreeOp64(XEmitter* code, Routines* routines, RegAlloc& reg_alloc,
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);

     if (block.location.FPSCR_FTZ()) {
-        DenormalsAreZero64(code, routines, result, gpr_scratch);
-        DenormalsAreZero64(code, routines, operand, gpr_scratch);
+        DenormalsAreZero64(code, result, gpr_scratch);
+        DenormalsAreZero64(code, operand, gpr_scratch);
     }
     (code->*fn)(result, R(operand));
     if (block.location.FPSCR_FTZ()) {
-        FlushToZero64(code, routines, result, gpr_scratch);
+        FlushToZero64(code, result, gpr_scratch);
     }
     if (block.location.FPSCR_DN()) {
-        DefaultNaN64(code, routines, result);
+        DefaultNaN64(code, result);
     }
 }

-static void FPTwoOp32(XEmitter* code, Routines* routines, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (XEmitter::*fn)(X64Reg, const OpArg&)) {
+static void FPTwoOp32(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (XEmitter::*fn)(X64Reg, const OpArg&)) {
     IR::Value a = inst->GetArg(0);

     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);

@@ -1146,25 +1146,25 @@ static void FPTwoOp32(XEmitter* code, Routines* routines, RegAlloc& reg_alloc, I
         FlushToZero32(code, result, gpr_scratch);
     }
     if (block.location.FPSCR_DN()) {
-        DefaultNaN32(code, routines, result);
+        DefaultNaN32(code, result);
     }
 }

-static void FPTwoOp64(XEmitter* code, Routines* routines, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (XEmitter::*fn)(X64Reg, const OpArg&)) {
+static void FPTwoOp64(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block, IR::Inst* inst, void (XEmitter::*fn)(X64Reg, const OpArg&)) {
     IR::Value a = inst->GetArg(0);

     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);

     if (block.location.FPSCR_FTZ()) {
-        DenormalsAreZero64(code, routines, result, gpr_scratch);
+        DenormalsAreZero64(code, result, gpr_scratch);
     }
     (code->*fn)(result, R(result));
     if (block.location.FPSCR_FTZ()) {
-        FlushToZero64(code, routines, result, gpr_scratch);
+        FlushToZero64(code, result, gpr_scratch);
     }
     if (block.location.FPSCR_DN()) {
-        DefaultNaN64(code, routines, result);
+        DefaultNaN64(code, result);
     }
 }

@@ -1173,7 +1173,7 @@ void EmitX64::EmitFPAbs32(IR::Block&, IR::Inst* inst) {

     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);

-    code->PAND(result, routines->MFloatNonSignMask32());
+    code->PAND(result, code->MFloatNonSignMask32());
 }

 void EmitX64::EmitFPAbs64(IR::Block&, IR::Inst* inst) {

@@ -1181,7 +1181,7 @@ void EmitX64::EmitFPAbs64(IR::Block&, IR::Inst* inst) {

     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);

-    code->PAND(result, routines->MFloatNonSignMask64());
+    code->PAND(result, code->MFloatNonSignMask64());
 }

 void EmitX64::EmitFPNeg32(IR::Block&, IR::Inst* inst) {

@@ -1189,7 +1189,7 @@ void EmitX64::EmitFPNeg32(IR::Block&, IR::Inst* inst) {

     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);

-    code->PXOR(result, routines->MFloatNegativeZero32());
+    code->PXOR(result, code->MFloatNegativeZero32());
 }

 void EmitX64::EmitFPNeg64(IR::Block&, IR::Inst* inst) {

@@ -1197,47 +1197,47 @@ void EmitX64::EmitFPNeg64(IR::Block&, IR::Inst* inst) {

     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);

-    code->PXOR(result, routines->MFloatNegativeZero64());
+    code->PXOR(result, code->MFloatNegativeZero64());
 }

 void EmitX64::EmitFPAdd32(IR::Block& block, IR::Inst* inst) {
-    FPThreeOp32(code, routines, reg_alloc, block, inst, &XEmitter::ADDSS);
+    FPThreeOp32(code, reg_alloc, block, inst, &XEmitter::ADDSS);
 }

 void EmitX64::EmitFPAdd64(IR::Block& block, IR::Inst* inst) {
-    FPThreeOp64(code, routines, reg_alloc, block, inst, &XEmitter::ADDSD);
+    FPThreeOp64(code, reg_alloc, block, inst, &XEmitter::ADDSD);
 }

 void EmitX64::EmitFPDiv32(IR::Block& block, IR::Inst* inst) {
-    FPThreeOp32(code, routines, reg_alloc, block, inst, &XEmitter::DIVSS);
+    FPThreeOp32(code, reg_alloc, block, inst, &XEmitter::DIVSS);
 }

 void EmitX64::EmitFPDiv64(IR::Block& block, IR::Inst* inst) {
-    FPThreeOp64(code, routines, reg_alloc, block, inst, &XEmitter::DIVSD);
+    FPThreeOp64(code, reg_alloc, block, inst, &XEmitter::DIVSD);
 }

 void EmitX64::EmitFPMul32(IR::Block& block, IR::Inst* inst) {
-    FPThreeOp32(code, routines, reg_alloc, block, inst, &XEmitter::MULSS);
+    FPThreeOp32(code, reg_alloc, block, inst, &XEmitter::MULSS);
 }

 void EmitX64::EmitFPMul64(IR::Block& block, IR::Inst* inst) {
-    FPThreeOp64(code, routines, reg_alloc, block, inst, &XEmitter::MULSD);
+    FPThreeOp64(code, reg_alloc, block, inst, &XEmitter::MULSD);
 }

 void EmitX64::EmitFPSqrt32(IR::Block& block, IR::Inst* inst) {
-    FPTwoOp32(code, routines, reg_alloc, block, inst, &XEmitter::SQRTSS);
+    FPTwoOp32(code, reg_alloc, block, inst, &XEmitter::SQRTSS);
 }

 void EmitX64::EmitFPSqrt64(IR::Block& block, IR::Inst* inst) {
-    FPTwoOp64(code, routines, reg_alloc, block, inst, &XEmitter::SQRTSD);
+    FPTwoOp64(code, reg_alloc, block, inst, &XEmitter::SQRTSD);
 }

 void EmitX64::EmitFPSub32(IR::Block& block, IR::Inst* inst) {
-    FPThreeOp32(code, routines, reg_alloc, block, inst, &XEmitter::SUBSS);
+    FPThreeOp32(code, reg_alloc, block, inst, &XEmitter::SUBSS);
 }

 void EmitX64::EmitFPSub64(IR::Block& block, IR::Inst* inst) {
-    FPThreeOp64(code, routines, reg_alloc, block, inst, &XEmitter::SUBSD);
+    FPThreeOp64(code, reg_alloc, block, inst, &XEmitter::SUBSD);
 }

 void EmitX64::EmitReadMemory8(IR::Block&, IR::Inst* inst) {

@@ -1294,7 +1294,7 @@ void EmitX64::EmitAddCycles(size_t cycles) {
     code->SUB(64, MDisp(R15, offsetof(JitState, cycles_remaining)), Imm32(static_cast<u32>(cycles)));
 }

-static CCFlags EmitCond(Gen::XEmitter* code, Arm::Cond cond) {
+static CCFlags EmitCond(BlockOfCode* code, Arm::Cond cond) {
     // TODO: This code is a quick copy-paste-and-quickly-modify job from a previous JIT. Clean this up.

     auto NFlag = [code](X64Reg reg){

@@ -1486,11 +1486,11 @@ void EmitX64::EmitTerminalInterpret(IR::Term::Interpret terminal, Arm::LocationD
     code->MOV(32, MJitStateReg(Arm::Reg::PC), R(ABI_PARAM1));
     code->MOV(64, R(RSP), MDisp(R15, offsetof(JitState, save_host_RSP)));
     code->ABI_CallFunction(reinterpret_cast<void*>(cb.InterpreterFallback));
-    routines->GenReturnFromRunCode(code); // TODO: Check cycles
+    code->ReturnFromRunCode(); // TODO: Check cycles
 }

 void EmitX64::EmitTerminalReturnToDispatch(IR::Term::ReturnToDispatch, Arm::LocationDescriptor initial_location) {
-    routines->GenReturnFromRunCode(code);
+    code->ReturnFromRunCode();
 }

 void EmitX64::EmitTerminalLinkBlock(IR::Term::LinkBlock terminal, Arm::LocationDescriptor initial_location) {

@@ -1509,7 +1509,7 @@ void EmitX64::EmitTerminalLinkBlock(IR::Term::LinkBlock terminal, Arm::LocationD
             code->AND(32, MJitStateCpsr(), Imm32(~(1 << 9)));
         }
     }
-    routines->GenReturnFromRunCode(code); // TODO: Check cycles, Properly do a link
+    code->ReturnFromRunCode(); // TODO: Check cycles, Properly do a link
 }

 void EmitX64::EmitTerminalLinkBlockFast(IR::Term::LinkBlockFast terminal, Arm::LocationDescriptor initial_location) {

@@ -9,8 +9,8 @@
 #include <set>
 #include <unordered_map>

+#include "backend_x64/block_of_code.h"
 #include "backend_x64/reg_alloc.h"
-#include "backend_x64/routines.h"
 #include "common/x64/emitter.h"
 #include "frontend/ir/ir.h"
 #include "interface/interface.h"

@@ -20,8 +20,8 @@ namespace BackendX64 {

 class EmitX64 final {
 public:
-    EmitX64(Gen::XEmitter* code, Routines* routines, UserCallbacks cb, Jit* jit_interface)
-        : reg_alloc(code), code(code), routines(routines), cb(cb), jit_interface(jit_interface) {}
+    EmitX64(BlockOfCode* code, UserCallbacks cb, Jit* jit_interface)
+        : reg_alloc(code), code(code), cb(cb), jit_interface(jit_interface) {}

     struct BlockDescriptor {
         CodePtr code_ptr;

@@ -62,8 +62,7 @@ private:
     RegAlloc reg_alloc;

     // State
-    Gen::XEmitter* code;
-    Routines* routines;
+    BlockOfCode* code;
     UserCallbacks cb;
     Jit* jit_interface;
     std::unordered_map<Arm::LocationDescriptor, BlockDescriptor, Arm::LocationDescriptorHash> basic_blocks;

@@ -11,9 +11,9 @@
 #include <llvm-c/Target.h>
 #endif

+#include "backend_x64/block_of_code.h"
 #include "backend_x64/emit_x64.h"
 #include "backend_x64/jitstate.h"
-#include "backend_x64/routines.h"
 #include "common/assert.h"
 #include "common/bit_util.h"
 #include "common/common_types.h"

@@ -28,17 +28,10 @@ namespace Dynarmic {

 using namespace BackendX64;

-struct BlockOfCode : Gen::XCodeBlock {
-    BlockOfCode() {
-        AllocCodeSpace(128 * 1024 * 1024);
-    }
-};
-
 struct Jit::Impl {
-    Impl(Jit* jit, UserCallbacks callbacks) : emitter(&block_of_code, &routines, callbacks, jit), callbacks(callbacks) {}
+    Impl(Jit* jit, UserCallbacks callbacks) : emitter(&block_of_code, callbacks, jit), callbacks(callbacks) {}

     JitState jit_state{};
-    Routines routines{};
     BlockOfCode block_of_code{};
     EmitX64 emitter;
     const UserCallbacks callbacks;

@@ -51,7 +44,7 @@ struct Jit::Impl {
         Arm::LocationDescriptor descriptor{pc, TFlag, EFlag, jit_state.guest_FPSCR_flags};

         CodePtr code_ptr = GetBasicBlock(descriptor)->code_ptr;
-        return routines.RunCode(&jit_state, code_ptr, cycle_count);
+        return block_of_code.RunCode(&jit_state, code_ptr, cycle_count);
     }

     std::string Disassemble(const Arm::LocationDescriptor& descriptor) {

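Putting the interface_x64.cpp hunks together, the per-Jit wiring after this commit looks roughly like the following. The member order and the body of the execute path are taken from the hunks above; everything else (the function name and signature, the `GetBasicBlock` helper) is assumed context rather than verbatim source.

```cpp
// Abridged reconstruction of Jit::Impl after this commit; details not shown
// in the hunks above are assumptions.
struct Jit::Impl {
    Impl(Jit* jit, UserCallbacks callbacks)
        : emitter(&block_of_code, callbacks, jit), callbacks(callbacks) {}

    JitState jit_state{};
    BlockOfCode block_of_code{};   // owns the emitter buffer, constants and run-code thunk
    EmitX64 emitter;               // emits basic blocks directly into block_of_code
    const UserCallbacks callbacks;

    size_t Execute(size_t cycle_count, Arm::LocationDescriptor descriptor) { // signature assumed
        CodePtr code_ptr = GetBasicBlock(descriptor)->code_ptr;              // helper assumed
        return block_of_code.RunCode(&jit_state, code_ptr, cycle_count);
    }
};
```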
@@ -126,13 +119,7 @@ size_t Jit::Run(size_t cycle_count) {

 void Jit::ClearCache(bool poison_memory) {
     ASSERT(!is_executing);

-    if (poison_memory) {
-        impl->block_of_code.ClearCodeSpace();
-    } else {
-        impl->block_of_code.ResetCodePtr();
-    }
-
+    impl->block_of_code.ClearCache(poison_memory);
     impl->emitter.ClearCache();
 }

@@ -24,7 +24,7 @@ struct JitState {

     std::array<u64, SpillCount> Spill{}; // Spill.

-    // For internal use (See: Routines::RunCode)
+    // For internal use (See: BlockOfCode::RunCode)
     u32 guest_MXCSR = 0x00001f80;
     u32 save_host_MXCSR = 0;
     u64 save_host_RSP = 0;

@@ -8,6 +8,7 @@

 #include <map>

+#include "backend_x64/block_of_code.h"
 #include "backend_x64/jitstate.h"
 #include "common/common_types.h"
 #include "common/x64/emitter.h"

@@ -97,7 +98,7 @@ const HostLocList any_xmm = {

 class RegAlloc final {
 public:
-    RegAlloc(Gen::XEmitter* code) : code(code) {}
+    RegAlloc(BlockOfCode* code) : code(code) {}

     /// Late-def
     Gen::X64Reg DefRegister(IR::Inst* def_inst, HostLocList desired_locations);

@@ -145,7 +146,7 @@ private:
     void SpillRegister(HostLoc loc);
     HostLoc FindFreeSpill() const;

-    Gen::XEmitter* code = nullptr;
+    BlockOfCode* code = nullptr;

     struct HostLocInfo {
         std::vector<IR::Inst*> values; // early value