Misc. fixups of MSVC build
parent a1dfa01515
commit aa74a8130b
7 changed files with 28 additions and 32 deletions
@@ -223,13 +223,13 @@ void A64EmitX64::EmitA64CallSupervisor(A64EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     ASSERT(args[0].IsImmediate());
     u32 imm = args[0].GetImmediateU32();
-    Devirtualize<&A64::UserCallbacks::CallSVC>(conf.callbacks).EmitCall(code, [&](Xbyak::Reg64 param1) {
+    DEVIRT(conf.callbacks, &A64::UserCallbacks::CallSVC).EmitCall(code, [&](Xbyak::Reg64 param1) {
         code->mov(param1.cvt32(), imm);
     });
 }

 void A64EmitX64::EmitA64ReadMemory8(A64EmitContext& ctx, IR::Inst* inst) {
-    Devirtualize<&A64::UserCallbacks::MemoryRead8>(conf.callbacks).EmitCall(code, [&](Xbyak::Reg64 vaddr) {
+    DEVIRT(conf.callbacks, &A64::UserCallbacks::MemoryRead8).EmitCall(code, [&](Xbyak::Reg64 vaddr) {
         ASSERT(vaddr == code->ABI_PARAM2);
         auto args = ctx.reg_alloc.GetArgumentInfo(inst);
         ctx.reg_alloc.HostCall(inst, {}, args[0]);
@@ -237,7 +237,7 @@ void A64EmitX64::EmitA64ReadMemory8(A64EmitContext& ctx, IR::Inst* inst) {
 }

 void A64EmitX64::EmitA64ReadMemory16(A64EmitContext& ctx, IR::Inst* inst) {
-    Devirtualize<&A64::UserCallbacks::MemoryRead16>(conf.callbacks).EmitCall(code, [&](Xbyak::Reg64 vaddr) {
+    DEVIRT(conf.callbacks, &A64::UserCallbacks::MemoryRead16).EmitCall(code, [&](Xbyak::Reg64 vaddr) {
         ASSERT(vaddr == code->ABI_PARAM2);
         auto args = ctx.reg_alloc.GetArgumentInfo(inst);
         ctx.reg_alloc.HostCall(inst, {}, args[0]);
@@ -245,7 +245,7 @@ void A64EmitX64::EmitA64ReadMemory16(A64EmitContext& ctx, IR::Inst* inst) {
 }

 void A64EmitX64::EmitA64ReadMemory32(A64EmitContext& ctx, IR::Inst* inst) {
-    Devirtualize<&A64::UserCallbacks::MemoryRead32>(conf.callbacks).EmitCall(code, [&](Xbyak::Reg64 vaddr) {
+    DEVIRT(conf.callbacks, &A64::UserCallbacks::MemoryRead32).EmitCall(code, [&](Xbyak::Reg64 vaddr) {
         ASSERT(vaddr == code->ABI_PARAM2);
         auto args = ctx.reg_alloc.GetArgumentInfo(inst);
         ctx.reg_alloc.HostCall(inst, {}, args[0]);
@@ -253,7 +253,7 @@ void A64EmitX64::EmitA64ReadMemory32(A64EmitContext& ctx, IR::Inst* inst) {
 }

 void A64EmitX64::EmitA64ReadMemory64(A64EmitContext& ctx, IR::Inst* inst) {
-    Devirtualize<&A64::UserCallbacks::MemoryRead64>(conf.callbacks).EmitCall(code, [&](Xbyak::Reg64 vaddr) {
+    DEVIRT(conf.callbacks, &A64::UserCallbacks::MemoryRead64).EmitCall(code, [&](Xbyak::Reg64 vaddr) {
         ASSERT(vaddr == code->ABI_PARAM2);
         auto args = ctx.reg_alloc.GetArgumentInfo(inst);
         ctx.reg_alloc.HostCall(inst, {}, args[0]);
@@ -261,7 +261,7 @@ void A64EmitX64::EmitA64ReadMemory64(A64EmitContext& ctx, IR::Inst* inst) {
 }

 void A64EmitX64::EmitA64WriteMemory8(A64EmitContext& ctx, IR::Inst* inst) {
-    Devirtualize<&A64::UserCallbacks::MemoryWrite8>(conf.callbacks).EmitCall(code, [&](Xbyak::Reg64 vaddr, Xbyak::Reg64 value) {
+    DEVIRT(conf.callbacks, &A64::UserCallbacks::MemoryWrite8).EmitCall(code, [&](Xbyak::Reg64 vaddr, Xbyak::Reg64 value) {
         ASSERT(vaddr == code->ABI_PARAM2 && value == code->ABI_PARAM3);
         auto args = ctx.reg_alloc.GetArgumentInfo(inst);
         ctx.reg_alloc.HostCall(nullptr, {}, args[0], args[1]);
@@ -269,7 +269,7 @@ void A64EmitX64::EmitA64WriteMemory8(A64EmitContext& ctx, IR::Inst* inst) {
 }

 void A64EmitX64::EmitA64WriteMemory16(A64EmitContext& ctx, IR::Inst* inst) {
-    Devirtualize<&A64::UserCallbacks::MemoryWrite16>(conf.callbacks).EmitCall(code, [&](Xbyak::Reg64 vaddr, Xbyak::Reg64 value) {
+    DEVIRT(conf.callbacks, &A64::UserCallbacks::MemoryWrite16).EmitCall(code, [&](Xbyak::Reg64 vaddr, Xbyak::Reg64 value) {
         ASSERT(vaddr == code->ABI_PARAM2 && value == code->ABI_PARAM3);
         auto args = ctx.reg_alloc.GetArgumentInfo(inst);
         ctx.reg_alloc.HostCall(nullptr, {}, args[0], args[1]);
@@ -277,7 +277,7 @@ void A64EmitX64::EmitA64WriteMemory16(A64EmitContext& ctx, IR::Inst* inst) {
 }

 void A64EmitX64::EmitA64WriteMemory32(A64EmitContext& ctx, IR::Inst* inst) {
-    Devirtualize<&A64::UserCallbacks::MemoryWrite32>(conf.callbacks).EmitCall(code, [&](Xbyak::Reg64 vaddr, Xbyak::Reg64 value) {
+    DEVIRT(conf.callbacks, &A64::UserCallbacks::MemoryWrite32).EmitCall(code, [&](Xbyak::Reg64 vaddr, Xbyak::Reg64 value) {
         ASSERT(vaddr == code->ABI_PARAM2 && value == code->ABI_PARAM3);
         auto args = ctx.reg_alloc.GetArgumentInfo(inst);
         ctx.reg_alloc.HostCall(nullptr, {}, args[0], args[1]);
@@ -285,7 +285,7 @@ void A64EmitX64::EmitA64WriteMemory32(A64EmitContext& ctx, IR::Inst* inst) {
 }

 void A64EmitX64::EmitA64WriteMemory64(A64EmitContext& ctx, IR::Inst* inst) {
-    Devirtualize<&A64::UserCallbacks::MemoryWrite64>(conf.callbacks).EmitCall(code, [&](Xbyak::Reg64 vaddr, Xbyak::Reg64 value) {
+    DEVIRT(conf.callbacks, &A64::UserCallbacks::MemoryWrite64).EmitCall(code, [&](Xbyak::Reg64 vaddr, Xbyak::Reg64 value) {
         ASSERT(vaddr == code->ABI_PARAM2 && value == code->ABI_PARAM3);
         auto args = ctx.reg_alloc.GetArgumentInfo(inst);
         ctx.reg_alloc.HostCall(nullptr, {}, args[0], args[1]);
@@ -294,7 +294,7 @@ void A64EmitX64::EmitA64WriteMemory64(A64EmitContext& ctx, IR::Inst* inst) {

 void A64EmitX64::EmitTerminalImpl(IR::Term::Interpret terminal, IR::LocationDescriptor) {
     code->SwitchMxcsrOnExit();
-    Devirtualize<&A64::UserCallbacks::InterpreterFallback>(conf.callbacks).EmitCall(code, [&](Xbyak::Reg64 param1, Xbyak::Reg64 param2) {
+    DEVIRT(conf.callbacks, &A64::UserCallbacks::InterpreterFallback).EmitCall(code, [&](Xbyak::Reg64 param1, Xbyak::Reg64 param2) {
         code->mov(param1, A64::LocationDescriptor{terminal.next}.PC());
         code->mov(qword[r15 + offsetof(A64JitState, pc)], param1);
         code->mov(param2.cvt32(), 1);
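The call sites above only change spelling: Devirtualize<&A64::UserCallbacks::X>(conf.callbacks) becomes DEVIRT(conf.callbacks, &A64::UserCallbacks::X). The DEVIRT macro, added to devirtualize.h further down in this commit, simply passes decltype(mfp) alongside mfp so the helper no longer needs a C++17 `auto` non-type template parameter; presumably the MSVC toolchain being fixed here did not yet accept that form. A minimal, self-contained sketch of the same trick, using a hypothetical Callbacks type rather than dynarmic's real classes:

#include <cstdint>
#include <cstdio>

struct Callbacks {
    virtual ~Callbacks() = default;
    virtual void CallSVC(std::uint32_t swi) { std::printf("SVC %u\n", swi); }
};

// Pre-"template <auto>" spelling: the member-pointer type is an ordinary type
// parameter and the pointer itself is a value parameter of that type.
template <typename MfpT, MfpT mfp, typename C, typename... Args>
void Invoke(C* this_, Args... args) {
    (this_->*mfp)(args...);
}

// The macro hides the decltype duplication, mirroring how DEVIRT expands to
// Devirtualize<decltype(mfp), mfp>(this_).
#define INVOKE(this_, mfp, ...) Invoke<decltype(mfp), mfp>((this_), __VA_ARGS__)

int main() {
    Callbacks cb;
    INVOKE(&cb, &Callbacks::CallSVC, 42u);  // prints "SVC 42"
    return 0;
}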
@@ -12,6 +12,7 @@
 #include "backend_x64/a64_emit_x64.h"
 #include "backend_x64/a64_jitstate.h"
 #include "backend_x64/block_of_code.h"
+#include "backend_x64/devirtualize.h"
 #include "backend_x64/jitstate_info.h"
 #include "common/assert.h"
 #include "common/scope_exit.h"
@@ -25,16 +26,11 @@ namespace A64 {

 using namespace BackendX64;

-template <auto fn, typename Ret, typename ...Args>
-static Ret Thunk(A64::UserCallbacks* cb, Args... args) {
-    return (cb->*fn)(std::forward<Args>(args)...);
-}
-
 static RunCodeCallbacks GenRunCodeCallbacks(A64::UserCallbacks* cb, CodePtr (*LookupBlock)(void* lookup_block_arg), void* arg) {
     return RunCodeCallbacks{
         std::make_unique<ArgCallback>(LookupBlock, reinterpret_cast<u64>(arg)),
-        std::make_unique<ArgCallback>(&Thunk<&A64::UserCallbacks::AddTicks, void, u64>, reinterpret_cast<u64>(cb)),
-        std::make_unique<ArgCallback>(&Thunk<&A64::UserCallbacks::GetTicksRemaining, u64>, reinterpret_cast<u64>(cb)),
+        std::make_unique<ArgCallback>(DEVIRT(cb, &A64::UserCallbacks::AddTicks)),
+        std::make_unique<ArgCallback>(DEVIRT(cb, &A64::UserCallbacks::GetTicksRemaining)),
     };
 }
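The deleted local Thunk in this file did the same job as DEVIRT but forced each callback's return and argument types to be restated by hand (void, u64 for AddTicks; u64 for GetTicksRemaining), which is easy to get out of sync with the interface. A compile-only sketch of the removed shape, with a stand-in callbacks type rather than dynarmic's real one; note it relies on the C++17 `auto` non-type template parameter this commit moves away from:

#include <cstdint>
#include <utility>

struct UserCallbacks {
    virtual ~UserCallbacks() = default;
    virtual void AddTicks(std::uint64_t ticks) = 0;
    virtual std::uint64_t GetTicksRemaining() = 0;
};

// Shape of the removed helper: an `auto` non-type template parameter plus
// manually supplied Ret/Args that every use had to keep in sync.
template <auto fn, typename Ret, typename... Args>
static Ret Thunk(UserCallbacks* cb, Args... args) {
    return (cb->*fn)(std::forward<Args>(args)...);
}

// The removed call sites took these addresses, spelling the signatures out.
void (*const add_ticks)(UserCallbacks*, std::uint64_t) =
    &Thunk<&UserCallbacks::AddTicks, void, std::uint64_t>;
std::uint64_t (*const get_ticks)(UserCallbacks*) =
    &Thunk<&UserCallbacks::GetTicksRemaining, std::uint64_t>;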
@@ -17,22 +17,24 @@ namespace BackendX64 {

 namespace impl {

-template <auto fn, typename F>
+template <typename FunctionType, FunctionType mfp>
 struct ThunkBuilder;

-template <auto fn, typename C, typename R, typename... Args>
-struct ThunkBuilder<fn, R(C::*)(Args...)> {
+template <typename C, typename R, typename... Args, R(C::*mfp)(Args...)>
+struct ThunkBuilder<R(C::*)(Args...), mfp> {
     static R Thunk(C* this_, Args... args) {
-        return (this_->*fn)(std::forward<Args>(args)...);
+        return (this_->*mfp)(std::forward<Args>(args)...);
     }
 };

 } // namespace impl

-template <auto fn>
-ArgCallback Devirtualize(mp::class_type_t<decltype(fn)>* this_) {
-    return ArgCallback{&impl::ThunkBuilder<fn, decltype(fn)>::Thunk, reinterpret_cast<u64>(this_)};
+template <typename FunctionType, FunctionType mfp>
+ArgCallback Devirtualize(mp::class_type_t<decltype(mfp)>* this_) {
+    return ArgCallback{&impl::ThunkBuilder<FunctionType, mfp>::Thunk, reinterpret_cast<u64>(this_)};
 }

+#define DEVIRT(this_, mfp) Dynarmic::BackendX64::Devirtualize<decltype(mfp), mfp>(this_)
+
 } // namespace BackendX64
 } // namespace Dynarmic
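The rewritten ThunkBuilder keeps the same mechanism without `auto` non-type template parameters: the partial specialization pattern-matches R (C::*)(Args...) out of the member-pointer type and exposes a plain static function whose first parameter is the object pointer, which is the function-pointer-plus-u64 pair that ArgCallback bundles for the emitted code. A standalone sketch of that lowering with a hypothetical callback interface (dynarmic's ArgCallback itself is not reproduced here):

#include <cstdint>
#include <cstdio>
#include <utility>

// Same shape as the ThunkBuilder above, reproduced standalone.
template <typename FunctionType, FunctionType mfp>
struct ThunkBuilder;

template <typename C, typename R, typename... Args, R (C::*mfp)(Args...)>
struct ThunkBuilder<R (C::*)(Args...), mfp> {
    static R Thunk(C* this_, Args... args) {
        return (this_->*mfp)(std::forward<Args>(args)...);
    }
};

// Hypothetical callback interface standing in for A64::UserCallbacks.
struct UserCallbacks {
    virtual ~UserCallbacks() = default;
    virtual void AddTicks(std::uint64_t ticks) = 0;
};

struct CountingCallbacks final : UserCallbacks {
    std::uint64_t total = 0;
    void AddTicks(std::uint64_t ticks) override { total += ticks; }
};

int main() {
    CountingCallbacks cb;

    // What Devirtualize/DEVIRT hand over: a free-function pointer...
    void (*fn)(UserCallbacks*, std::uint64_t) =
        &ThunkBuilder<decltype(&UserCallbacks::AddTicks), &UserCallbacks::AddTicks>::Thunk;
    // ...and the object pointer smuggled through as a u64 argument.
    const std::uint64_t this_arg = reinterpret_cast<std::uint64_t>(static_cast<UserCallbacks*>(&cb));

    fn(reinterpret_cast<UserCallbacks*>(this_arg), 10);  // still dispatches virtually
    std::printf("%llu\n", static_cast<unsigned long long>(cb.total));  // prints 10
    return 0;
}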
@@ -135,7 +135,7 @@ template <typename JST>
 void EmitX64<JST>::EmitGetNZCVFromOp(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);

-    const size_t bitsize = [&]{
+    const int bitsize = [&]{
         switch (args[0].GetType()) {
         case IR::Type::U8:
             return 8;
@@ -786,7 +786,7 @@ static Xbyak::Reg64 DoNZCV(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* nzc
     return nzcv;
 }

-static void EmitAdd(BlockOfCode* code, EmitContext& ctx, IR::Inst* inst, size_t bitsize) {
+static void EmitAdd(BlockOfCode* code, EmitContext& ctx, IR::Inst* inst, int bitsize) {
     auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
     auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
     auto nzcv_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetNZCVFromOp);
@@ -860,7 +860,7 @@ void EmitX64<JST>::EmitAdd64(EmitContext& ctx, IR::Inst* inst) {
     EmitAdd(code, ctx, inst, 64);
 }

-static void EmitSub(BlockOfCode* code, EmitContext& ctx, IR::Inst* inst, size_t bitsize) {
+static void EmitSub(BlockOfCode* code, EmitContext& ctx, IR::Inst* inst, int bitsize) {
     auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
     auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
     auto nzcv_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetNZCVFromOp);
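The size_t to int changes above read as warning hygiene rather than behaviour changes: the immediately invoked lambda in EmitGetNZCVFromOp deduces int from its integer-literal returns, and bitsize is then mixed with int expressions, so keeping it int avoids MSVC's signed/unsigned and narrowing diagnostics (e.g. C4267). That rationale is an assumption; a hypothetical, self-contained illustration of the pattern, not dynarmic's emitter code:

// If bitsize were std::size_t, the subtraction below would be performed in
// size_t, and converting the result back to int is what MSVC warns about
// (e.g. C4267); keeping everything int avoids the mixed-sign arithmetic.
int ShiftAmount(int type_selector) {
    const int bitsize = [&] {
        switch (type_selector) {
        case 0:  return 8;
        case 1:  return 16;
        case 2:  return 32;
        default: return 64;
        }
    }();
    return 64 - bitsize;
}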
@@ -94,7 +94,7 @@ bool TranslatorVisitor::CBNZ(bool sf, Imm<19> imm19, Reg Rt) {

 bool TranslatorVisitor::TBZ(Imm<1> b5, Imm<5> b40, Imm<14> imm14, Reg Rt) {
     size_t datasize = b5 == 1 ? 64 : 32;
-    size_t bit_pos = concatenate(b5, b40).ZeroExtend<size_t>();
+    u8 bit_pos = concatenate(b5, b40).ZeroExtend<u8>();
     s64 offset = concatenate(imm14, Imm<2>{0}).SignExtend<s64>();

     auto operand = X(datasize, Rt);
@@ -110,7 +110,7 @@ bool TranslatorVisitor::TBZ(Imm<1> b5, Imm<5> b40, Imm<14> imm14, Reg Rt) {

 bool TranslatorVisitor::TBNZ(Imm<1> b5, Imm<5> b40, Imm<14> imm14, Reg Rt) {
     size_t datasize = b5 == 1 ? 64 : 32;
-    size_t bit_pos = concatenate(b5, b40).ZeroExtend<size_t>();
+    u8 bit_pos = concatenate(b5, b40).ZeroExtend<u8>();
     s64 offset = concatenate(imm14, Imm<2>{0}).SignExtend<s64>();

     auto operand = X(datasize, Rt);
@@ -45,7 +45,7 @@ boost::optional<TranslatorVisitor::BitMasks> TranslatorVisitor::DecodeBitMasks(b
     s32 R = s32(immr.ZeroExtend() & levels);
     u64 d = u64(S - R) & levels;

-    size_t esize = 1 << len;
+    size_t esize = static_cast<size_t>(1) << len;
     u64 welem = Common::Ones<u64>(S + 1);
     u64 telem = Common::Ones<u64>(d + 1);
     u64 wmask = Common::RotateRight(Common::Replicate(welem, esize), R);
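The cast above is the usual way to address MSVC's C4334 warning about a 32-bit shift result being implicitly converted to 64 bits: in 1 << len the shift is performed on a 32-bit int and only afterwards widened to size_t, whereas casting the 1 first makes the whole shift happen at size_t width. Whether the warning was the actual trigger is an assumption; a tiny standalone illustration:

#include <cstddef>
#include <cstdio>

int main() {
    const unsigned len = 40;  // deliberately large to make the difference visible
    // std::size_t bad  = 1 << len;                         // shift done in 32 bits, then widened (MSVC C4334; UB for len >= 32)
    std::size_t good = static_cast<std::size_t>(1) << len;  // shift done at size_t width
    std::printf("%zu\n", good);  // 1099511627776
    return 0;
}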
@@ -28,8 +28,6 @@ bool TranslatorVisitor::STP_LDP_gen(Imm<2> opc, bool not_postindex, bool wback,
         return UnpredictableInstruction();

     IR::U64 address;
-    IR::U32U64 data1;
-    IR::U32U64 data2;
     const size_t dbytes = datasize / 8;

     if (Rn == Reg::SP)