A64: Implement addsub instructions
parent d1cef6ffb0
commit c09e69bb97

15 changed files with 796 additions and 84 deletions
@@ -115,6 +115,23 @@ A64EmitX64::BlockDescriptor A64EmitX64::Emit(IR::Block& block) {
    return block_desc;
}

void A64EmitX64::EmitA64GetCFlag(A64EmitContext& ctx, IR::Inst* inst) {
    Xbyak::Reg32 result = ctx.reg_alloc.ScratchGpr().cvt32();
    code->mov(result, dword[r15 + offsetof(A64JitState, CPSR_nzcv)]);
    code->shr(result, 29);
    code->and_(result, 1);
    ctx.reg_alloc.DefineValue(inst, result);
}

void A64EmitX64::EmitA64SetNZCV(A64EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    Xbyak::Reg32 to_store = ctx.reg_alloc.UseGpr(args[0]).cvt32();
    code->and_(to_store, 0b11000001'00000001);
    code->imul(to_store, to_store, 0b00010000'00100001);
    code->and_(to_store, 0xFF000000);
    code->mov(dword[r15 + offsetof(A64JitState, CPSR_nzcv)], to_store);
}

void A64EmitX64::EmitA64GetW(A64EmitContext& ctx, IR::Inst* inst) {
    A64::Reg reg = inst->GetArg(0).GetA64RegRef();

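The and/imul/and sequence in EmitA64SetNZCV is a multiply-to-scatter-bits trick: the first mask keeps the four flag bits, multiplying by a constant whose set bits are the required shift amounts deposits a copy of each flag at several positions at once, and the final mask keeps only the destination bits. The sketch below demonstrates the technique on its own, assuming the input uses the LAHF+SETO layout implied by the first mask (N/Z/C/V at bits 15/14/8/0) and a destination layout of N/Z/C/V at bits 31..28, which is consistent with EmitA64GetCFlag reading C from bit 29; the multiplier and final mask here are chosen for that assumed layout and are not the constants the commit emits.

#include <cassert>
#include <cstdint>

// Assumed source layout (LAHF + SETO): N = bit 15, Z = bit 14, C = bit 8, V = bit 0.
// Assumed destination layout: N = bit 31, Z = bit 30, C = bit 29, V = bit 28.
std::uint32_t pack_nzcv(std::uint32_t ax) {
    std::uint64_t x = ax & 0b1100'0001'0000'0001;  // keep SF, ZF, CF, OF
    // The multiplier's set bits (16, 21, 28) are the shifts that move N/Z, C
    // and V into place; the product is the sum of the shifted copies.
    x *= 0x1021'0000ULL;
    return static_cast<std::uint32_t>(x) & 0xF000'0000;
}

int main() {
    assert(pack_nzcv(1u << 15) == 1u << 31);  // N
    assert(pack_nzcv(1u << 14) == 1u << 30);  // Z
    assert(pack_nzcv(1u << 8)  == 1u << 29);  // C
    assert(pack_nzcv(1u << 0)  == 1u << 28);  // V
}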
@@ -131,6 +148,12 @@ void A64EmitX64::EmitA64GetX(A64EmitContext& ctx, IR::Inst* inst) {
    ctx.reg_alloc.DefineValue(inst, result);
}

void A64EmitX64::EmitA64GetSP(A64EmitContext& ctx, IR::Inst* inst) {
    Xbyak::Reg64 result = ctx.reg_alloc.ScratchGpr();
    code->mov(result, qword[r15 + offsetof(A64JitState, sp)]);
    ctx.reg_alloc.DefineValue(inst, result);
}

void A64EmitX64::EmitA64SetW(A64EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    A64::Reg reg = inst->GetArg(0).GetA64RegRef();

@@ -161,6 +184,20 @@ void A64EmitX64::EmitA64SetX(A64EmitContext& ctx, IR::Inst* inst) {
    }
}

void A64EmitX64::EmitA64SetSP(A64EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    auto addr = qword[r15 + offsetof(A64JitState, sp)];
    if (args[0].IsImmediate()) {
        code->mov(addr, args[0].GetImmediateU64());
    } else if (args[0].IsInXmm()) {
        Xbyak::Xmm to_store = ctx.reg_alloc.UseXmm(args[0]);
        code->movq(addr, to_store);
    } else {
        Xbyak::Reg64 to_store = ctx.reg_alloc.UseGpr(args[0]);
        code->mov(addr, to_store);
    }
}

void A64EmitX64::EmitTerminalImpl(IR::Term::Interpret terminal, IR::LocationDescriptor) {
    code->mov(code->ABI_PARAM1, A64::LocationDescriptor{terminal.next}.PC());
    code->mov(code->ABI_PARAM2.cvt32(), 1);

@@ -131,6 +131,11 @@ void EmitX64<JST>::EmitGetGEFromOp(EmitContext&, IR::Inst*) {
    ASSERT_MSG(false, "should never happen");
}

template <typename JST>
void EmitX64<JST>::EmitGetNZCVFromOp(EmitContext&, IR::Inst*) {
    ASSERT_MSG(false, "should never happen");
}

template <typename JST>
void EmitX64<JST>::EmitPack2x32To1x64(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);

@@ -738,21 +743,31 @@ static Xbyak::Reg8 DoCarry(RegAlloc& reg_alloc, Argument& carry_in, IR::Inst* ca
    }
}

template <typename JST>
void EmitX64<JST>::EmitAddWithCarry(EmitContext& ctx, IR::Inst* inst) {
static Xbyak::Reg64 DoNZCV(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* nzcv_out) {
    if (!nzcv_out)
        return INVALID_REG;

    Xbyak::Reg64 nzcv = reg_alloc.ScratchGpr({HostLoc::RAX});
    code->xor_(nzcv.cvt32(), nzcv.cvt32());
    return nzcv;
}

static void EmitAdd(BlockOfCode* code, EmitContext& ctx, IR::Inst* inst, size_t bitsize) {
    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
    auto nzcv_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetNZCVFromOp);

    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    auto& carry_in = args[2];

    Xbyak::Reg32 result = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
    Xbyak::Reg64 nzcv = DoNZCV(code, ctx.reg_alloc, nzcv_inst);
    Xbyak::Reg result = ctx.reg_alloc.UseScratchGpr(args[0]).changeBit(bitsize);
    Xbyak::Reg8 carry = DoCarry(ctx.reg_alloc, carry_in, carry_inst);
    Xbyak::Reg8 overflow = overflow_inst ? ctx.reg_alloc.ScratchGpr().cvt8() : INVALID_REG.cvt8();

    // TODO: Consider using LEA.

    if (args[1].IsImmediate()) {
    if (args[1].IsImmediate() && args[1].GetType() == IR::Type::U32) {
        u32 op_arg = args[1].GetImmediateU32();
        if (carry_in.IsImmediate()) {
            if (carry_in.GetImmediateU1()) {

@@ -767,7 +782,7 @@ void EmitX64<JST>::EmitAddWithCarry(EmitContext& ctx, IR::Inst* inst) {
        }
    } else {
        OpArg op_arg = ctx.reg_alloc.UseOpArg(args[1]);
        op_arg.setBit(32);
        op_arg.setBit(bitsize);
        if (carry_in.IsImmediate()) {
            if (carry_in.GetImmediateU1()) {
                code->stc();

@@ -781,6 +796,12 @@ void EmitX64<JST>::EmitAddWithCarry(EmitContext& ctx, IR::Inst* inst) {
        }
    }

    if (nzcv_inst) {
        ctx.EraseInstruction(nzcv_inst);
        code->lahf();
        code->seto(code->al);
        ctx.reg_alloc.DefineValue(nzcv_inst, nzcv);
    }
    if (carry_inst) {
        ctx.EraseInstruction(carry_inst);
        code->setc(carry);

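The lahf/seto pair above is what produces the value consumed by GetNZCVFromOp. As x86 background (not something the diff itself spells out): lahf copies SF, ZF, AF, PF and CF into AH, and seto al writes the overflow flag into AL bit 0, so AX ends up holding N, Z, C and V at bits 15, 14, 8 and 0 — exactly the bits that EmitA64SetNZCV masks out. A compile-time check of that claim:

#include <cstdint>

// After "lahf; seto al": N = AX bit 15 (SF), Z = bit 14 (ZF), C = bit 8 (CF), V = bit 0 (OF).
constexpr std::uint16_t kLahfSetoNZCVMask = (1u << 15) | (1u << 14) | (1u << 8) | (1u << 0);
static_assert(kLahfSetoNZCVMask == 0b11000001'00000001, "matches the mask used by EmitA64SetNZCV");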
@@ -796,26 +817,25 @@ void EmitX64<JST>::EmitAddWithCarry(EmitContext& ctx, IR::Inst* inst) {
}

template <typename JST>
void EmitX64<JST>::EmitAdd64(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);

    Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
    Xbyak::Reg64 op_arg = ctx.reg_alloc.UseGpr(args[1]);

    code->add(result, op_arg);

    ctx.reg_alloc.DefineValue(inst, result);
void EmitX64<JST>::EmitAdd32(EmitContext& ctx, IR::Inst* inst) {
    EmitAdd(code, ctx, inst, 32);
}

template <typename JST>
void EmitX64<JST>::EmitSubWithCarry(EmitContext& ctx, IR::Inst* inst) {
void EmitX64<JST>::EmitAdd64(EmitContext& ctx, IR::Inst* inst) {
    EmitAdd(code, ctx, inst, 64);
}

static void EmitSub(BlockOfCode* code, EmitContext& ctx, IR::Inst* inst, size_t bitsize) {
    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
    auto nzcv_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetNZCVFromOp);

    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    auto& carry_in = args[2];

    Xbyak::Reg32 result = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
    Xbyak::Reg64 nzcv = DoNZCV(code, ctx.reg_alloc, nzcv_inst);
    Xbyak::Reg result = ctx.reg_alloc.UseScratchGpr(args[0]).changeBit(bitsize);
    Xbyak::Reg8 carry = DoCarry(ctx.reg_alloc, carry_in, carry_inst);
    Xbyak::Reg8 overflow = overflow_inst ? ctx.reg_alloc.ScratchGpr().cvt8() : INVALID_REG.cvt8();

@@ -823,7 +843,7 @@ void EmitX64<JST>::EmitSubWithCarry(EmitContext& ctx, IR::Inst* inst) {
    // TODO: Optimize CMP case.
    // Note that x64 CF is inverse of what the ARM carry flag is here.

    if (args[1].IsImmediate()) {
    if (args[1].IsImmediate() && args[1].GetType() == IR::Type::U32) {
        u32 op_arg = args[1].GetImmediateU32();
        if (carry_in.IsImmediate()) {
            if (carry_in.GetImmediateU1()) {

@@ -839,7 +859,7 @@ void EmitX64<JST>::EmitSubWithCarry(EmitContext& ctx, IR::Inst* inst) {
        }
    } else {
        OpArg op_arg = ctx.reg_alloc.UseOpArg(args[1]);
        op_arg.setBit(32);
        op_arg.setBit(bitsize);
        if (carry_in.IsImmediate()) {
            if (carry_in.GetImmediateU1()) {
                code->sub(result, *op_arg);

@@ -854,6 +874,12 @@ void EmitX64<JST>::EmitSubWithCarry(EmitContext& ctx, IR::Inst* inst) {
        }
    }

    if (nzcv_inst) {
        ctx.EraseInstruction(nzcv_inst);
        code->lahf();
        code->seto(code->al);
        ctx.reg_alloc.DefineValue(nzcv_inst, nzcv);
    }
    if (carry_inst) {
        ctx.EraseInstruction(carry_inst);
        code->setnc(carry);

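The setnc above (versus setc in the add path) follows from a convention mismatch: for a - b, x86 sets CF when a borrow occurred (a < b), while the A64 carry flag after a subtraction means "no borrow". A tiny illustration of the A64-side definition for a plain subtraction (carry-in 1), as an aside rather than part of the commit:

#include <cstdint>

// A64 carry after "SUBS Rd, a, b": set when no borrow occurs,
// i.e. the complement of the x86 CF produced by "sub a, b".
bool A64CarryAfterSub(std::uint64_t a, std::uint64_t b) {
    return a >= b;
}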
@@ -869,19 +895,17 @@ void EmitX64<JST>::EmitSubWithCarry(EmitContext& ctx, IR::Inst* inst) {
}

template <typename JST>
void EmitX64<JST>::EmitSub64(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);

    Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
    Xbyak::Reg64 op_arg = ctx.reg_alloc.UseGpr(args[1]);

    code->sub(result, op_arg);

    ctx.reg_alloc.DefineValue(inst, result);
void EmitX64<JST>::EmitSub32(EmitContext& ctx, IR::Inst* inst) {
    EmitSub(code, ctx, inst, 32);
}

template <typename JST>
void EmitX64<JST>::EmitMul(EmitContext& ctx, IR::Inst* inst) {
void EmitX64<JST>::EmitSub64(EmitContext& ctx, IR::Inst* inst) {
    EmitSub(code, ctx, inst, 64);
}

template <typename JST>
void EmitX64<JST>::EmitMul32(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);

    Xbyak::Reg32 result = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();

@@ -984,10 +1008,10 @@ void EmitX64<JST>::EmitNot(EmitContext& ctx, IR::Inst* inst) {
}

template <typename JST>
void EmitX64<JST>::EmitSignExtendWordToLong(EmitContext& ctx, IR::Inst* inst) {
void EmitX64<JST>::EmitSignExtendByteToWord(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
    code->movsxd(result.cvt64(), result.cvt32());
    code->movsx(result.cvt32(), result.cvt8());
    ctx.reg_alloc.DefineValue(inst, result);
}

@@ -1000,18 +1024,34 @@ void EmitX64<JST>::EmitSignExtendHalfToWord(EmitContext& ctx, IR::Inst* inst) {
}

template <typename JST>
void EmitX64<JST>::EmitSignExtendByteToWord(EmitContext& ctx, IR::Inst* inst) {
void EmitX64<JST>::EmitSignExtendByteToLong(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
    code->movsx(result.cvt32(), result.cvt8());
    code->movsx(result.cvt64(), result.cvt8());
    ctx.reg_alloc.DefineValue(inst, result);
}

template <typename JST>
void EmitX64<JST>::EmitZeroExtendWordToLong(EmitContext& ctx, IR::Inst* inst) {
void EmitX64<JST>::EmitSignExtendHalfToLong(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
    code->mov(result.cvt32(), result.cvt32()); // x64 zeros upper 32 bits on a 32-bit move
    code->movsx(result.cvt64(), result.cvt16());
    ctx.reg_alloc.DefineValue(inst, result);
}

template <typename JST>
void EmitX64<JST>::EmitSignExtendWordToLong(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
    code->movsxd(result.cvt64(), result.cvt32());
    ctx.reg_alloc.DefineValue(inst, result);
}

template <typename JST>
void EmitX64<JST>::EmitZeroExtendByteToWord(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
    code->movzx(result.cvt32(), result.cvt8());
    ctx.reg_alloc.DefineValue(inst, result);
}

@@ -1024,10 +1064,26 @@ void EmitX64<JST>::EmitZeroExtendHalfToWord(EmitContext& ctx, IR::Inst* inst) {
}

template <typename JST>
void EmitX64<JST>::EmitZeroExtendByteToWord(EmitContext& ctx, IR::Inst* inst) {
void EmitX64<JST>::EmitZeroExtendByteToLong(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
    code->movzx(result.cvt32(), result.cvt8());
    code->movzx(result.cvt32(), result.cvt8()); // x64 zeros upper 32 bits on a 32-bit move
    ctx.reg_alloc.DefineValue(inst, result);
}

template <typename JST>
void EmitX64<JST>::EmitZeroExtendHalfToLong(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
    code->movzx(result.cvt32(), result.cvt16()); // x64 zeros upper 32 bits on a 32-bit move
    ctx.reg_alloc.DefineValue(inst, result);
}

template <typename JST>
void EmitX64<JST>::EmitZeroExtendWordToLong(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
    code->mov(result.cvt32(), result.cvt32()); // x64 zeros upper 32 bits on a 32-bit move
    ctx.reg_alloc.DefineValue(inst, result);
}

@@ -34,10 +34,10 @@ std::vector<Matcher<V>> GetDecodeTable() {
    //INST(&V::ADRP, "ADRP", "1ii10000iiiiiiiiiiiiiiiiiiiddddd"),

    // Data processing - Immediate - Add/Sub
    //INST(&V::ADD_imm, "ADD (immediate)", "z0010001ssiiiiiiiiiiiinnnnnddddd"),
    //INST(&V::ADDS_imm, "ADDS (immediate)", "z0110001ssiiiiiiiiiiiinnnnnddddd"),
    //INST(&V::SUB_imm, "SUB (immediate)", "z1010001ssiiiiiiiiiiiinnnnnddddd"),
    //INST(&V::SUBS_imm, "SUBS (immediate)", "z1110001ssiiiiiiiiiiiinnnnnddddd"),
    INST(&V::ADD_imm, "ADD (immediate)", "z0010001ssiiiiiiiiiiiinnnnnddddd"),
    INST(&V::ADDS_imm, "ADDS (immediate)", "z0110001ssiiiiiiiiiiiinnnnnddddd"),
    INST(&V::SUB_imm, "SUB (immediate)", "z1010001ssiiiiiiiiiiiinnnnnddddd"),
    INST(&V::SUBS_imm, "SUBS (immediate)", "z1110001ssiiiiiiiiiiiinnnnnddddd"),

    // Data processing - Immediate - Logical
    //INST(&V::AND_imm, "AND (immediate)", "z00100100Nrrrrrrssssssnnnnnddddd"),

@@ -374,21 +374,21 @@ std::vector<Matcher<V>> GetDecodeTable() {

    // Data Processing - Register - Add/Sub (shifted register)
    INST(&V::ADD_shift, "ADD (shifted register)", "z0001011ss0mmmmmiiiiiinnnnnddddd"),
    //INST(&V::ADDS_shift, "ADDS (shifted register)", "z0101011ss0mmmmmiiiiiinnnnnddddd"),
    //INST(&V::SUB_shift, "SUB (shifted register)", "z1001011ss0mmmmmiiiiiinnnnnddddd"),
    //INST(&V::SUBS_shift, "SUBS (shifted register)", "z1101011ss0mmmmmiiiiiinnnnnddddd"),
    INST(&V::ADDS_shift, "ADDS (shifted register)", "z0101011ss0mmmmmiiiiiinnnnnddddd"),
    INST(&V::SUB_shift, "SUB (shifted register)", "z1001011ss0mmmmmiiiiiinnnnnddddd"),
    INST(&V::SUBS_shift, "SUBS (shifted register)", "z1101011ss0mmmmmiiiiiinnnnnddddd"),

    // Data Processing - Register - Add/Sub (extended register)
    //INST(&V::ADD_ext, "ADD (extended register)", "z0001011001mmmmmxxxiiinnnnnddddd"),
    //INST(&V::ADDS_ext, "ADDS (extended register)", "z0101011001mmmmmxxxiiinnnnnddddd"),
    //INST(&V::SUB_ext, "SUB (extended register)", "z1001011001mmmmmxxxiiinnnnnddddd"),
    //INST(&V::SUBS_ext, "SUBS (extended register)", "z1101011001mmmmmxxxiiinnnnnddddd"),
    INST(&V::ADD_ext, "ADD (extended register)", "z0001011001mmmmmxxxiiinnnnnddddd"),
    INST(&V::ADDS_ext, "ADDS (extended register)", "z0101011001mmmmmxxxiiinnnnnddddd"),
    INST(&V::SUB_ext, "SUB (extended register)", "z1001011001mmmmmxxxiiinnnnnddddd"),
    INST(&V::SUBS_ext, "SUBS (extended register)", "z1101011001mmmmmxxxiiinnnnnddddd"),

    // Data Processing - Register - Add/Sub (with carry)
    //INST(&V::ADC, "ADC", "z0011010000mmmmm000000nnnnnddddd"),
    //INST(&V::ADCS, "ADCS", "z0111010000mmmmm000000nnnnnddddd"),
    //INST(&V::SBC, "SBC", "z1011010000mmmmm000000nnnnnddddd"),
    //INST(&V::SBCS, "SBCS", "z1111010000mmmmm000000nnnnnddddd"),
    INST(&V::ADC, "ADC", "z0011010000mmmmm000000nnnnnddddd"),
    INST(&V::ADCS, "ADCS", "z0111010000mmmmm000000nnnnnddddd"),
    INST(&V::SBC, "SBC", "z1011010000mmmmm000000nnnnnddddd"),
    INST(&V::SBCS, "SBCS", "z1111010000mmmmm000000nnnnnddddd"),

    // Data Processing - Register - Conditional compare
    //INST(&V::CCMN_reg, "CCMN (register)", "z0111010010mmmmmcccc00nnnnn0ffff"),

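Each character of the pattern strings above is one bit of the 32-bit instruction word: '0' and '1' are fixed opcode bits, and every other character ('z', 's', 'i', 'm', 'n', 'd', ...) names an operand field. A minimal sketch of how such a string can be turned into a mask/expected pair for matching — an illustration of the general idea only, not dynarmic's actual matcher generation:

#include <cstdint>
#include <string>
#include <utility>

// Build (mask, expected) from a 32-character pattern, most significant bit first.
std::pair<std::uint32_t, std::uint32_t> MaskAndExpect(const std::string& pattern) {
    std::uint32_t mask = 0, expect = 0;
    for (char c : pattern) {
        mask <<= 1;
        expect <<= 1;
        if (c == '0' || c == '1') {   // fixed opcode bit
            mask |= 1;
            expect |= (c == '1') ? 1 : 0;
        }                             // otherwise: operand bit, not matched on
    }
    return {mask, expect};
}

// An instruction word matches when (inst & mask) == expect.
bool Matches(std::uint32_t inst, const std::string& pattern) {
    const auto [mask, expect] = MaskAndExpect(pattern);
    return (inst & mask) == expect;
}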
@@ -22,21 +22,45 @@ u64 IREmitter::AlignPC(size_t alignment) {
    return static_cast<u64>(pc - pc % alignment);
}

IR::U1 IREmitter::GetCFlag() {
    return Inst<IR::U1>(Opcode::A64GetCFlag);
}

void IREmitter::SetNZCV(const IR::NZCV& nzcv) {
    Inst(Opcode::A64SetNZCV, nzcv);
}

IR::U32 IREmitter::GetW(Reg reg) {
    if (reg == Reg::ZR)
        return Imm32(0);
    return Inst<IR::U32>(Opcode::A64GetW, IR::Value(reg));
}

IR::U64 IREmitter::GetX(Reg reg) {
    if (reg == Reg::ZR)
        return Imm64(0);
    return Inst<IR::U64>(Opcode::A64GetX, IR::Value(reg));
}

IR::U64 IREmitter::GetSP() {
    return Inst<IR::U64>(Opcode::A64GetSP);
}

void IREmitter::SetW(const Reg reg, const IR::U32& value) {
    if (reg == Reg::ZR)
        return;
    Inst(Opcode::A64SetW, IR::Value(reg), value);
}

void IREmitter::SetX(const Reg reg, const IR::U64& value) {
    if (reg == Reg::ZR)
        return;
    Inst(Opcode::A64SetX, IR::Value(reg), value);
}

void IREmitter::SetSP(const IR::U64& value) {
    Inst(Opcode::A64SetSP, value);
}

} // namespace IR
} // namespace Dynarmic

@@ -31,10 +31,15 @@ public:
    u64 PC();
    u64 AlignPC(size_t alignment);

    IR::U1 GetCFlag();
    void SetNZCV(const IR::NZCV& nzcv);

    IR::U32 GetW(Reg source_reg);
    IR::U64 GetX(Reg source_reg);
    IR::U64 GetSP();
    void SetW(Reg dest_reg, const IR::U32& value);
    void SetX(Reg dest_reg, const IR::U64& value);
    void SetSP(const IR::U64& value);
};

} // namespace IR

@@ -9,6 +9,114 @@
namespace Dynarmic {
namespace A64 {

bool TranslatorVisitor::ADD_imm(bool sf, Imm<2> shift, Imm<12> imm12, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;

    u64 imm;
    switch (shift.ZeroExtend()) {
    case 0b00:
        imm = imm12.ZeroExtend<u64>();
        break;
    case 0b01:
        imm = imm12.ZeroExtend<u64>() << 12;
        break;
    default:
        return ReservedValue();
    }

    auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn);

    auto result = ir.Add(operand1, I(datasize, imm));

    if (Rd == Reg::SP) {
        SP(datasize, result);
    } else {
        X(datasize, Rd, result);
    }

    return true;
}

bool TranslatorVisitor::ADDS_imm(bool sf, Imm<2> shift, Imm<12> imm12, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;

    u64 imm;
    switch (shift.ZeroExtend()) {
    case 0b00:
        imm = imm12.ZeroExtend<u64>();
        break;
    case 0b01:
        imm = imm12.ZeroExtend<u64>() << 12;
        break;
    default:
        return ReservedValue();
    }

    auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn);

    auto result = ir.Add(operand1, I(datasize, imm));

    ir.SetNZCV(ir.NZCVFrom(result));

    X(datasize, Rd, result);

    return true;
}

bool TranslatorVisitor::SUB_imm(bool sf, Imm<2> shift, Imm<12> imm12, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;

    u64 imm;
    switch (shift.ZeroExtend()) {
    case 0b00:
        imm = imm12.ZeroExtend<u64>();
        break;
    case 0b01:
        imm = imm12.ZeroExtend<u64>() << 12;
        break;
    default:
        return ReservedValue();
    }

    auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn);

    auto result = ir.Sub(operand1, I(datasize, imm));

    if (Rd == Reg::SP) {
        SP(datasize, result);
    } else {
        X(datasize, Rd, result);
    }

    return true;
}

bool TranslatorVisitor::SUBS_imm(bool sf, Imm<2> shift, Imm<12> imm12, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;

    u64 imm;
    switch (shift.ZeroExtend()) {
    case 0b00:
        imm = imm12.ZeroExtend<u64>();
        break;
    case 0b01:
        imm = imm12.ZeroExtend<u64>() << 12;
        break;
    default:
        return ReservedValue();
    }

    auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn);

    auto result = ir.Sub(operand1, I(datasize, imm));

    ir.SetNZCV(ir.NZCVFrom(result));

    X(datasize, Rd, result);

    return true;
}

bool TranslatorVisitor::ADD_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;

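As the switch on shift in the translators above shows, the add/sub immediate is a 12-bit value that is either used as-is (shift == 0b00) or shifted left by 12 (shift == 0b01); any other shift encoding is reserved. A small illustrative helper (not part of the commit) that computes the effective immediate:

#include <cstdint>

// Effective immediate for ADD/SUB (immediate): imm12, optionally LSL #12.
// Encodable values are therefore 0x0-0xFFF and 0x1000-0xFFF000 in steps of 0x1000.
std::uint64_t DecodeAddSubImmediate(std::uint32_t imm12, std::uint32_t shift) {
    return shift == 1 ? std::uint64_t(imm12) << 12 : std::uint64_t(imm12);
}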
@@ -27,5 +135,199 @@ bool TranslatorVisitor::ADD_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Re
    return true;
}

bool TranslatorVisitor::ADDS_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;

    if (shift == 0b11) return ReservedValue();
    if (!sf && imm6.Bit<5>()) return ReservedValue();

    u8 shift_amount = imm6.ZeroExtend<u8>();

    auto operand1 = X(datasize, Rn);
    auto operand2 = ShiftReg(datasize, Rm, shift, ir.Imm8(shift_amount));

    auto result = ir.Add(operand1, operand2);

    ir.SetNZCV(ir.NZCVFrom(result));

    X(datasize, Rd, result);

    return true;
}

bool TranslatorVisitor::SUB_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;

    if (shift == 0b11) return ReservedValue();
    if (!sf && imm6.Bit<5>()) return ReservedValue();

    u8 shift_amount = imm6.ZeroExtend<u8>();

    auto operand1 = X(datasize, Rn);
    auto operand2 = ShiftReg(datasize, Rm, shift, ir.Imm8(shift_amount));

    auto result = ir.Sub(operand1, operand2);

    X(datasize, Rd, result);

    return true;
}

bool TranslatorVisitor::SUBS_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;

    if (shift == 0b11) return ReservedValue();
    if (!sf && imm6.Bit<5>()) return ReservedValue();

    u8 shift_amount = imm6.ZeroExtend<u8>();

    auto operand1 = X(datasize, Rn);
    auto operand2 = ShiftReg(datasize, Rm, shift, ir.Imm8(shift_amount));

    auto result = ir.Sub(operand1, operand2);

    ir.SetNZCV(ir.NZCVFrom(result));

    X(datasize, Rd, result);

    return true;
}

bool TranslatorVisitor::ADD_ext(bool sf, Reg Rm, Imm<3> option, Imm<3> imm3, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;
    u8 shift = imm3.ZeroExtend<u8>();
    if (shift > 4) return ReservedValue();

    auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn);
    auto operand2 = ExtendReg(datasize, Rm, option, shift);

    auto result = ir.Add(operand1, operand2);

    if (Rd == Reg::SP) {
        SP(datasize, result);
    } else {
        X(datasize, Rd, result);
    }

    return true;
}

bool TranslatorVisitor::ADDS_ext(bool sf, Reg Rm, Imm<3> option, Imm<3> imm3, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;
    u8 shift = imm3.ZeroExtend<u8>();
    if (shift > 4) return ReservedValue();

    auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn);
    auto operand2 = ExtendReg(datasize, Rm, option, shift);

    auto result = ir.Add(operand1, operand2);

    ir.SetNZCV(ir.NZCVFrom(result));

    if (Rd == Reg::SP) {
        SP(datasize, result);
    } else {
        X(datasize, Rd, result);
    }

    return true;
}

bool TranslatorVisitor::SUB_ext(bool sf, Reg Rm, Imm<3> option, Imm<3> imm3, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;
    u8 shift = imm3.ZeroExtend<u8>();
    if (shift > 4) return ReservedValue();

    auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn);
    auto operand2 = ExtendReg(datasize, Rm, option, shift);

    auto result = ir.Sub(operand1, operand2);

    if (Rd == Reg::SP) {
        SP(datasize, result);
    } else {
        X(datasize, Rd, result);
    }

    return true;
}

bool TranslatorVisitor::SUBS_ext(bool sf, Reg Rm, Imm<3> option, Imm<3> imm3, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;
    u8 shift = imm3.ZeroExtend<u8>();
    if (shift > 4) return ReservedValue();

    auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn);
    auto operand2 = ExtendReg(datasize, Rm, option, shift);

    auto result = ir.Sub(operand1, operand2);

    ir.SetNZCV(ir.NZCVFrom(result));

    if (Rd == Reg::SP) {
        SP(datasize, result);
    } else {
        X(datasize, Rd, result);
    }

    return true;
}

bool TranslatorVisitor::ADC(bool sf, Reg Rm, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;

    auto operand1 = X(datasize, Rn);
    auto operand2 = X(datasize, Rm);

    auto result = ir.AddWithCarry(operand1, operand2, ir.GetCFlag());

    X(datasize, Rd, result);

    return true;
}

bool TranslatorVisitor::ADCS(bool sf, Reg Rm, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;

    auto operand1 = X(datasize, Rn);
    auto operand2 = X(datasize, Rm);

    auto result = ir.AddWithCarry(operand1, operand2, ir.GetCFlag());

    ir.SetNZCV(ir.NZCVFrom(result));

    X(datasize, Rd, result);

    return true;
}

bool TranslatorVisitor::SBC(bool sf, Reg Rm, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;

    auto operand1 = X(datasize, Rn);
    auto operand2 = X(datasize, Rm);

    auto result = ir.SubWithCarry(operand1, operand2, ir.GetCFlag());

    X(datasize, Rd, result);

    return true;
}

bool TranslatorVisitor::SBCS(bool sf, Reg Rm, Reg Rn, Reg Rd) {
    size_t datasize = sf ? 64 : 32;

    auto operand1 = X(datasize, Rn);
    auto operand2 = X(datasize, Rm);

    auto result = ir.SubWithCarry(operand1, operand2, ir.GetCFlag());

    ir.SetNZCV(ir.NZCVFrom(result));

    X(datasize, Rd, result);

    return true;
}

} // namespace A64
} // namespace Dynarmic

@@ -25,6 +25,18 @@ bool TranslatorVisitor::ReservedValue() {
    return false;
}

IR::U32U64 TranslatorVisitor::I(size_t bitsize, u64 value) {
    switch (bitsize) {
    case 32:
        return ir.Imm32(static_cast<u32>(value));
    case 64:
        return ir.Imm64(value);
    default:
        ASSERT_MSG(false, "Imm - get: Invalid bitsize");
        return {};
    }
}

IR::U32U64 TranslatorVisitor::X(size_t bitsize, Reg reg) {
    switch (bitsize) {
    case 32:

@@ -50,6 +62,31 @@ void TranslatorVisitor::X(size_t bitsize, Reg reg, IR::U32U64 value) {
    }
}

IR::U32U64 TranslatorVisitor::SP(size_t bitsize) {
    switch (bitsize) {
    case 32:
        return ir.LeastSignificantWord(ir.GetSP());
    case 64:
        return ir.GetSP();
    default:
        ASSERT_MSG(false, "SP - get: Invalid bitsize");
        return {};
    }
}

void TranslatorVisitor::SP(size_t bitsize, IR::U32U64 value) {
    switch (bitsize) {
    case 32:
        ir.SetSP(ir.ZeroExtendWordToLong(value));
        break;
    case 64:
        ir.SetSP(value);
        break;
    default:
        ASSERT_MSG(false, "SP - set: Invalid bitsize");
    }
}

IR::U32U64 TranslatorVisitor::ShiftReg(size_t bitsize, Reg reg, Imm<2> shift, IR::U8 amount) {
    auto result = X(bitsize, reg);
    switch (shift.ZeroExtend()) {

@@ -66,5 +103,81 @@ IR::U32U64 TranslatorVisitor::ShiftReg(size_t bitsize, Reg reg, Imm<2> shift, IR
    return {};
}

IR::U32U64 TranslatorVisitor::ExtendReg(size_t bitsize, Reg reg, Imm<3> option, u8 shift) {
    ASSERT(shift <= 4);
    ASSERT(bitsize == 32 || bitsize == 64);
    IR::UAny val = X(bitsize, reg);
    size_t len;
    IR::U32U64 extended;
    bool signed_extend;

    switch (option.ZeroExtend()) {
    case 0b000: { // UXTB
        val = ir.LeastSignificantByte(val);
        len = 8;
        signed_extend = false;
        break;
    }
    case 0b001: { // UXTH
        val = ir.LeastSignificantHalf(val);
        len = 16;
        signed_extend = false;
        break;
    }
    case 0b010: { // UXTW
        if (bitsize != 32) {
            val = ir.LeastSignificantWord(val);
        }
        len = 32;
        signed_extend = false;
        break;
    }
    case 0b011: { // UXTX
        len = 64;
        signed_extend = false;
        break;
    }
    case 0b100: { // SXTB
        val = ir.LeastSignificantByte(val);
        len = 8;
        signed_extend = true;
        break;
    }
    case 0b101: { // SXTH
        val = ir.LeastSignificantHalf(val);
        len = 16;
        signed_extend = true;
        break;
    }
    case 0b110: { // SXTW
        if (bitsize != 32) {
            val = ir.LeastSignificantWord(val);
        }
        len = 32;
        signed_extend = true;
        break;
    }
    case 0b111: { // SXTX
        len = 64;
        signed_extend = true;
        break;
    }
    default:
        ASSERT_MSG(false, "Unreachable");
    }

    if (len < bitsize) {
        if (bitsize == 32) {
            extended = signed_extend ? ir.SignExtendToWord(val) : ir.ZeroExtendToWord(val);
        } else {
            extended = signed_extend ? ir.SignExtendToLong(val) : ir.ZeroExtendToLong(val);
        }
    } else {
        extended = val;
    }

    return ir.LogicalShiftLeft(extended, ir.Imm8(shift));
}

} // namespace A64
} // namespace Dynarmic

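ExtendReg above mirrors the A64 ExtendReg() pseudocode: take the low 8, 16, 32 or 64 bits of the register, sign- or zero-extend to the operation's datasize, then shift left by 0-4. A standalone model of that behaviour on plain integers, for reference only (it is not dynarmic code and sidesteps the IR types):

#include <cstdint>

// option: 0-3 = UXTB/UXTH/UXTW/UXTX, 4-7 = SXTB/SXTH/SXTW/SXTX; shift: 0-4.
std::uint64_t ExtendRegModel(std::uint64_t value, unsigned option, unsigned shift, unsigned datasize) {
    const bool sign_extend = option >= 4;
    const unsigned len = 8u << (option & 3);          // 8, 16, 32 or 64 source bits
    std::uint64_t extended;
    if (len >= 64) {
        extended = value;
    } else {
        const std::uint64_t low = value & ((1ULL << len) - 1);
        if (sign_extend) {
            const std::uint64_t sign_bit = 1ULL << (len - 1);
            extended = (low ^ sign_bit) - sign_bit;   // two's-complement sign extension
        } else {
            extended = low;
        }
    }
    extended <<= shift;
    if (datasize == 32)
        extended &= 0xFFFFFFFF;                       // results are truncated to the datasize
    return extended;
}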
@@ -25,10 +25,14 @@ struct TranslatorVisitor final {
    bool UnpredictableInstruction();
    bool ReservedValue();

    IR::U32U64 I(size_t bitsize, u64 value);
    IR::U32U64 X(size_t bitsize, Reg reg);
    void X(size_t bitsize, Reg reg, IR::U32U64 value);
    IR::U32U64 SP(size_t bitsize);
    void SP(size_t bitsize, IR::U32U64 value);

    IR::U32U64 ShiftReg(size_t bitsize, Reg reg, Imm<2> shift, IR::U8 amount);
    IR::U32U64 ExtendReg(size_t bitsize, Reg reg, Imm<3> option, u8 shift);

    // Data processing - Immediate - PC relative addressing
    bool ADR(Imm<2> immlo, Imm<19> immhi, Reg Rd);

@@ -49,11 +49,17 @@ ResultAndCarry<U32> IREmitter::MostSignificantWord(const U64& value) {
    return {result, carry_out};
}

U16 IREmitter::LeastSignificantHalf(const U32& value) {
U16 IREmitter::LeastSignificantHalf(U32U64 value) {
    if (value.GetType() == Type::U64) {
        value = LeastSignificantWord(value);
    }
    return Inst<U16>(Opcode::LeastSignificantHalf, value);
}

U8 IREmitter::LeastSignificantByte(const U32& value) {
U8 IREmitter::LeastSignificantByte(U32U64 value) {
    if (value.GetType() == Type::U64) {
        value = LeastSignificantWord(value);
    }
    return Inst<U8>(Opcode::LeastSignificantByte, value);
}

@@ -69,6 +75,10 @@ U1 IREmitter::IsZero64(const U64& value) {
    return Inst<U1>(Opcode::IsZero64, value);
}

NZCV IREmitter::NZCVFrom(const Value& value) {
    return Inst<NZCV>(Opcode::GetNZCVFromOp, value);
}

ResultAndCarry<U32> IREmitter::LogicalShiftLeft(const U32& value_in, const U8& shift_amount, const U1& carry_in) {
    auto result = Inst<U32>(Opcode::LogicalShiftLeft32, value_in, shift_amount, carry_in);
    auto carry_out = Inst<U1>(Opcode::GetCarryFromOp, result);

@@ -135,48 +145,75 @@ U32U64 IREmitter::RotateRight(const U32U64& value_in, const U8& shift_amount) {
    }
}

ResultAndCarryAndOverflow<U32> IREmitter::AddWithCarry(const Value& a, const Value& b, const U1& carry_in) {
    auto result = Inst<U32>(Opcode::AddWithCarry, a, b, carry_in);
ResultAndCarryAndOverflow<U32> IREmitter::AddWithCarry(const U32& a, const U32& b, const U1& carry_in) {
    auto result = Inst<U32>(Opcode::Add32, a, b, carry_in);
    auto carry_out = Inst<U1>(Opcode::GetCarryFromOp, result);
    auto overflow = Inst<U1>(Opcode::GetOverflowFromOp, result);
    return {result, carry_out, overflow};
}

U32U64 IREmitter::AddWithCarry(const U32U64& a, const U32U64& b, const U1& carry_in) {
    ASSERT(a.GetType() == b.GetType());
    if (a.GetType() == Type::U32) {
        return Inst<U32>(Opcode::Add32, a, b, carry_in);
    } else {
        return Inst<U64>(Opcode::Add64, a, b, carry_in);
    }
}

U32 IREmitter::Add(const U32& a, const U32& b) {
    return Inst<U32>(Opcode::AddWithCarry, a, b, Imm1(0));
    return Inst<U32>(Opcode::Add32, a, b, Imm1(0));
}

U64 IREmitter::Add(const U64& a, const U64& b) {
    return Inst<U64>(Opcode::Add64, a, b);
    return Inst<U64>(Opcode::Add64, a, b, Imm1(0));
}

U32U64 IREmitter::Add(const U32U64& a, const U32U64& b) {
    ASSERT(a.GetType() == b.GetType());
    if (a.GetType() == Type::U32) {
        return Inst<U32>(Opcode::AddWithCarry, a, b, Imm1(0));
        return Inst<U32>(Opcode::Add32, a, b, Imm1(0));
    } else {
        return Inst<U64>(Opcode::Add64, a, b);
        return Inst<U64>(Opcode::Add64, a, b, Imm1(0));
    }
}

ResultAndCarryAndOverflow<U32> IREmitter::SubWithCarry(const U32& a, const U32& b, const U1& carry_in) {
    // This is equivalent to AddWithCarry(a, Not(b), carry_in).
    auto result = Inst<U32>(Opcode::SubWithCarry, a, b, carry_in);
    auto result = Inst<U32>(Opcode::Sub32, a, b, carry_in);
    auto carry_out = Inst<U1>(Opcode::GetCarryFromOp, result);
    auto overflow = Inst<U1>(Opcode::GetOverflowFromOp, result);
    return {result, carry_out, overflow};
}

U32U64 IREmitter::SubWithCarry(const U32U64& a, const U32U64& b, const U1& carry_in) {
    ASSERT(a.GetType() == b.GetType());
    if (a.GetType() == Type::U32) {
        return Inst<U32>(Opcode::Sub32, a, b, carry_in);
    } else {
        return Inst<U64>(Opcode::Sub64, a, b, carry_in);
    }
}

U32 IREmitter::Sub(const U32& a, const U32& b) {
    return Inst<U32>(Opcode::SubWithCarry, a, b, Imm1(1));
    return Inst<U32>(Opcode::Sub32, a, b, Imm1(1));
}

U64 IREmitter::Sub(const U64& a, const U64& b) {
    return Inst<U64>(Opcode::Sub64, a, b);
    return Inst<U64>(Opcode::Sub64, a, b, Imm1(1));
}

U32U64 IREmitter::Sub(const U32U64& a, const U32U64& b) {
    ASSERT(a.GetType() == b.GetType());
    if (a.GetType() == Type::U32) {
        return Inst<U32>(Opcode::Sub32, a, b, Imm1(1));
    } else {
        return Inst<U64>(Opcode::Sub64, a, b, Imm1(1));
    }
}

U32 IREmitter::Mul(const U32& a, const U32& b) {
    return Inst<U32>(Opcode::Mul, a, b);
    return Inst<U32>(Opcode::Mul32, a, b);
}

U64 IREmitter::Mul(const U64& a, const U64& b) {

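For reference, the flag behaviour these IR ops model is the A64 AddWithCarry() pseudocode; the comment in SubWithCarry ("equivalent to AddWithCarry(a, Not(b), carry_in)") and the Imm1(1) carry-in used for a plain Sub both follow from it. A standalone model of the 64-bit case (illustrative only, not dynarmic code):

#include <cstdint>
#include <tuple>

// Returns {result, N, Z, C, V} for a + b + carry_in, as the A64 pseudocode defines it.
std::tuple<std::uint64_t, bool, bool, bool, bool>
AddWithCarry64(std::uint64_t a, std::uint64_t b, bool carry_in) {
    const std::uint64_t result = a + b + (carry_in ? 1 : 0);
    const bool n = (result >> 63) != 0;                        // negative
    const bool z = result == 0;                                // zero
    const bool c = result < a || (carry_in && result == a);    // unsigned carry out
    const bool v = ((~(a ^ b) & (a ^ result)) >> 63) != 0;     // signed overflow
    return {result, n, z, c, v};
}

// SUB/SUBS is AddWithCarry(a, ~b, 1) and SBC/SBCS is AddWithCarry(a, ~b, C),
// which is why the IR gives Sub32/Sub64 a carry operand and plain Sub uses Imm1(1).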
@@ -199,6 +236,38 @@ U32 IREmitter::Not(const U32& a) {
    return Inst<U32>(Opcode::Not, a);
}

U64 IREmitter::SignExtendToLong(const UAny& a) {
    switch (a.GetType()) {
    case Type::U8:
        return Inst<U64>(Opcode::SignExtendByteToLong, a);
    case Type::U16:
        return Inst<U64>(Opcode::SignExtendHalfToLong, a);
    case Type::U32:
        return Inst<U64>(Opcode::SignExtendWordToLong, a);
    case Type::U64:
        return U64(a);
    default:
        ASSERT_MSG(false, "Unreachable");
        return {};
    }
}

U32 IREmitter::SignExtendToWord(const UAny& a) {
    switch (a.GetType()) {
    case Type::U8:
        return Inst<U32>(Opcode::SignExtendByteToWord, a);
    case Type::U16:
        return Inst<U32>(Opcode::SignExtendHalfToWord, a);
    case Type::U32:
        return U32(a);
    case Type::U64:
        return Inst<U32>(Opcode::LeastSignificantWord, a);
    default:
        ASSERT_MSG(false, "Unreachable");
        return {};
    }
}

U64 IREmitter::SignExtendWordToLong(const U32& a) {
    return Inst<U64>(Opcode::SignExtendWordToLong, a);
}

@@ -211,6 +280,38 @@ U32 IREmitter::SignExtendByteToWord(const U8& a) {
    return Inst<U32>(Opcode::SignExtendByteToWord, a);
}

U64 IREmitter::ZeroExtendToLong(const UAny& a) {
    switch (a.GetType()) {
    case Type::U8:
        return Inst<U64>(Opcode::ZeroExtendByteToLong, a);
    case Type::U16:
        return Inst<U64>(Opcode::ZeroExtendHalfToLong, a);
    case Type::U32:
        return Inst<U64>(Opcode::ZeroExtendWordToLong, a);
    case Type::U64:
        return U64(a);
    default:
        ASSERT_MSG(false, "Unreachable");
        return {};
    }
}

U32 IREmitter::ZeroExtendToWord(const UAny& a) {
    switch (a.GetType()) {
    case Type::U8:
        return Inst<U32>(Opcode::ZeroExtendByteToWord, a);
    case Type::U16:
        return Inst<U32>(Opcode::ZeroExtendHalfToWord, a);
    case Type::U32:
        return U32(a);
    case Type::U64:
        return Inst<U32>(Opcode::LeastSignificantWord, a);
    default:
        ASSERT_MSG(false, "Unreachable");
        return {};
    }
}

U64 IREmitter::ZeroExtendWordToLong(const U32& a) {
    return Inst<U64>(Opcode::ZeroExtendWordToLong, a);
}

@@ -76,12 +76,15 @@ public:
    U64 Pack2x32To1x64(const U32& lo, const U32& hi);
    U32 LeastSignificantWord(const U64& value);
    ResultAndCarry<U32> MostSignificantWord(const U64& value);
    U16 LeastSignificantHalf(const U32& value);
    U8 LeastSignificantByte(const U32& value);
    U16 LeastSignificantHalf(U32U64 value);
    U8 LeastSignificantByte(U32U64 value);
    U1 MostSignificantBit(const U32& value);
    U1 IsZero(const U32& value);
    U1 IsZero64(const U64& value);

    // This pseudo-instruction may only be added to instructions that support it.
    NZCV NZCVFrom(const Value& value);

    ResultAndCarry<U32> LogicalShiftLeft(const U32& value_in, const U8& shift_amount, const U1& carry_in);
    ResultAndCarry<U32> LogicalShiftRight(const U32& value_in, const U8& shift_amount, const U1& carry_in);
    ResultAndCarry<U32> ArithmeticShiftRight(const U32& value_in, const U8& shift_amount, const U1& carry_in);

@@ -92,25 +95,32 @@ public:
    U32U64 ArithmeticShiftRight(const U32U64& value_in, const U8& shift_amount);
    U32U64 RotateRight(const U32U64& value_in, const U8& shift_amount);
    ResultAndCarry<U32> RotateRightExtended(const U32& value_in, const U1& carry_in);
    ResultAndCarryAndOverflow<U32> AddWithCarry(const Value& a, const Value& b, const U1& carry_in);
    ResultAndCarryAndOverflow<U32> AddWithCarry(const U32& a, const U32& b, const U1& carry_in);
    ResultAndCarryAndOverflow<U32> SubWithCarry(const U32& a, const U32& b, const U1& carry_in);
    U32U64 AddWithCarry(const U32U64& a, const U32U64& b, const U1& carry_in);
    U32U64 SubWithCarry(const U32U64& a, const U32U64& b, const U1& carry_in);
    U32 Add(const U32& a, const U32& b);
    U64 Add(const U64& a, const U64& b);
    U32U64 Add(const U32U64& a, const U32U64& b);
    ResultAndCarryAndOverflow<U32> SubWithCarry(const U32& a, const U32& b, const U1& carry_in);
    U32 Sub(const U32& a, const U32& b);
    U64 Sub(const U64& a, const U64& b);
    U32U64 Sub(const U32U64& a, const U32U64& b);
    U32 Mul(const U32& a, const U32& b);
    U64 Mul(const U64& a, const U64& b);
    U32 And(const U32& a, const U32& b);
    U32 Eor(const U32& a, const U32& b);
    U32 Or(const U32& a, const U32& b);
    U32 Not(const U32& a);
    U64 SignExtendWordToLong(const U32& a);
    U32 SignExtendHalfToWord(const U16& a);
    U32 SignExtendToWord(const UAny& a);
    U64 SignExtendToLong(const UAny& a);
    U32 SignExtendByteToWord(const U8& a);
    U64 ZeroExtendWordToLong(const U32& a);
    U32 ZeroExtendHalfToWord(const U16& a);
    U32 SignExtendHalfToWord(const U16& a);
    U64 SignExtendWordToLong(const U32& a);
    U32 ZeroExtendToWord(const UAny& a);
    U64 ZeroExtendToLong(const UAny& a);
    U32 ZeroExtendByteToWord(const U8& a);
    U32 ZeroExtendHalfToWord(const U16& a);
    U64 ZeroExtendWordToLong(const U32& a);
    U32 ByteReverseWord(const U32& a);
    U16 ByteReverseHalf(const U16& a);
    U64 ByteReverseDual(const U64& a);

@@ -138,6 +138,7 @@ bool Inst::ReadsFromCoreRegister() const {
    case Opcode::A32GetExtendedRegister64:
    case Opcode::A64GetW:
    case Opcode::A64GetX:
    case Opcode::A64GetSP:
        return true;

    default:

@@ -153,6 +154,7 @@ bool Inst::WritesToCoreRegister() const {
    case Opcode::A32BXWritePC:
    case Opcode::A64SetW:
    case Opcode::A64SetX:
    case Opcode::A64SetSP:
        return true;

    default:

@@ -252,26 +254,55 @@ bool Inst::MayHaveSideEffects() const {
           IsCoprocessorInstruction();
}

bool Inst::IsAPseudoOperation() const {
    switch (op) {
    case Opcode::GetCarryFromOp:
    case Opcode::GetOverflowFromOp:
    case Opcode::GetGEFromOp:
    case Opcode::GetNZCVFromOp:
        return true;

    default:
        return false;
    }
}

bool Inst::MayGetNZCVFromOp() const {
    switch (op) {
    case Opcode::Add32:
    case Opcode::Add64:
    case Opcode::Sub32:
    case Opcode::Sub64:
        return true;

    default:
        return false;
    }
}

bool Inst::AreAllArgsImmediates() const {
    return std::all_of(args.begin(), args.begin() + NumArgs(), [](const auto& value){ return value.IsImmediate(); });
}

bool Inst::HasAssociatedPseudoOperation() const {
    return carry_inst || overflow_inst || ge_inst;
    return carry_inst || overflow_inst || ge_inst || nzcv_inst;
}

Inst* Inst::GetAssociatedPseudoOperation(Opcode opcode) {
    // This is faster than doing a search through the block.
    switch (opcode) {
    case IR::Opcode::GetCarryFromOp:
    case Opcode::GetCarryFromOp:
        ASSERT(!carry_inst || carry_inst->GetOpcode() == Opcode::GetCarryFromOp);
        return carry_inst;
    case IR::Opcode::GetOverflowFromOp:
    case Opcode::GetOverflowFromOp:
        ASSERT(!overflow_inst || overflow_inst->GetOpcode() == Opcode::GetOverflowFromOp);
        return overflow_inst;
    case IR::Opcode::GetGEFromOp:
    case Opcode::GetGEFromOp:
        ASSERT(!ge_inst || ge_inst->GetOpcode() == Opcode::GetGEFromOp);
        return ge_inst;
    case Opcode::GetNZCVFromOp:
        ASSERT(!nzcv_inst || nzcv_inst->GetOpcode() == Opcode::GetNZCVFromOp);
        return nzcv_inst;
    default:
        break;
    }

@@ -345,6 +376,11 @@ void Inst::Use(const Value& value) {
        ASSERT_MSG(!value.GetInst()->ge_inst, "Only one of each type of pseudo-op allowed");
        value.GetInst()->ge_inst = this;
        break;
    case Opcode::GetNZCVFromOp:
        ASSERT_MSG(!value.GetInst()->nzcv_inst, "Only one of each type of pseudo-op allowed");
        ASSERT_MSG(MayGetNZCVFromOp(), "This instruction doesn't support the GetNZCVFromOp pseudo-op");
        value.GetInst()->nzcv_inst = this;
        break;
    default:
        break;
    }

@@ -366,6 +402,10 @@ void Inst::UndoUse(const Value& value) {
        ASSERT(value.GetInst()->ge_inst->GetOpcode() == Opcode::GetGEFromOp);
        value.GetInst()->ge_inst = nullptr;
        break;
    case Opcode::GetNZCVFromOp:
        ASSERT(value.GetInst()->nzcv_inst->GetOpcode() == Opcode::GetNZCVFromOp);
        value.GetInst()->nzcv_inst = nullptr;
        break;
    default:
        break;
    }

@@ -76,6 +76,13 @@ public:
    /// Determines whether or not this instruction may have side-effects.
    bool MayHaveSideEffects() const;

    /// Determines whether or not this instruction is a pseudo-instruction.
    /// Pseudo-instructions depend on their parent instructions for their semantics.
    bool IsAPseudoOperation() const;

    /// Determines whether or not this instruction supports the GetNZCVFromOp pseudo-operation.
    bool MayGetNZCVFromOp() const;

    /// Determines if all arguments of this instruction are immediates.
    bool AreAllArgsImmediates() const;

@@ -116,6 +123,7 @@ private:
    Inst* ge_inst;
    };
    Inst* overflow_inst = nullptr;
    Inst* nzcv_inst = nullptr;
};

} // namespace IR

@@ -47,6 +47,7 @@ enum class Type {
    F64 = 1 << 11,
    F128 = 1 << 12,
    CoprocInfo = 1 << 13,
    NZCVFlags = 1 << 14,
};

constexpr Type operator|(Type a, Type b) {

@@ -35,10 +35,14 @@ A32OPC(GetFpscrNZCV, T::U32,
A32OPC(SetFpscrNZCV, T::Void, T::U32, )

// A64 Context getters/setters
A64OPC(GetCFlag, T::U1, )
A64OPC(SetNZCV, T::Void, T::NZCVFlags )
A64OPC(GetW, T::U32, T::A64Reg )
A64OPC(GetX, T::U64, T::A64Reg )
A64OPC(GetSP, T::U64, )
A64OPC(SetW, T::Void, T::A64Reg, T::U32 )
A64OPC(SetX, T::Void, T::A64Reg, T::U64 )
A64OPC(SetSP, T::Void, T::U64 )

// Hints
OPCODE(PushRSB, T::Void, T::U64 )

@@ -47,6 +51,7 @@ OPCODE(PushRSB, T::Void, T::U64
OPCODE(GetCarryFromOp, T::U1, T::U32 )
OPCODE(GetOverflowFromOp, T::U1, T::U32 )
OPCODE(GetGEFromOp, T::U32, T::U32 )
OPCODE(GetNZCVFromOp, T::NZCVFlags, T::Opaque )

// Calculations
OPCODE(Pack2x32To1x64, T::U64, T::U32, T::U32 )

@@ -66,22 +71,26 @@ OPCODE(ArithmeticShiftRight64, T::U64, T::U64, T::U8
OPCODE(RotateRight32, T::U32, T::U32, T::U8, T::U1 )
OPCODE(RotateRight64, T::U64, T::U64, T::U8 )
OPCODE(RotateRightExtended, T::U32, T::U32, T::U1 )
OPCODE(AddWithCarry, T::U32, T::U32, T::U32, T::U1 )
OPCODE(SubWithCarry, T::U32, T::U32, T::U32, T::U1 )
OPCODE(Add64, T::U64, T::U64, T::U64 )
OPCODE(Sub64, T::U64, T::U64, T::U64 )
OPCODE(Mul, T::U32, T::U32, T::U32 )
OPCODE(Add32, T::U32, T::U32, T::U32, T::U1 )
OPCODE(Add64, T::U64, T::U64, T::U64, T::U1 )
OPCODE(Sub32, T::U32, T::U32, T::U32, T::U1 )
OPCODE(Sub64, T::U64, T::U64, T::U64, T::U1 )
OPCODE(Mul32, T::U32, T::U32, T::U32 )
OPCODE(Mul64, T::U64, T::U64, T::U64 )
OPCODE(And, T::U32, T::U32, T::U32 )
OPCODE(Eor, T::U32, T::U32, T::U32 )
OPCODE(Or, T::U32, T::U32, T::U32 )
OPCODE(Not, T::U32, T::U32 )
OPCODE(SignExtendWordToLong, T::U64, T::U32 )
OPCODE(SignExtendHalfToWord, T::U32, T::U16 )
OPCODE(SignExtendByteToWord, T::U32, T::U8 )
OPCODE(ZeroExtendWordToLong, T::U64, T::U32 )
OPCODE(ZeroExtendHalfToWord, T::U32, T::U16 )
OPCODE(SignExtendHalfToWord, T::U32, T::U16 )
OPCODE(SignExtendByteToLong, T::U64, T::U8 )
OPCODE(SignExtendHalfToLong, T::U64, T::U16 )
OPCODE(SignExtendWordToLong, T::U64, T::U32 )
OPCODE(ZeroExtendByteToWord, T::U32, T::U8 )
OPCODE(ZeroExtendHalfToWord, T::U32, T::U16 )
OPCODE(ZeroExtendByteToLong, T::U64, T::U8 )
OPCODE(ZeroExtendHalfToLong, T::U64, T::U16 )
OPCODE(ZeroExtendWordToLong, T::U64, T::U32 )
OPCODE(ByteReverseWord, T::U32, T::U32 )
OPCODE(ByteReverseHalf, T::U16, T::U16 )
OPCODE(ByteReverseDual, T::U64, T::U64 )

@@ -94,10 +94,12 @@ using U16 = TypedValue<Type::U16>;
using U32 = TypedValue<Type::U32>;
using U64 = TypedValue<Type::U64>;
using U32U64 = TypedValue<Type::U32 | Type::U64>;
using UAny = TypedValue<Type::U8 | Type::U16 | Type::U32 | Type::U64>;
using F32 = TypedValue<Type::F32>;
using F64 = TypedValue<Type::F64>;
using F128 = TypedValue<Type::F128>;
using F32F64 = TypedValue<Type::F32 | Type::F64>;
using NZCV = TypedValue<Type::NZCVFlags>;

} // namespace IR
} // namespace Dynarmic