diff --git a/src/backend_x64/a64_emit_x64.cpp b/src/backend_x64/a64_emit_x64.cpp index dfca927f..17290e62 100644 --- a/src/backend_x64/a64_emit_x64.cpp +++ b/src/backend_x64/a64_emit_x64.cpp @@ -115,6 +115,23 @@ A64EmitX64::BlockDescriptor A64EmitX64::Emit(IR::Block& block) { return block_desc; } +void A64EmitX64::EmitA64GetCFlag(A64EmitContext& ctx, IR::Inst* inst) { + Xbyak::Reg32 result = ctx.reg_alloc.ScratchGpr().cvt32(); + code->mov(result, dword[r15 + offsetof(A64JitState, CPSR_nzcv)]); + code->shr(result, 29); + code->and_(result, 1); + ctx.reg_alloc.DefineValue(inst, result); +} + +void A64EmitX64::EmitA64SetNZCV(A64EmitContext& ctx, IR::Inst* inst) { + auto args = ctx.reg_alloc.GetArgumentInfo(inst); + Xbyak::Reg32 to_store = ctx.reg_alloc.UseGpr(args[0]).cvt32(); + code->and_(to_store, 0b11000001'00000001); + code->imul(to_store, to_store, 0b00010000'00100001); + code->and_(to_store, 0xFF000000); + code->mov(dword[r15 + offsetof(A64JitState, CPSR_nzcv)], to_store); +} + void A64EmitX64::EmitA64GetW(A64EmitContext& ctx, IR::Inst* inst) { A64::Reg reg = inst->GetArg(0).GetA64RegRef(); @@ -131,6 +148,12 @@ void A64EmitX64::EmitA64GetX(A64EmitContext& ctx, IR::Inst* inst) { ctx.reg_alloc.DefineValue(inst, result); } +void A64EmitX64::EmitA64GetSP(A64EmitContext& ctx, IR::Inst* inst) { + Xbyak::Reg64 result = ctx.reg_alloc.ScratchGpr(); + code->mov(result, qword[r15 + offsetof(A64JitState, sp)]); + ctx.reg_alloc.DefineValue(inst, result); +} + void A64EmitX64::EmitA64SetW(A64EmitContext& ctx, IR::Inst* inst) { auto args = ctx.reg_alloc.GetArgumentInfo(inst); A64::Reg reg = inst->GetArg(0).GetA64RegRef(); @@ -161,6 +184,20 @@ void A64EmitX64::EmitA64SetX(A64EmitContext& ctx, IR::Inst* inst) { } } +void A64EmitX64::EmitA64SetSP(A64EmitContext& ctx, IR::Inst* inst) { + auto args = ctx.reg_alloc.GetArgumentInfo(inst); + auto addr = qword[r15 + offsetof(A64JitState, sp)]; + if (args[0].IsImmediate()) { + code->mov(addr, args[0].GetImmediateU64()); + } else if (args[0].IsInXmm()) { + Xbyak::Xmm to_store = ctx.reg_alloc.UseXmm(args[0]); + code->movq(addr, to_store); + } else { + Xbyak::Reg64 to_store = ctx.reg_alloc.UseGpr(args[0]); + code->mov(addr, to_store); + } +} + void A64EmitX64::EmitTerminalImpl(IR::Term::Interpret terminal, IR::LocationDescriptor) { code->mov(code->ABI_PARAM1, A64::LocationDescriptor{terminal.next}.PC()); code->mov(code->ABI_PARAM2.cvt32(), 1); diff --git a/src/backend_x64/emit_x64.cpp b/src/backend_x64/emit_x64.cpp index 7d94c748..4ce7b463 100644 --- a/src/backend_x64/emit_x64.cpp +++ b/src/backend_x64/emit_x64.cpp @@ -131,6 +131,11 @@ void EmitX64::EmitGetGEFromOp(EmitContext&, IR::Inst*) { ASSERT_MSG(false, "should never happen"); } +template +void EmitX64::EmitGetNZCVFromOp(EmitContext&, IR::Inst*) { + ASSERT_MSG(false, "should never happen"); +} + template void EmitX64::EmitPack2x32To1x64(EmitContext& ctx, IR::Inst* inst) { auto args = ctx.reg_alloc.GetArgumentInfo(inst); @@ -738,21 +743,31 @@ static Xbyak::Reg8 DoCarry(RegAlloc& reg_alloc, Argument& carry_in, IR::Inst* ca } } -template -void EmitX64::EmitAddWithCarry(EmitContext& ctx, IR::Inst* inst) { +static Xbyak::Reg64 DoNZCV(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* nzcv_out) { + if (!nzcv_out) + return INVALID_REG; + + Xbyak::Reg64 nzcv = reg_alloc.ScratchGpr({HostLoc::RAX}); + code->xor_(nzcv.cvt32(), nzcv.cvt32()); + return nzcv; +} + +static void EmitAdd(BlockOfCode* code, EmitContext& ctx, IR::Inst* inst, size_t bitsize) { auto carry_inst = 
inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp); auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp); + auto nzcv_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetNZCVFromOp); auto args = ctx.reg_alloc.GetArgumentInfo(inst); auto& carry_in = args[2]; - Xbyak::Reg32 result = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32(); + Xbyak::Reg64 nzcv = DoNZCV(code, ctx.reg_alloc, nzcv_inst); + Xbyak::Reg result = ctx.reg_alloc.UseScratchGpr(args[0]).changeBit(bitsize); Xbyak::Reg8 carry = DoCarry(ctx.reg_alloc, carry_in, carry_inst); Xbyak::Reg8 overflow = overflow_inst ? ctx.reg_alloc.ScratchGpr().cvt8() : INVALID_REG.cvt8(); // TODO: Consider using LEA. - if (args[1].IsImmediate()) { + if (args[1].IsImmediate() && args[1].GetType() == IR::Type::U32) { u32 op_arg = args[1].GetImmediateU32(); if (carry_in.IsImmediate()) { if (carry_in.GetImmediateU1()) { @@ -767,7 +782,7 @@ void EmitX64::EmitAddWithCarry(EmitContext& ctx, IR::Inst* inst) { } } else { OpArg op_arg = ctx.reg_alloc.UseOpArg(args[1]); - op_arg.setBit(32); + op_arg.setBit(bitsize); if (carry_in.IsImmediate()) { if (carry_in.GetImmediateU1()) { code->stc(); @@ -781,6 +796,12 @@ void EmitX64::EmitAddWithCarry(EmitContext& ctx, IR::Inst* inst) { } } + if (nzcv_inst) { + ctx.EraseInstruction(nzcv_inst); + code->lahf(); + code->seto(code->al); + ctx.reg_alloc.DefineValue(nzcv_inst, nzcv); + } if (carry_inst) { ctx.EraseInstruction(carry_inst); code->setc(carry); @@ -796,26 +817,25 @@ void EmitX64::EmitAddWithCarry(EmitContext& ctx, IR::Inst* inst) { } template -void EmitX64::EmitAdd64(EmitContext& ctx, IR::Inst* inst) { - auto args = ctx.reg_alloc.GetArgumentInfo(inst); - - Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]); - Xbyak::Reg64 op_arg = ctx.reg_alloc.UseGpr(args[1]); - - code->add(result, op_arg); - - ctx.reg_alloc.DefineValue(inst, result); +void EmitX64::EmitAdd32(EmitContext& ctx, IR::Inst* inst) { + EmitAdd(code, ctx, inst, 32); } template -void EmitX64::EmitSubWithCarry(EmitContext& ctx, IR::Inst* inst) { +void EmitX64::EmitAdd64(EmitContext& ctx, IR::Inst* inst) { + EmitAdd(code, ctx, inst, 64); +} + +static void EmitSub(BlockOfCode* code, EmitContext& ctx, IR::Inst* inst, size_t bitsize) { auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp); auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp); + auto nzcv_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetNZCVFromOp); auto args = ctx.reg_alloc.GetArgumentInfo(inst); auto& carry_in = args[2]; - Xbyak::Reg32 result = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32(); + Xbyak::Reg64 nzcv = DoNZCV(code, ctx.reg_alloc, nzcv_inst); + Xbyak::Reg result = ctx.reg_alloc.UseScratchGpr(args[0]).changeBit(bitsize); Xbyak::Reg8 carry = DoCarry(ctx.reg_alloc, carry_in, carry_inst); Xbyak::Reg8 overflow = overflow_inst ? ctx.reg_alloc.ScratchGpr().cvt8() : INVALID_REG.cvt8(); @@ -823,7 +843,7 @@ void EmitX64::EmitSubWithCarry(EmitContext& ctx, IR::Inst* inst) { // TODO: Optimize CMP case. // Note that x64 CF is inverse of what the ARM carry flag is here. 
- if (args[1].IsImmediate()) { + if (args[1].IsImmediate() && args[1].GetType() == IR::Type::U32) { u32 op_arg = args[1].GetImmediateU32(); if (carry_in.IsImmediate()) { if (carry_in.GetImmediateU1()) { @@ -839,7 +859,7 @@ void EmitX64::EmitSubWithCarry(EmitContext& ctx, IR::Inst* inst) { } } else { OpArg op_arg = ctx.reg_alloc.UseOpArg(args[1]); - op_arg.setBit(32); + op_arg.setBit(bitsize); if (carry_in.IsImmediate()) { if (carry_in.GetImmediateU1()) { code->sub(result, *op_arg); @@ -854,6 +874,12 @@ void EmitX64::EmitSubWithCarry(EmitContext& ctx, IR::Inst* inst) { } } + if (nzcv_inst) { + ctx.EraseInstruction(nzcv_inst); + code->lahf(); + code->seto(code->al); + ctx.reg_alloc.DefineValue(nzcv_inst, nzcv); + } if (carry_inst) { ctx.EraseInstruction(carry_inst); code->setnc(carry); @@ -869,19 +895,17 @@ void EmitX64::EmitSubWithCarry(EmitContext& ctx, IR::Inst* inst) { } template -void EmitX64::EmitSub64(EmitContext& ctx, IR::Inst* inst) { - auto args = ctx.reg_alloc.GetArgumentInfo(inst); - - Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]); - Xbyak::Reg64 op_arg = ctx.reg_alloc.UseGpr(args[1]); - - code->sub(result, op_arg); - - ctx.reg_alloc.DefineValue(inst, result); +void EmitX64::EmitSub32(EmitContext& ctx, IR::Inst* inst) { + EmitSub(code, ctx, inst, 32); } template -void EmitX64::EmitMul(EmitContext& ctx, IR::Inst* inst) { +void EmitX64::EmitSub64(EmitContext& ctx, IR::Inst* inst) { + EmitSub(code, ctx, inst, 64); +} + +template +void EmitX64::EmitMul32(EmitContext& ctx, IR::Inst* inst) { auto args = ctx.reg_alloc.GetArgumentInfo(inst); Xbyak::Reg32 result = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32(); @@ -984,10 +1008,10 @@ void EmitX64::EmitNot(EmitContext& ctx, IR::Inst* inst) { } template -void EmitX64::EmitSignExtendWordToLong(EmitContext& ctx, IR::Inst* inst) { +void EmitX64::EmitSignExtendByteToWord(EmitContext& ctx, IR::Inst* inst) { auto args = ctx.reg_alloc.GetArgumentInfo(inst); Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]); - code->movsxd(result.cvt64(), result.cvt32()); + code->movsx(result.cvt32(), result.cvt8()); ctx.reg_alloc.DefineValue(inst, result); } @@ -1000,18 +1024,34 @@ void EmitX64::EmitSignExtendHalfToWord(EmitContext& ctx, IR::Inst* inst) { } template -void EmitX64::EmitSignExtendByteToWord(EmitContext& ctx, IR::Inst* inst) { +void EmitX64::EmitSignExtendByteToLong(EmitContext& ctx, IR::Inst* inst) { auto args = ctx.reg_alloc.GetArgumentInfo(inst); Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]); - code->movsx(result.cvt32(), result.cvt8()); + code->movsx(result.cvt64(), result.cvt8()); ctx.reg_alloc.DefineValue(inst, result); } template -void EmitX64::EmitZeroExtendWordToLong(EmitContext& ctx, IR::Inst* inst) { +void EmitX64::EmitSignExtendHalfToLong(EmitContext& ctx, IR::Inst* inst) { auto args = ctx.reg_alloc.GetArgumentInfo(inst); Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]); - code->mov(result.cvt32(), result.cvt32()); // x64 zeros upper 32 bits on a 32-bit move + code->movsx(result.cvt64(), result.cvt16()); + ctx.reg_alloc.DefineValue(inst, result); +} + +template +void EmitX64::EmitSignExtendWordToLong(EmitContext& ctx, IR::Inst* inst) { + auto args = ctx.reg_alloc.GetArgumentInfo(inst); + Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]); + code->movsxd(result.cvt64(), result.cvt32()); + ctx.reg_alloc.DefineValue(inst, result); +} + +template +void EmitX64::EmitZeroExtendByteToWord(EmitContext& ctx, IR::Inst* inst) { + auto args = ctx.reg_alloc.GetArgumentInfo(inst); + Xbyak::Reg64 result 
= ctx.reg_alloc.UseScratchGpr(args[0]); + code->movzx(result.cvt32(), result.cvt8()); ctx.reg_alloc.DefineValue(inst, result); } @@ -1024,10 +1064,26 @@ void EmitX64::EmitZeroExtendHalfToWord(EmitContext& ctx, IR::Inst* inst) { } template -void EmitX64::EmitZeroExtendByteToWord(EmitContext& ctx, IR::Inst* inst) { +void EmitX64::EmitZeroExtendByteToLong(EmitContext& ctx, IR::Inst* inst) { auto args = ctx.reg_alloc.GetArgumentInfo(inst); Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]); - code->movzx(result.cvt32(), result.cvt8()); + code->movzx(result.cvt32(), result.cvt8()); // x64 zeros upper 32 bits on a 32-bit move + ctx.reg_alloc.DefineValue(inst, result); +} + +template +void EmitX64::EmitZeroExtendHalfToLong(EmitContext& ctx, IR::Inst* inst) { + auto args = ctx.reg_alloc.GetArgumentInfo(inst); + Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]); + code->movzx(result.cvt32(), result.cvt16()); // x64 zeros upper 32 bits on a 32-bit move + ctx.reg_alloc.DefineValue(inst, result); +} + +template +void EmitX64::EmitZeroExtendWordToLong(EmitContext& ctx, IR::Inst* inst) { + auto args = ctx.reg_alloc.GetArgumentInfo(inst); + Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]); + code->mov(result.cvt32(), result.cvt32()); // x64 zeros upper 32 bits on a 32-bit move ctx.reg_alloc.DefineValue(inst, result); } diff --git a/src/frontend/A64/decoder/a64.h b/src/frontend/A64/decoder/a64.h index 911928ed..c992b514 100644 --- a/src/frontend/A64/decoder/a64.h +++ b/src/frontend/A64/decoder/a64.h @@ -34,10 +34,10 @@ std::vector> GetDecodeTable() { //INST(&V::ADRP, "ADRP", "1ii10000iiiiiiiiiiiiiiiiiiiddddd"), // Data processing - Immediate - Add/Sub - //INST(&V::ADD_imm, "ADD (immediate)", "z0010001ssiiiiiiiiiiiinnnnnddddd"), - //INST(&V::ADDS_imm, "ADDS (immediate)", "z0110001ssiiiiiiiiiiiinnnnnddddd"), - //INST(&V::SUB_imm, "SUB (immediate)", "z1010001ssiiiiiiiiiiiinnnnnddddd"), - //INST(&V::SUBS_imm, "SUBS (immediate)", "z1110001ssiiiiiiiiiiiinnnnnddddd"), + INST(&V::ADD_imm, "ADD (immediate)", "z0010001ssiiiiiiiiiiiinnnnnddddd"), + INST(&V::ADDS_imm, "ADDS (immediate)", "z0110001ssiiiiiiiiiiiinnnnnddddd"), + INST(&V::SUB_imm, "SUB (immediate)", "z1010001ssiiiiiiiiiiiinnnnnddddd"), + INST(&V::SUBS_imm, "SUBS (immediate)", "z1110001ssiiiiiiiiiiiinnnnnddddd"), // Data processing - Immediate - Logical //INST(&V::AND_imm, "AND (immediate)", "z00100100Nrrrrrrssssssnnnnnddddd"), @@ -374,21 +374,21 @@ std::vector> GetDecodeTable() { // Data Processing - Register - Add/Sub (shifted register) INST(&V::ADD_shift, "ADD (shifted register)", "z0001011ss0mmmmmiiiiiinnnnnddddd"), - //INST(&V::ADDS_shift, "ADDS (shifted register)", "z0101011ss0mmmmmiiiiiinnnnnddddd"), - //INST(&V::SUB_shift, "SUB (shifted register)", "z1001011ss0mmmmmiiiiiinnnnnddddd"), - //INST(&V::SUBS_shift, "SUBS (shifted register)", "z1101011ss0mmmmmiiiiiinnnnnddddd"), + INST(&V::ADDS_shift, "ADDS (shifted register)", "z0101011ss0mmmmmiiiiiinnnnnddddd"), + INST(&V::SUB_shift, "SUB (shifted register)", "z1001011ss0mmmmmiiiiiinnnnnddddd"), + INST(&V::SUBS_shift, "SUBS (shifted register)", "z1101011ss0mmmmmiiiiiinnnnnddddd"), // Data Processing - Register - Add/Sub (shifted register) - //INST(&V::ADD_ext, "ADD (extended register)", "z0001011001mmmmmxxxiiinnnnnddddd"), - //INST(&V::ADDS_ext, "ADDS (extended register)", "z0101011001mmmmmxxxiiinnnnnddddd"), - //INST(&V::SUB_ext, "SUB (extended register)", "z1001011001mmmmmxxxiiinnnnnddddd"), - //INST(&V::SUBS_ext, "SUBS (extended register)", 
"z1101011001mmmmmxxxiiinnnnnddddd"), + INST(&V::ADD_ext, "ADD (extended register)", "z0001011001mmmmmxxxiiinnnnnddddd"), + INST(&V::ADDS_ext, "ADDS (extended register)", "z0101011001mmmmmxxxiiinnnnnddddd"), + INST(&V::SUB_ext, "SUB (extended register)", "z1001011001mmmmmxxxiiinnnnnddddd"), + INST(&V::SUBS_ext, "SUBS (extended register)", "z1101011001mmmmmxxxiiinnnnnddddd"), // Data Processing - Register - Add/Sub (with carry) - //INST(&V::ADC, "ADC", "z0011010000mmmmm000000nnnnnddddd"), - //INST(&V::ADCS, "ADCS", "z0111010000mmmmm000000nnnnnddddd"), - //INST(&V::SBC, "SBC", "z1011010000mmmmm000000nnnnnddddd"), - //INST(&V::SBCS, "SBCS", "z1111010000mmmmm000000nnnnnddddd"), + INST(&V::ADC, "ADC", "z0011010000mmmmm000000nnnnnddddd"), + INST(&V::ADCS, "ADCS", "z0111010000mmmmm000000nnnnnddddd"), + INST(&V::SBC, "SBC", "z1011010000mmmmm000000nnnnnddddd"), + INST(&V::SBCS, "SBCS", "z1111010000mmmmm000000nnnnnddddd"), // Data Processing - Register - Conditional compare //INST(&V::CCMN_reg, "CCMN (register)", "z0111010010mmmmmcccc00nnnnn0ffff"), diff --git a/src/frontend/A64/ir_emitter.cpp b/src/frontend/A64/ir_emitter.cpp index 48868e25..e0f6029e 100644 --- a/src/frontend/A64/ir_emitter.cpp +++ b/src/frontend/A64/ir_emitter.cpp @@ -22,21 +22,45 @@ u64 IREmitter::AlignPC(size_t alignment) { return static_cast(pc - pc % alignment); } +IR::U1 IREmitter::GetCFlag() { + return Inst(Opcode::A64GetCFlag); +} + +void IREmitter::SetNZCV(const IR::NZCV& nzcv) { + Inst(Opcode::A64SetNZCV, nzcv); +} + IR::U32 IREmitter::GetW(Reg reg) { + if (reg == Reg::ZR) + return Imm32(0); return Inst(Opcode::A64GetW, IR::Value(reg)); } IR::U64 IREmitter::GetX(Reg reg) { + if (reg == Reg::ZR) + return Imm64(0); return Inst(Opcode::A64GetX, IR::Value(reg)); } +IR::U64 IREmitter::GetSP() { + return Inst(Opcode::A64GetSP); +} + void IREmitter::SetW(const Reg reg, const IR::U32& value) { + if (reg == Reg::ZR) + return; Inst(Opcode::A64SetW, IR::Value(reg), value); } void IREmitter::SetX(const Reg reg, const IR::U64& value) { + if (reg == Reg::ZR) + return; Inst(Opcode::A64SetX, IR::Value(reg), value); } +void IREmitter::SetSP(const IR::U64& value) { + Inst(Opcode::A64SetSP, value); +} + } // namespace IR } // namespace Dynarmic diff --git a/src/frontend/A64/ir_emitter.h b/src/frontend/A64/ir_emitter.h index 9ce8fb8e..27e16cc3 100644 --- a/src/frontend/A64/ir_emitter.h +++ b/src/frontend/A64/ir_emitter.h @@ -31,10 +31,15 @@ public: u64 PC(); u64 AlignPC(size_t alignment); + IR::U1 GetCFlag(); + void SetNZCV(const IR::NZCV& nzcv); + IR::U32 GetW(Reg source_reg); IR::U64 GetX(Reg source_reg); + IR::U64 GetSP(); void SetW(Reg dest_reg, const IR::U32& value); void SetX(Reg dest_reg, const IR::U64& value); + void SetSP(const IR::U64& value); }; } // namespace IR diff --git a/src/frontend/A64/translate/impl/data_processing_addsub.cpp b/src/frontend/A64/translate/impl/data_processing_addsub.cpp index 83253f0c..18110f80 100644 --- a/src/frontend/A64/translate/impl/data_processing_addsub.cpp +++ b/src/frontend/A64/translate/impl/data_processing_addsub.cpp @@ -9,6 +9,114 @@ namespace Dynarmic { namespace A64 { +bool TranslatorVisitor::ADD_imm(bool sf, Imm<2> shift, Imm<12> imm12, Reg Rn, Reg Rd) { + size_t datasize = sf ? 64 : 32; + + u64 imm; + switch (shift.ZeroExtend()) { + case 0b00: + imm = imm12.ZeroExtend(); + break; + case 0b01: + imm = imm12.ZeroExtend() << 12; + break; + default: + return ReservedValue(); + } + + auto operand1 = Rn == Reg::SP ? 
SP(datasize) : X(datasize, Rn); + + auto result = ir.Add(operand1, I(datasize, imm)); + + if (Rd == Reg::SP) { + SP(datasize, result); + } else { + X(datasize, Rd, result); + } + + return true; +} + +bool TranslatorVisitor::ADDS_imm(bool sf, Imm<2> shift, Imm<12> imm12, Reg Rn, Reg Rd) { + size_t datasize = sf ? 64 : 32; + + u64 imm; + switch (shift.ZeroExtend()) { + case 0b00: + imm = imm12.ZeroExtend(); + break; + case 0b01: + imm = imm12.ZeroExtend() << 12; + break; + default: + return ReservedValue(); + } + + auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn); + + auto result = ir.Add(operand1, I(datasize, imm)); + + ir.SetNZCV(ir.NZCVFrom(result)); + + X(datasize, Rd, result); + + return true; +} + +bool TranslatorVisitor::SUB_imm(bool sf, Imm<2> shift, Imm<12> imm12, Reg Rn, Reg Rd) { + size_t datasize = sf ? 64 : 32; + + u64 imm; + switch (shift.ZeroExtend()) { + case 0b00: + imm = imm12.ZeroExtend(); + break; + case 0b01: + imm = imm12.ZeroExtend() << 12; + break; + default: + return ReservedValue(); + } + + auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn); + + auto result = ir.Sub(operand1, I(datasize, imm)); + + if (Rd == Reg::SP) { + SP(datasize, result); + } else { + X(datasize, Rd, result); + } + + return true; +} + +bool TranslatorVisitor::SUBS_imm(bool sf, Imm<2> shift, Imm<12> imm12, Reg Rn, Reg Rd) { + size_t datasize = sf ? 64 : 32; + + u64 imm; + switch (shift.ZeroExtend()) { + case 0b00: + imm = imm12.ZeroExtend(); + break; + case 0b01: + imm = imm12.ZeroExtend() << 12; + break; + default: + return ReservedValue(); + } + + auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn); + + auto result = ir.Sub(operand1, I(datasize, imm)); + + ir.SetNZCV(ir.NZCVFrom(result)); + + X(datasize, Rd, result); + + return true; +} + bool TranslatorVisitor::ADD_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) { size_t datasize = sf ? 64 : 32; @@ -27,5 +135,199 @@ bool TranslatorVisitor::ADD_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Re return true; } +bool TranslatorVisitor::ADDS_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) { + size_t datasize = sf ? 64 : 32; + + if (shift == 0b11) return ReservedValue(); + if (!sf && imm6.Bit<5>()) return ReservedValue(); + + u8 shift_amount = imm6.ZeroExtend(); + + auto operand1 = X(datasize, Rn); + auto operand2 = ShiftReg(datasize, Rm, shift, ir.Imm8(shift_amount)); + + auto result = ir.Add(operand1, operand2); + + ir.SetNZCV(ir.NZCVFrom(result)); + + X(datasize, Rd, result); + + return true; +} + +bool TranslatorVisitor::SUB_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) { + size_t datasize = sf ? 64 : 32; + + if (shift == 0b11) return ReservedValue(); + if (!sf && imm6.Bit<5>()) return ReservedValue(); + + u8 shift_amount = imm6.ZeroExtend(); + + auto operand1 = X(datasize, Rn); + auto operand2 = ShiftReg(datasize, Rm, shift, ir.Imm8(shift_amount)); + + auto result = ir.Sub(operand1, operand2); + + X(datasize, Rd, result); + + return true; +} + +bool TranslatorVisitor::SUBS_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) { + size_t datasize = sf ? 
64 : 32; + + if (shift == 0b11) return ReservedValue(); + if (!sf && imm6.Bit<5>()) return ReservedValue(); + + u8 shift_amount = imm6.ZeroExtend(); + + auto operand1 = X(datasize, Rn); + auto operand2 = ShiftReg(datasize, Rm, shift, ir.Imm8(shift_amount)); + + auto result = ir.Sub(operand1, operand2); + + ir.SetNZCV(ir.NZCVFrom(result)); + + X(datasize, Rd, result); + + return true; +} + +bool TranslatorVisitor::ADD_ext(bool sf, Reg Rm, Imm<3> option, Imm<3> imm3, Reg Rn, Reg Rd) { + size_t datasize = sf ? 64 : 32; + u8 shift = imm3.ZeroExtend(); + if (shift > 4) return ReservedValue(); + + auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn); + auto operand2 = ExtendReg(datasize, Rm, option, shift); + + auto result = ir.Add(operand1, operand2); + + if (Rd == Reg::SP) { + SP(datasize, result); + } else { + X(datasize, Rd, result); + } + + return true; +} + +bool TranslatorVisitor::ADDS_ext(bool sf, Reg Rm, Imm<3> option, Imm<3> imm3, Reg Rn, Reg Rd) { + size_t datasize = sf ? 64 : 32; + u8 shift = imm3.ZeroExtend(); + if (shift > 4) return ReservedValue(); + + auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn); + auto operand2 = ExtendReg(datasize, Rm, option, shift); + + auto result = ir.Add(operand1, operand2); + + ir.SetNZCV(ir.NZCVFrom(result)); + + if (Rd == Reg::SP) { + SP(datasize, result); + } else { + X(datasize, Rd, result); + } + + return true; +} + +bool TranslatorVisitor::SUB_ext(bool sf, Reg Rm, Imm<3> option, Imm<3> imm3, Reg Rn, Reg Rd) { + size_t datasize = sf ? 64 : 32; + u8 shift = imm3.ZeroExtend(); + if (shift > 4) return ReservedValue(); + + auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn); + auto operand2 = ExtendReg(datasize, Rm, option, shift); + + auto result = ir.Sub(operand1, operand2); + + if (Rd == Reg::SP) { + SP(datasize, result); + } else { + X(datasize, Rd, result); + } + + return true; +} + +bool TranslatorVisitor::SUBS_ext(bool sf, Reg Rm, Imm<3> option, Imm<3> imm3, Reg Rn, Reg Rd) { + size_t datasize = sf ? 64 : 32; + u8 shift = imm3.ZeroExtend(); + if (shift > 4) return ReservedValue(); + + auto operand1 = Rn == Reg::SP ? SP(datasize) : X(datasize, Rn); + auto operand2 = ExtendReg(datasize, Rm, option, shift); + + auto result = ir.Sub(operand1, operand2); + + ir.SetNZCV(ir.NZCVFrom(result)); + + if (Rd == Reg::SP) { + SP(datasize, result); + } else { + X(datasize, Rd, result); + } + + return true; +} + +bool TranslatorVisitor::ADC(bool sf, Reg Rm, Reg Rn, Reg Rd) { + size_t datasize = sf ? 64 : 32; + + auto operand1 = X(datasize, Rn); + auto operand2 = X(datasize, Rm); + + auto result = ir.AddWithCarry(operand1, operand2, ir.GetCFlag()); + + X(datasize, Rd, result); + + return true; +} + +bool TranslatorVisitor::ADCS(bool sf, Reg Rm, Reg Rn, Reg Rd) { + size_t datasize = sf ? 64 : 32; + + auto operand1 = X(datasize, Rn); + auto operand2 = X(datasize, Rm); + + auto result = ir.AddWithCarry(operand1, operand2, ir.GetCFlag()); + + ir.SetNZCV(ir.NZCVFrom(result)); + + X(datasize, Rd, result); + + return true; +} + +bool TranslatorVisitor::SBC(bool sf, Reg Rm, Reg Rn, Reg Rd) { + size_t datasize = sf ? 64 : 32; + + auto operand1 = X(datasize, Rn); + auto operand2 = X(datasize, Rm); + + auto result = ir.SubWithCarry(operand1, operand2, ir.GetCFlag()); + + X(datasize, Rd, result); + + return true; +} + +bool TranslatorVisitor::SBCS(bool sf, Reg Rm, Reg Rn, Reg Rd) { + size_t datasize = sf ? 
64 : 32; + + auto operand1 = X(datasize, Rn); + auto operand2 = X(datasize, Rm); + + auto result = ir.SubWithCarry(operand1, operand2, ir.GetCFlag()); + + ir.SetNZCV(ir.NZCVFrom(result)); + + X(datasize, Rd, result); + + return true; +} + } // namespace A64 } // namespace Dynarmic diff --git a/src/frontend/A64/translate/impl/impl.cpp b/src/frontend/A64/translate/impl/impl.cpp index 0b9e24a3..fbcb7150 100644 --- a/src/frontend/A64/translate/impl/impl.cpp +++ b/src/frontend/A64/translate/impl/impl.cpp @@ -25,6 +25,18 @@ bool TranslatorVisitor::ReservedValue() { return false; } +IR::U32U64 TranslatorVisitor::I(size_t bitsize, u64 value) { + switch (bitsize) { + case 32: + return ir.Imm32(static_cast(value)); + case 64: + return ir.Imm64(value); + default: + ASSERT_MSG(false, "Imm - get: Invalid bitsize"); + return {}; + } +} + IR::U32U64 TranslatorVisitor::X(size_t bitsize, Reg reg) { switch (bitsize) { case 32: @@ -50,6 +62,31 @@ void TranslatorVisitor::X(size_t bitsize, Reg reg, IR::U32U64 value) { } } +IR::U32U64 TranslatorVisitor::SP(size_t bitsize) { + switch (bitsize) { + case 32: + return ir.LeastSignificantWord(ir.GetSP()); + case 64: + return ir.GetSP(); + default: + ASSERT_MSG(false, "SP - get : Invalid bitsize"); + return {}; + } +} + +void TranslatorVisitor::SP(size_t bitsize, IR::U32U64 value) { + switch (bitsize) { + case 32: + ir.SetSP(ir.ZeroExtendWordToLong(value)); + break; + case 64: + ir.SetSP(value); + break; + default: + ASSERT_MSG(false, "SP - : Invalid bitsize"); + } +} + IR::U32U64 TranslatorVisitor::ShiftReg(size_t bitsize, Reg reg, Imm<2> shift, IR::U8 amount) { auto result = X(bitsize, reg); switch (shift.ZeroExtend()) { @@ -66,5 +103,81 @@ IR::U32U64 TranslatorVisitor::ShiftReg(size_t bitsize, Reg reg, Imm<2> shift, IR return {}; } +IR::U32U64 TranslatorVisitor::ExtendReg(size_t bitsize, Reg reg, Imm<3> option, u8 shift) { + ASSERT(shift <= 4); + ASSERT(bitsize == 32 || bitsize == 64); + IR::UAny val = X(bitsize, reg); + size_t len; + IR::U32U64 extended; + bool signed_extend; + + switch (option.ZeroExtend()) { + case 0b000: { // UXTB + val = ir.LeastSignificantByte(val); + len = 8; + signed_extend = false; + break; + } + case 0b001: { // UXTH + val = ir.LeastSignificantHalf(val); + len = 16; + signed_extend = false; + break; + } + case 0b010: { // UXTW + if (bitsize != 32) { + val = ir.LeastSignificantWord(val); + } + len = 32; + signed_extend = false; + break; + } + case 0b011: { // UXTX + len = 64; + signed_extend = false; + break; + } + case 0b100: { // SXTB + val = ir.LeastSignificantByte(val); + len = 8; + signed_extend = true; + break; + } + case 0b101: { // SXTH + val = ir.LeastSignificantHalf(val); + len = 16; + signed_extend = true; + break; + } + case 0b110: { // SXTW + if (bitsize != 32) { + val = ir.LeastSignificantWord(val); + } + len = 32; + signed_extend = true; + break; + } + case 0b111: { // SXTX + len = 64; + signed_extend = true; + break; + } + default: + ASSERT_MSG(false, "Unreachable"); + } + + if (len < bitsize) { + if (bitsize == 32) { + extended = signed_extend ? ir.SignExtendToWord(val) : ir.ZeroExtendToWord(val); + } else { + extended = signed_extend ? 
ir.SignExtendToLong(val) : ir.ZeroExtendToLong(val); + } + } else { + extended = val; + } + + return ir.LogicalShiftLeft(extended, ir.Imm8(shift)); +} + } // namespace A64 } // namespace Dynarmic diff --git a/src/frontend/A64/translate/impl/impl.h b/src/frontend/A64/translate/impl/impl.h index 5090e9d0..8a141df1 100644 --- a/src/frontend/A64/translate/impl/impl.h +++ b/src/frontend/A64/translate/impl/impl.h @@ -25,10 +25,14 @@ struct TranslatorVisitor final { bool UnpredictableInstruction(); bool ReservedValue(); + IR::U32U64 I(size_t bitsize, u64 value); IR::U32U64 X(size_t bitsize, Reg reg); void X(size_t bitsize, Reg reg, IR::U32U64 value); + IR::U32U64 SP(size_t bitsize); + void SP(size_t bitsize, IR::U32U64 value); IR::U32U64 ShiftReg(size_t bitsize, Reg reg, Imm<2> shift, IR::U8 amount); + IR::U32U64 ExtendReg(size_t bitsize, Reg reg, Imm<3> option, u8 shift); // Data processing - Immediate - PC relative addressing bool ADR(Imm<2> immlo, Imm<19> immhi, Reg Rd); diff --git a/src/frontend/ir/ir_emitter.cpp b/src/frontend/ir/ir_emitter.cpp index 2648296d..29b80275 100644 --- a/src/frontend/ir/ir_emitter.cpp +++ b/src/frontend/ir/ir_emitter.cpp @@ -49,11 +49,17 @@ ResultAndCarry IREmitter::MostSignificantWord(const U64& value) { return {result, carry_out}; } -U16 IREmitter::LeastSignificantHalf(const U32& value) { +U16 IREmitter::LeastSignificantHalf(U32U64 value) { + if (value.GetType() == Type::U64) { + value = LeastSignificantWord(value); + } return Inst(Opcode::LeastSignificantHalf, value); } -U8 IREmitter::LeastSignificantByte(const U32& value) { +U8 IREmitter::LeastSignificantByte(U32U64 value) { + if (value.GetType() == Type::U64) { + value = LeastSignificantWord(value); + } return Inst(Opcode::LeastSignificantByte, value); } @@ -69,6 +75,10 @@ U1 IREmitter::IsZero64(const U64& value) { return Inst(Opcode::IsZero64, value); } +NZCV IREmitter::NZCVFrom(const Value& value) { + return Inst(Opcode::GetNZCVFromOp, value); +} + ResultAndCarry IREmitter::LogicalShiftLeft(const U32& value_in, const U8& shift_amount, const U1& carry_in) { auto result = Inst(Opcode::LogicalShiftLeft32, value_in, shift_amount, carry_in); auto carry_out = Inst(Opcode::GetCarryFromOp, result); @@ -135,48 +145,75 @@ U32U64 IREmitter::RotateRight(const U32U64& value_in, const U8& shift_amount) { } } -ResultAndCarryAndOverflow IREmitter::AddWithCarry(const Value& a, const Value& b, const U1& carry_in) { - auto result = Inst(Opcode::AddWithCarry, a, b, carry_in); +ResultAndCarryAndOverflow IREmitter::AddWithCarry(const U32& a, const U32& b, const U1& carry_in) { + auto result = Inst(Opcode::Add32, a, b, carry_in); auto carry_out = Inst(Opcode::GetCarryFromOp, result); auto overflow = Inst(Opcode::GetOverflowFromOp, result); return {result, carry_out, overflow}; } +U32U64 IREmitter::AddWithCarry(const U32U64& a, const U32U64& b, const U1& carry_in) { + ASSERT(a.GetType() == b.GetType()); + if (a.GetType() == Type::U32) { + return Inst(Opcode::Add32, a, b, carry_in); + } else { + return Inst(Opcode::Add64, a, b, carry_in); + } +} + U32 IREmitter::Add(const U32& a, const U32& b) { - return Inst(Opcode::AddWithCarry, a, b, Imm1(0)); + return Inst(Opcode::Add32, a, b, Imm1(0)); } U64 IREmitter::Add(const U64& a, const U64& b) { - return Inst(Opcode::Add64, a, b); + return Inst(Opcode::Add64, a, b, Imm1(0)); } U32U64 IREmitter::Add(const U32U64& a, const U32U64& b) { ASSERT(a.GetType() == b.GetType()); if (a.GetType() == Type::U32) { - return Inst(Opcode::AddWithCarry, a, b, Imm1(0)); + return Inst(Opcode::Add32, a, b, 
Imm1(0)); } else { - return Inst(Opcode::Add64, a, b); + return Inst(Opcode::Add64, a, b, Imm1(0)); } } ResultAndCarryAndOverflow IREmitter::SubWithCarry(const U32& a, const U32& b, const U1& carry_in) { // This is equivalent to AddWithCarry(a, Not(b), carry_in). - auto result = Inst(Opcode::SubWithCarry, a, b, carry_in); + auto result = Inst(Opcode::Sub32, a, b, carry_in); auto carry_out = Inst(Opcode::GetCarryFromOp, result); auto overflow = Inst(Opcode::GetOverflowFromOp, result); return {result, carry_out, overflow}; } +U32U64 IREmitter::SubWithCarry(const U32U64& a, const U32U64& b, const U1& carry_in) { + ASSERT(a.GetType() == b.GetType()); + if (a.GetType() == Type::U32) { + return Inst(Opcode::Sub32, a, b, carry_in); + } else { + return Inst(Opcode::Sub64, a, b, carry_in); + } +} + U32 IREmitter::Sub(const U32& a, const U32& b) { - return Inst(Opcode::SubWithCarry, a, b, Imm1(1)); + return Inst(Opcode::Sub32, a, b, Imm1(1)); } U64 IREmitter::Sub(const U64& a, const U64& b) { - return Inst(Opcode::Sub64, a, b); + return Inst(Opcode::Sub64, a, b, Imm1(1)); +} + +U32U64 IREmitter::Sub(const U32U64& a, const U32U64& b) { + ASSERT(a.GetType() == b.GetType()); + if (a.GetType() == Type::U32) { + return Inst(Opcode::Sub32, a, b, Imm1(1)); + } else { + return Inst(Opcode::Sub64, a, b, Imm1(1)); + } } U32 IREmitter::Mul(const U32& a, const U32& b) { - return Inst(Opcode::Mul, a, b); + return Inst(Opcode::Mul32, a, b); } U64 IREmitter::Mul(const U64& a, const U64& b) { @@ -199,6 +236,38 @@ U32 IREmitter::Not(const U32& a) { return Inst(Opcode::Not, a); } +U64 IREmitter::SignExtendToLong(const UAny& a) { + switch (a.GetType()) { + case Type::U8: + return Inst(Opcode::SignExtendByteToLong, a); + case Type::U16: + return Inst(Opcode::SignExtendHalfToLong, a); + case Type::U32: + return Inst(Opcode::SignExtendWordToLong, a); + case Type::U64: + return U64(a); + default: + ASSERT_MSG(false, "Unreachable"); + return {}; + } +} + +U32 IREmitter::SignExtendToWord(const UAny& a) { + switch (a.GetType()) { + case Type::U8: + return Inst(Opcode::SignExtendByteToWord, a); + case Type::U16: + return Inst(Opcode::SignExtendHalfToWord, a); + case Type::U32: + return U32(a); + case Type::U64: + return Inst(Opcode::LeastSignificantWord, a); + default: + ASSERT_MSG(false, "Unreachable"); + return {}; + } +} + U64 IREmitter::SignExtendWordToLong(const U32& a) { return Inst(Opcode::SignExtendWordToLong, a); } @@ -211,6 +280,38 @@ U32 IREmitter::SignExtendByteToWord(const U8& a) { return Inst(Opcode::SignExtendByteToWord, a); } +U64 IREmitter::ZeroExtendToLong(const UAny& a) { + switch (a.GetType()) { + case Type::U8: + return Inst(Opcode::ZeroExtendByteToLong, a); + case Type::U16: + return Inst(Opcode::ZeroExtendHalfToLong, a); + case Type::U32: + return Inst(Opcode::ZeroExtendWordToLong, a); + case Type::U64: + return U64(a); + default: + ASSERT_MSG(false, "Unreachable"); + return {}; + } +} + +U32 IREmitter::ZeroExtendToWord(const UAny& a) { + switch (a.GetType()) { + case Type::U8: + return Inst(Opcode::ZeroExtendByteToWord, a); + case Type::U16: + return Inst(Opcode::ZeroExtendHalfToWord, a); + case Type::U32: + return U32(a); + case Type::U64: + return Inst(Opcode::LeastSignificantWord, a); + default: + ASSERT_MSG(false, "Unreachable"); + return {}; + } +} + U64 IREmitter::ZeroExtendWordToLong(const U32& a) { return Inst(Opcode::ZeroExtendWordToLong, a); } diff --git a/src/frontend/ir/ir_emitter.h b/src/frontend/ir/ir_emitter.h index e0d54049..64b573b3 100644 --- a/src/frontend/ir/ir_emitter.h +++ 
b/src/frontend/ir/ir_emitter.h @@ -76,12 +76,15 @@ public: U64 Pack2x32To1x64(const U32& lo, const U32& hi); U32 LeastSignificantWord(const U64& value); ResultAndCarry MostSignificantWord(const U64& value); - U16 LeastSignificantHalf(const U32& value); - U8 LeastSignificantByte(const U32& value); + U16 LeastSignificantHalf(U32U64 value); + U8 LeastSignificantByte(U32U64 value); U1 MostSignificantBit(const U32& value); U1 IsZero(const U32& value); U1 IsZero64(const U64& value); + // This pseudo-instruction may only be added to instructions that support it. + NZCV NZCVFrom(const Value& value); + ResultAndCarry LogicalShiftLeft(const U32& value_in, const U8& shift_amount, const U1& carry_in); ResultAndCarry LogicalShiftRight(const U32& value_in, const U8& shift_amount, const U1& carry_in); ResultAndCarry ArithmeticShiftRight(const U32& value_in, const U8& shift_amount, const U1& carry_in); @@ -92,25 +95,32 @@ public: U32U64 ArithmeticShiftRight(const U32U64& value_in, const U8& shift_amount); U32U64 RotateRight(const U32U64& value_in, const U8& shift_amount); ResultAndCarry RotateRightExtended(const U32& value_in, const U1& carry_in); - ResultAndCarryAndOverflow AddWithCarry(const Value& a, const Value& b, const U1& carry_in); + ResultAndCarryAndOverflow AddWithCarry(const U32& a, const U32& b, const U1& carry_in); + ResultAndCarryAndOverflow SubWithCarry(const U32& a, const U32& b, const U1& carry_in); + U32U64 AddWithCarry(const U32U64& a, const U32U64& b, const U1& carry_in); + U32U64 SubWithCarry(const U32U64& a, const U32U64& b, const U1& carry_in); U32 Add(const U32& a, const U32& b); U64 Add(const U64& a, const U64& b); U32U64 Add(const U32U64& a, const U32U64& b); - ResultAndCarryAndOverflow SubWithCarry(const U32& a, const U32& b, const U1& carry_in); U32 Sub(const U32& a, const U32& b); U64 Sub(const U64& a, const U64& b); + U32U64 Sub(const U32U64& a, const U32U64& b); U32 Mul(const U32& a, const U32& b); U64 Mul(const U64& a, const U64& b); U32 And(const U32& a, const U32& b); U32 Eor(const U32& a, const U32& b); U32 Or(const U32& a, const U32& b); U32 Not(const U32& a); - U64 SignExtendWordToLong(const U32& a); - U32 SignExtendHalfToWord(const U16& a); + U32 SignExtendToWord(const UAny& a); + U64 SignExtendToLong(const UAny& a); U32 SignExtendByteToWord(const U8& a); - U64 ZeroExtendWordToLong(const U32& a); - U32 ZeroExtendHalfToWord(const U16& a); + U32 SignExtendHalfToWord(const U16& a); + U64 SignExtendWordToLong(const U32& a); + U32 ZeroExtendToWord(const UAny& a); + U64 ZeroExtendToLong(const UAny& a); U32 ZeroExtendByteToWord(const U8& a); + U32 ZeroExtendHalfToWord(const U16& a); + U64 ZeroExtendWordToLong(const U32& a); U32 ByteReverseWord(const U32& a); U16 ByteReverseHalf(const U16& a); U64 ByteReverseDual(const U64& a); diff --git a/src/frontend/ir/microinstruction.cpp b/src/frontend/ir/microinstruction.cpp index 9e0bdc8b..0f4636a1 100644 --- a/src/frontend/ir/microinstruction.cpp +++ b/src/frontend/ir/microinstruction.cpp @@ -138,6 +138,7 @@ bool Inst::ReadsFromCoreRegister() const { case Opcode::A32GetExtendedRegister64: case Opcode::A64GetW: case Opcode::A64GetX: + case Opcode::A64GetSP: return true; default: @@ -153,6 +154,7 @@ bool Inst::WritesToCoreRegister() const { case Opcode::A32BXWritePC: case Opcode::A64SetW: case Opcode::A64SetX: + case Opcode::A64SetSP: return true; default: @@ -252,26 +254,55 @@ bool Inst::MayHaveSideEffects() const { IsCoprocessorInstruction(); } +bool Inst::IsAPseudoOperation() const { + switch (op) { + case Opcode::GetCarryFromOp: + 
case Opcode::GetOverflowFromOp: + case Opcode::GetGEFromOp: + case Opcode::GetNZCVFromOp: + return true; + + default: + return false; + } +} + +bool Inst::MayGetNZCVFromOp() const { + switch (op) { + case Opcode::Add32: + case Opcode::Add64: + case Opcode::Sub32: + case Opcode::Sub64: + return true; + + default: + return false; + } +} + bool Inst::AreAllArgsImmediates() const { return std::all_of(args.begin(), args.begin() + NumArgs(), [](const auto& value){ return value.IsImmediate(); }); } bool Inst::HasAssociatedPseudoOperation() const { - return carry_inst || overflow_inst || ge_inst; + return carry_inst || overflow_inst || ge_inst || nzcv_inst; } Inst* Inst::GetAssociatedPseudoOperation(Opcode opcode) { // This is faster than doing a search through the block. switch (opcode) { - case IR::Opcode::GetCarryFromOp: + case Opcode::GetCarryFromOp: ASSERT(!carry_inst || carry_inst->GetOpcode() == Opcode::GetCarryFromOp); return carry_inst; - case IR::Opcode::GetOverflowFromOp: + case Opcode::GetOverflowFromOp: ASSERT(!overflow_inst || overflow_inst->GetOpcode() == Opcode::GetOverflowFromOp); return overflow_inst; - case IR::Opcode::GetGEFromOp: + case Opcode::GetGEFromOp: ASSERT(!ge_inst || ge_inst->GetOpcode() == Opcode::GetGEFromOp); return ge_inst; + case Opcode::GetNZCVFromOp: + ASSERT(!nzcv_inst || nzcv_inst->GetOpcode() == Opcode::GetNZCVFromOp); + return nzcv_inst; default: break; } @@ -345,6 +376,11 @@ void Inst::Use(const Value& value) { ASSERT_MSG(!value.GetInst()->ge_inst, "Only one of each type of pseudo-op allowed"); value.GetInst()->ge_inst = this; break; + case Opcode::GetNZCVFromOp: + ASSERT_MSG(!value.GetInst()->nzcv_inst, "Only one of each type of pseudo-op allowed"); + ASSERT_MSG(MayGetNZCVFromOp(), "This instruction doesn't support the GetNZCVFromOp pseudo-op"); + value.GetInst()->nzcv_inst = this; + break; default: break; } @@ -366,6 +402,10 @@ void Inst::UndoUse(const Value& value) { ASSERT(value.GetInst()->ge_inst->GetOpcode() == Opcode::GetGEFromOp); value.GetInst()->ge_inst = nullptr; break; + case Opcode::GetNZCVFromOp: + ASSERT(value.GetInst()->nzcv_inst->GetOpcode() == Opcode::GetNZCVFromOp); + value.GetInst()->nzcv_inst = nullptr; + break; default: break; } diff --git a/src/frontend/ir/microinstruction.h b/src/frontend/ir/microinstruction.h index 70ac48c3..04fd7bdf 100644 --- a/src/frontend/ir/microinstruction.h +++ b/src/frontend/ir/microinstruction.h @@ -76,6 +76,13 @@ public: /// Determines whether or not this instruction may have side-effects. bool MayHaveSideEffects() const; + /// Determines whether or not this instruction is a pseudo-instruction. + /// Pseudo-instructions depend on their parent instructions for their semantics. + bool IsAPseudoOperation() const; + + /// Determines whether or not this instruction supports the GetNZCVFromOp pseudo-operation. + bool MayGetNZCVFromOp() const; + /// Determines if all arguments of this instruction are immediates.
bool AreAllArgsImmediates() const; @@ -116,6 +123,7 @@ private: Inst* ge_inst; }; Inst* overflow_inst = nullptr; + Inst* nzcv_inst = nullptr; }; } // namespace IR diff --git a/src/frontend/ir/opcodes.h b/src/frontend/ir/opcodes.h index 0012034d..e58abcbb 100644 --- a/src/frontend/ir/opcodes.h +++ b/src/frontend/ir/opcodes.h @@ -47,6 +47,7 @@ enum class Type { F64 = 1 << 11, F128 = 1 << 12, CoprocInfo = 1 << 13, + NZCVFlags = 1 << 14, }; constexpr Type operator|(Type a, Type b) { diff --git a/src/frontend/ir/opcodes.inc b/src/frontend/ir/opcodes.inc index ebb264a4..39a71cb4 100644 --- a/src/frontend/ir/opcodes.inc +++ b/src/frontend/ir/opcodes.inc @@ -35,10 +35,14 @@ A32OPC(GetFpscrNZCV, T::U32, A32OPC(SetFpscrNZCV, T::Void, T::U32, ) // A64 Context getters/setters +A64OPC(GetCFlag, T::U1, ) +A64OPC(SetNZCV, T::Void, T::NZCVFlags ) A64OPC(GetW, T::U32, T::A64Reg ) A64OPC(GetX, T::U64, T::A64Reg ) +A64OPC(GetSP, T::U64, ) A64OPC(SetW, T::Void, T::A64Reg, T::U32 ) A64OPC(SetX, T::Void, T::A64Reg, T::U64 ) +A64OPC(SetSP, T::Void, T::U64 ) // Hints OPCODE(PushRSB, T::Void, T::U64 ) @@ -47,6 +51,7 @@ OPCODE(PushRSB, T::Void, T::U64 OPCODE(GetCarryFromOp, T::U1, T::U32 ) OPCODE(GetOverflowFromOp, T::U1, T::U32 ) OPCODE(GetGEFromOp, T::U32, T::U32 ) +OPCODE(GetNZCVFromOp, T::NZCVFlags, T::Opaque ) // Calculations OPCODE(Pack2x32To1x64, T::U64, T::U32, T::U32 ) @@ -66,22 +71,26 @@ OPCODE(ArithmeticShiftRight64, T::U64, T::U64, T::U8 OPCODE(RotateRight32, T::U32, T::U32, T::U8, T::U1 ) OPCODE(RotateRight64, T::U64, T::U64, T::U8 ) OPCODE(RotateRightExtended, T::U32, T::U32, T::U1 ) -OPCODE(AddWithCarry, T::U32, T::U32, T::U32, T::U1 ) -OPCODE(SubWithCarry, T::U32, T::U32, T::U32, T::U1 ) -OPCODE(Add64, T::U64, T::U64, T::U64 ) -OPCODE(Sub64, T::U64, T::U64, T::U64 ) -OPCODE(Mul, T::U32, T::U32, T::U32 ) +OPCODE(Add32, T::U32, T::U32, T::U32, T::U1 ) +OPCODE(Add64, T::U64, T::U64, T::U64, T::U1 ) +OPCODE(Sub32, T::U32, T::U32, T::U32, T::U1 ) +OPCODE(Sub64, T::U64, T::U64, T::U64, T::U1 ) +OPCODE(Mul32, T::U32, T::U32, T::U32 ) OPCODE(Mul64, T::U64, T::U64, T::U64 ) OPCODE(And, T::U32, T::U32, T::U32 ) OPCODE(Eor, T::U32, T::U32, T::U32 ) OPCODE(Or, T::U32, T::U32, T::U32 ) OPCODE(Not, T::U32, T::U32 ) -OPCODE(SignExtendWordToLong, T::U64, T::U32 ) -OPCODE(SignExtendHalfToWord, T::U32, T::U16 ) OPCODE(SignExtendByteToWord, T::U32, T::U8 ) -OPCODE(ZeroExtendWordToLong, T::U64, T::U32 ) -OPCODE(ZeroExtendHalfToWord, T::U32, T::U16 ) +OPCODE(SignExtendHalfToWord, T::U32, T::U16 ) +OPCODE(SignExtendByteToLong, T::U64, T::U8 ) +OPCODE(SignExtendHalfToLong, T::U64, T::U16 ) +OPCODE(SignExtendWordToLong, T::U64, T::U32 ) OPCODE(ZeroExtendByteToWord, T::U32, T::U8 ) +OPCODE(ZeroExtendHalfToWord, T::U32, T::U16 ) +OPCODE(ZeroExtendByteToLong, T::U64, T::U8 ) +OPCODE(ZeroExtendHalfToLong, T::U64, T::U16 ) +OPCODE(ZeroExtendWordToLong, T::U64, T::U32 ) OPCODE(ByteReverseWord, T::U32, T::U32 ) OPCODE(ByteReverseHalf, T::U16, T::U16 ) OPCODE(ByteReverseDual, T::U64, T::U64 ) diff --git a/src/frontend/ir/value.h b/src/frontend/ir/value.h index 6a5404c2..4b93e4fd 100644 --- a/src/frontend/ir/value.h +++ b/src/frontend/ir/value.h @@ -94,10 +94,12 @@ using U16 = TypedValue; using U32 = TypedValue; using U64 = TypedValue; using U32U64 = TypedValue; +using UAny = TypedValue; using F32 = TypedValue; using F64 = TypedValue; using F128 = TypedValue; using F32F64 = TypedValue; +using NZCV = TypedValue; } // namespace IR } // namespace Dynarmic
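
Note on the flag plumbing above: when an Add32/Add64/Sub32/Sub64 has an associated GetNZCVFromOp pseudo-op, DoNZCV reserves RAX and the emitter captures the host flags with lahf/seto al, so the packed value carries SF in bit 15, ZF in bit 14, CF in bit 8 and OF in bit 0. A64SetNZCV then has to deposit those four flags as N/Z/C/V in bits 31..28 of A64JitState::CPSR_nzcv, which is the layout A64GetCFlag assumes when it reads bit 29. The snippet below is a plain C++ restatement of that repacking, written with an explicit 16-bit shift so the masked multiply is easy to follow; it illustrates the bit arithmetic only and is not a copy of the emitted instruction sequence.

#include <cstdint>

// Host flags after `lahf; seto al`:  SF = bit 15, ZF = bit 14, CF = bit 8, OF = bit 0.
// ARM CPSR_nzcv layout:              N = bit 31,  Z = bit 30,  C = bit 29, V = bit 28.
constexpr uint32_t host_to_arm_nzcv(uint32_t lahf_seto) {
    uint32_t x = lahf_seto & 0b11000001'00000001; // keep SF, ZF, CF, OF
    x *= 0b00010000'00100001;                     // 0x1021: copies each flag to offsets +0, +5 and +12
    x <<= 16;                                     // align the useful copies with bits 31..28
    return x & 0xF0000000;                        // discard the stray copies
}

static_assert(host_to_arm_nzcv(1u << 15) == 1u << 31, "SF -> N");
static_assert(host_to_arm_nzcv(1u << 14) == 1u << 30, "ZF -> Z");
static_assert(host_to_arm_nzcv(1u << 8)  == 1u << 29, "CF -> C");
static_assert(host_to_arm_nzcv(1u << 0)  == 1u << 28, "OF -> V");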
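
The "x64 CF is inverse of what the ARM carry flag is here" comment in EmitSub is why the ARM carry is extracted with setnc rather than setc: AArch64 defines SUBS as AddWithCarry(a, NOT(b), 1), so C is set when no borrow occurs, which is the complement of the x64 borrow flag. A small reference model of the 32-bit case, with hypothetical names, for checking the flag outputs by hand:

#include <cstdint>

struct Flags { bool n, z, c, v; };

// SUBS Wd, Wn, Wm  ==  AddWithCarry(a, ~b, 1); carry_in is 1 for plain SUBS/CMP.
Flags arm_subs32(uint32_t a, uint32_t b, bool carry_in) {
    const uint64_t wide = uint64_t{a} + uint64_t{static_cast<uint32_t>(~b)} + (carry_in ? 1u : 0u);
    const uint32_t result = static_cast<uint32_t>(wide);
    Flags f{};
    f.n = (result >> 31) != 0;
    f.z = result == 0;
    f.c = (wide >> 32) != 0;                           // "no borrow" — x64 CF is the opposite
    f.v = ((~(a ^ ~b) & (a ^ result)) >> 31) != 0;     // operands same sign, result sign differs
    return f;
}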
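
TranslatorVisitor::ExtendReg follows the usual AArch64 extended-register recipe: take the low 8/16/32/64 bits of the register selected by option, zero- or sign-extend to the operation's datasize, then LSL by the 0-4 bit shift. A scalar sketch of the same computation in ordinary C++ (the function name and parameters are illustrative, not dynarmic API):

#include <cassert>
#include <cstdint>

// option encodes 0-3 = UXTB/UXTH/UXTW/UXTX and 4-7 = SXTB/SXTH/SXTW/SXTX.
uint64_t extend_reg(uint64_t reg_value, unsigned option, unsigned shift, unsigned bitsize) {
    assert(shift <= 4);
    assert(bitsize == 32 || bitsize == 64);

    const unsigned len = 8u << (option & 0b011);   // source width: 8, 16, 32 or 64 bits
    const bool is_signed = (option & 0b100) != 0;

    uint64_t value = len >= 64 ? reg_value : (reg_value & ((uint64_t{1} << len) - 1));
    if (is_signed && len < 64 && ((value >> (len - 1)) & 1)) {
        value |= ~uint64_t{0} << len;              // sign-extend to 64 bits
    }

    value <<= shift;                               // LSL #shift
    return bitsize == 64 ? value : (value & 0xFFFF'FFFFu); // truncate to the datasize
}

For example, with option 0b010 (UXTW) and shift 2, ADD_ext adds Rn to the low 32 bits of Rm shifted left by two.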