diff --git a/src/backend/x64/emit_x64_floating_point.cpp b/src/backend/x64/emit_x64_floating_point.cpp
index d59ddbbd..9ffc248a 100644
--- a/src/backend/x64/emit_x64_floating_point.cpp
+++ b/src/backend/x64/emit_x64_floating_point.cpp
@@ -38,6 +38,9 @@ namespace {
 
 const Xbyak::Reg64 INVALID_REG = Xbyak::Reg64(-1);
 
+constexpr u64 f16_negative_zero = 0x8000;
+constexpr u64 f16_non_sign_mask = 0x7fff;
+
 constexpr u64 f32_negative_zero = 0x80000000u;
 constexpr u64 f32_nan = 0x7fc00000u;
 constexpr u64 f32_non_sign_mask = 0x7fffffffu;
@@ -323,9 +326,18 @@ void FPThreeOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Function fn)
 
 } // anonymous namespace
 
+void EmitX64::EmitFPAbs16(EmitContext& ctx, IR::Inst* inst) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
+
+    code.pand(result, code.MConst(xword, f16_non_sign_mask));
+
+    ctx.reg_alloc.DefineValue(inst, result);
+}
+
 void EmitX64::EmitFPAbs32(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
+    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
 
     code.pand(result, code.MConst(xword, f32_non_sign_mask));
 
@@ -334,16 +346,25 @@ void EmitX64::EmitFPAbs64(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
+    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
 
     code.pand(result, code.MConst(xword, f64_non_sign_mask));
 
     ctx.reg_alloc.DefineValue(inst, result);
 }
 
+void EmitX64::EmitFPNeg16(EmitContext& ctx, IR::Inst* inst) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
+
+    code.pxor(result, code.MConst(xword, f16_negative_zero));
+
+    ctx.reg_alloc.DefineValue(inst, result);
+}
+
 void EmitX64::EmitFPNeg32(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
+    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
 
     code.pxor(result, code.MConst(xword, f32_negative_zero));
 
@@ -352,7 +373,7 @@ void EmitX64::EmitFPNeg64(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
+    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
 
     code.pxor(result, code.MConst(xword, f64_negative_zero));
 
diff --git a/src/frontend/A64/translate/impl/floating_point_data_processing_one_register.cpp b/src/frontend/A64/translate/impl/floating_point_data_processing_one_register.cpp
index 2b26d85f..66b6d377 100644
--- a/src/frontend/A64/translate/impl/floating_point_data_processing_one_register.cpp
+++ b/src/frontend/A64/translate/impl/floating_point_data_processing_one_register.cpp
@@ -12,36 +12,36 @@ namespace Dynarmic::A64 {
 
 bool TranslatorVisitor::FMOV_float(Imm<2> type, Vec Vn, Vec Vd) {
     const auto datasize = FPGetDataSize(type);
-    if (!datasize || *datasize == 16) {
+    if (!datasize) {
         return UnallocatedEncoding();
     }
 
-    const IR::U128 operand = V(*datasize, Vn);
+    const IR::U16U32U64 operand = V_scalar(*datasize, Vn);
 
-    V(*datasize, Vd, operand);
+    V_scalar(*datasize, Vd, operand);
     return true;
 }
 
 bool TranslatorVisitor::FABS_float(Imm<2> type, Vec Vn, Vec Vd) {
     const auto datasize = FPGetDataSize(type);
-    if (!datasize || *datasize == 16) {
+    if (!datasize) {
         return UnallocatedEncoding();
     }
 
-    const IR::U32U64 operand = V_scalar(*datasize, Vn);
-    const IR::U32U64 result = ir.FPAbs(operand);
+    const IR::U16U32U64 operand = V_scalar(*datasize, Vn);
+    const IR::U16U32U64 result = ir.FPAbs(operand);
 
     V_scalar(*datasize, Vd, result);
     return true;
 }
 
 bool TranslatorVisitor::FNEG_float(Imm<2> type, Vec Vn, Vec Vd) {
     const auto datasize = FPGetDataSize(type);
-    if (!datasize || *datasize == 16) {
+    if (!datasize) {
         return UnallocatedEncoding();
     }
 
-    const IR::U32U64 operand = V_scalar(*datasize, Vn);
-    const IR::U32U64 result = ir.FPNeg(operand);
+    const IR::U16U32U64 operand = V_scalar(*datasize, Vn);
+    const IR::U16U32U64 result = ir.FPNeg(operand);
 
     V_scalar(*datasize, Vd, result);
     return true;
 }
diff --git a/src/frontend/ir/ir_emitter.cpp b/src/frontend/ir/ir_emitter.cpp
index 1da47e4f..6324b346 100644
--- a/src/frontend/ir/ir_emitter.cpp
+++ b/src/frontend/ir/ir_emitter.cpp
@@ -1773,11 +1773,17 @@ U128 IREmitter::ZeroVector() {
     return Inst<U128>(Opcode::ZeroVector);
 }
 
-U32U64 IREmitter::FPAbs(const U32U64& a) {
-    if (a.GetType() == Type::U32) {
+U16U32U64 IREmitter::FPAbs(const U16U32U64& a) {
+    switch (a.GetType()) {
+    case Type::U16:
+        return Inst<U16>(Opcode::FPAbs16, a);
+    case Type::U32:
         return Inst<U32>(Opcode::FPAbs32, a);
-    } else {
+    case Type::U64:
         return Inst<U64>(Opcode::FPAbs64, a);
+    default:
+        UNREACHABLE();
+        return U16U32U64{};
     }
 }
@@ -1880,11 +1886,17 @@ U32U64 IREmitter::FPMulX(const U32U64& a, const U32U64& b) {
 }
 
-U32U64 IREmitter::FPNeg(const U32U64& a) {
-    if (a.GetType() == Type::U32) {
+U16U32U64 IREmitter::FPNeg(const U16U32U64& a) {
+    switch (a.GetType()) {
+    case Type::U16:
+        return Inst<U16>(Opcode::FPNeg16, a);
+    case Type::U32:
         return Inst<U32>(Opcode::FPNeg32, a);
-    } else {
+    case Type::U64:
         return Inst<U64>(Opcode::FPNeg64, a);
+    default:
+        UNREACHABLE();
+        return U16U32U64{};
     }
 }
diff --git a/src/frontend/ir/ir_emitter.h b/src/frontend/ir/ir_emitter.h
index f4533801..291712ee 100644
--- a/src/frontend/ir/ir_emitter.h
+++ b/src/frontend/ir/ir_emitter.h
@@ -292,7 +292,7 @@ public:
     U128 VectorZeroUpper(const U128& a);
     U128 ZeroVector();
 
-    U32U64 FPAbs(const U32U64& a);
+    U16U32U64 FPAbs(const U16U32U64& a);
     U32U64 FPAdd(const U32U64& a, const U32U64& b, bool fpcr_controlled);
     NZCV FPCompare(const U32U64& a, const U32U64& b, bool exc_on_qnan, bool fpcr_controlled);
     U32U64 FPDiv(const U32U64& a, const U32U64& b, bool fpcr_controlled);
@@ -303,7 +303,7 @@ public:
     U32U64 FPMul(const U32U64& a, const U32U64& b, bool fpcr_controlled);
     U32U64 FPMulAdd(const U32U64& addend, const U32U64& op1, const U32U64& op2, bool fpcr_controlled);
     U32U64 FPMulX(const U32U64& a, const U32U64& b);
-    U32U64 FPNeg(const U32U64& a);
+    U16U32U64 FPNeg(const U16U32U64& a);
     U32U64 FPRecipEstimate(const U32U64& a);
     U16U32U64 FPRecipExponent(const U16U32U64& a);
     U32U64 FPRecipStepFused(const U32U64& a, const U32U64& b);
diff --git a/src/frontend/ir/opcodes.inc b/src/frontend/ir/opcodes.inc
index 734a0934..b5e8f691 100644
--- a/src/frontend/ir/opcodes.inc
+++ b/src/frontend/ir/opcodes.inc
@@ -460,6 +460,7 @@ OPCODE(VectorZeroUpper, U128, U128
 OPCODE(ZeroVector, U128, )
 
 // Floating-point operations
+OPCODE(FPAbs16, U16, U16 )
 OPCODE(FPAbs32, U32, U32 )
 OPCODE(FPAbs64, U64, U64 )
 OPCODE(FPAdd32, U32, U32, U32 )
@@ -482,6 +483,7 @@ OPCODE(FPMulAdd32, U32, U32,
 OPCODE(FPMulAdd64, U64, U64, U64, U64 )
 OPCODE(FPMulX32, U32, U32, U32 )
 OPCODE(FPMulX64, U64, U64, U64 )
+OPCODE(FPNeg16, U16, U16 )
 OPCODE(FPNeg32, U32, U32 )
 OPCODE(FPNeg64, U64, U64 )
 OPCODE(FPRecipEstimate32, U32, U32 )
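
Note on the two new emitters: IEEE-754 binary16 keeps its sign in bit 15 and nothing else there, so absolute value and negation are pure bit operations with no NaN or denormal special cases. That is why a single pand with f16_non_sign_mask (0x7fff) implements FPAbs16 and a single pxor with f16_negative_zero (0x8000) implements FPNeg16; the masks are declared as u64 to match MConst, which builds XMM memory constants from 64-bit values, and only the low 16 bits matter here. The host-side C++ sketch below (not part of the patch; fp16_abs and fp16_neg are illustrative names) models the same semantics:

#include <cstdint>

namespace {

// Bit 15 is the sign of an IEEE-754 binary16 value; the remaining
// bits hold the exponent and fraction.
constexpr std::uint16_t f16_negative_zero = 0x8000;
constexpr std::uint16_t f16_non_sign_mask = 0x7fff;

// Scalar analogue of EmitFPAbs16's pand: clear the sign bit.
constexpr std::uint16_t fp16_abs(std::uint16_t value) {
    return value & f16_non_sign_mask;
}

// Scalar analogue of EmitFPNeg16's pxor: flip the sign bit.
constexpr std::uint16_t fp16_neg(std::uint16_t value) {
    return value ^ f16_negative_zero;
}

// Spot checks against well-known binary16 encodings.
static_assert(fp16_abs(0xC000) == 0x4000, "abs(-2.0) == 2.0");
static_assert(fp16_abs(0xFE00) == 0x7E00, "abs leaves a quiet NaN payload intact");
static_assert(fp16_neg(0x3C00) == 0xBC00, "neg(1.0) == -1.0");
static_assert(fp16_neg(0x8000) == 0x0000, "neg(-0.0) == +0.0");

} // anonymous namespace

int main() { return 0; }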