diff --git a/src/backend/x64/emit_x64_floating_point.cpp b/src/backend/x64/emit_x64_floating_point.cpp
index b710a989..8543bc94 100644
--- a/src/backend/x64/emit_x64_floating_point.cpp
+++ b/src/backend/x64/emit_x64_floating_point.cpp
@@ -754,6 +754,10 @@ static void EmitFPRecipEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* i
     code.CallFunction(&FP::FPRecipEstimate<FPT>);
 }
 
+void EmitX64::EmitFPRecipEstimate16(EmitContext& ctx, IR::Inst* inst) {
+    EmitFPRecipEstimate<16>(code, ctx, inst);
+}
+
 void EmitX64::EmitFPRecipEstimate32(EmitContext& ctx, IR::Inst* inst) {
     EmitFPRecipEstimate<32>(code, ctx, inst);
 }
@@ -787,40 +791,42 @@ template<size_t fsize>
 static void EmitFPRecipStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
     using FPT = mp::unsigned_integer_of_size<fsize>;
 
-    if (code.DoesCpuSupport(Xbyak::util::Cpu::tFMA)) {
-        auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    if constexpr (fsize != 16) {
+        if (code.DoesCpuSupport(Xbyak::util::Cpu::tFMA)) {
+            auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
-        Xbyak::Label end, fallback;
+            Xbyak::Label end, fallback;
 
-        const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
-        const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
-        const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
+            const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
+            const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
+            const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
 
-        code.movaps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 2>()));
-        FCODE(vfnmadd231s)(result, operand1, operand2);
-        FCODE(ucomis)(result, result);
-        code.jp(fallback, code.T_NEAR);
-        code.L(end);
+            code.movaps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 2>()));
+            FCODE(vfnmadd231s)(result, operand1, operand2);
+            FCODE(ucomis)(result, result);
+            code.jp(fallback, code.T_NEAR);
+            code.L(end);
 
-        code.SwitchToFarCode();
-        code.L(fallback);
+            code.SwitchToFarCode();
+            code.L(fallback);
 
-        code.sub(rsp, 8);
-        ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
-        code.movq(code.ABI_PARAM1, operand1);
-        code.movq(code.ABI_PARAM2, operand2);
-        code.mov(code.ABI_PARAM3.cvt32(), ctx.FPCR().Value());
-        code.lea(code.ABI_PARAM4, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
-        code.CallFunction(&FP::FPRecipStepFused<FPT>);
-        code.movq(result, code.ABI_RETURN);
-        ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
-        code.add(rsp, 8);
+            code.sub(rsp, 8);
+            ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
+            code.movq(code.ABI_PARAM1, operand1);
+            code.movq(code.ABI_PARAM2, operand2);
+            code.mov(code.ABI_PARAM3.cvt32(), ctx.FPCR().Value());
+            code.lea(code.ABI_PARAM4, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
+            code.CallFunction(&FP::FPRecipStepFused<FPT>);
+            code.movq(result, code.ABI_RETURN);
+            ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
+            code.add(rsp, 8);
 
-        code.jmp(end, code.T_NEAR);
-        code.SwitchToNearCode();
+            code.jmp(end, code.T_NEAR);
+            code.SwitchToNearCode();
 
-        ctx.reg_alloc.DefineValue(inst, result);
-        return;
+            ctx.reg_alloc.DefineValue(inst, result);
+            return;
+        }
     }
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
@@ -830,6 +836,10 @@ static void EmitFPRecipStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst*
     code.CallFunction(&FP::FPRecipStepFused<FPT>);
 }
 
+void EmitX64::EmitFPRecipStepFused16(EmitContext& ctx, IR::Inst* inst) {
+    EmitFPRecipStepFused<16>(code, ctx, inst);
+}
+
 void EmitX64::EmitFPRecipStepFused32(EmitContext& ctx, IR::Inst* inst) {
     EmitFPRecipStepFused<32>(code, ctx, inst);
 }
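Reviewer note on the hunks above: the FMA fast path is now compiled only for fsize == 32/64; SSE/AVX provide no half-precision arithmetic, so the u16 instantiation always falls through to the soft-float call. A minimal sketch of what the fast path computes, written over plain floats purely as illustration (not part of the patch):

    #include <cmath>

    // vfnmadd231ss result, op1, op2  ==>  result = -(op1 * op2) + result,
    // where result is preloaded with 2.0 via FP::FPValue<FPT, false, 0, 2>().
    float recip_step_fast_path(float op1, float op2) {
        const float result = std::fma(-op1, op2, 2.0f); // 2.0 - op1*op2
        // The ucomis/jp pair in the emitted code detects a NaN result and
        // re-runs the operation through FP::FPRecipStepFused<FPT> so the
        // FPSR cumulative exception bits come out exactly right.
        return result;
    }
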
diff --git a/src/backend/x64/emit_x64_vector_floating_point.cpp b/src/backend/x64/emit_x64_vector_floating_point.cpp
index faa30161..7023ce35 100644
--- a/src/backend/x64/emit_x64_vector_floating_point.cpp
+++ b/src/backend/x64/emit_x64_vector_floating_point.cpp
@@ -1092,6 +1092,10 @@ static void EmitRecipEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* ins
     });
 }
 
+void EmitX64::EmitFPVectorRecipEstimate16(EmitContext& ctx, IR::Inst* inst) {
+    EmitRecipEstimate<16>(code, ctx, inst);
+}
+
 void EmitX64::EmitFPVectorRecipEstimate32(EmitContext& ctx, IR::Inst* inst) {
     EmitRecipEstimate<32>(code, ctx, inst);
 }
@@ -1110,41 +1114,47 @@ static void EmitRecipStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
         }
     };
 
-    if (code.DoesCpuSupport(Xbyak::util::Cpu::tFMA) && code.DoesCpuSupport(Xbyak::util::Cpu::tAVX)) {
-        auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    if constexpr (fsize != 16) {
+        if (code.DoesCpuSupport(Xbyak::util::Cpu::tFMA) && code.DoesCpuSupport(Xbyak::util::Cpu::tAVX)) {
+            auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
-        const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
-        const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
-        const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
-        const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+            const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
+            const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
+            const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
+            const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
-        Xbyak::Label end, fallback;
+            Xbyak::Label end, fallback;
 
-        code.movaps(result, GetVectorOf<fsize, false, 0, 2>(code));
-        FCODE(vfnmadd231p)(result, operand1, operand2);
+            code.movaps(result, GetVectorOf<fsize, false, 0, 2>(code));
+            FCODE(vfnmadd231p)(result, operand1, operand2);
 
-        FCODE(vcmpunordp)(tmp, result, result);
-        code.vptest(tmp, tmp);
-        code.jnz(fallback, code.T_NEAR);
-        code.L(end);
+            FCODE(vcmpunordp)(tmp, result, result);
+            code.vptest(tmp, tmp);
+            code.jnz(fallback, code.T_NEAR);
+            code.L(end);
 
-        code.SwitchToFarCode();
-        code.L(fallback);
-        code.sub(rsp, 8);
-        ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
-        EmitThreeOpFallbackWithoutRegAlloc(code, ctx, result, operand1, operand2, fallback_fn);
-        ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
-        code.add(rsp, 8);
-        code.jmp(end, code.T_NEAR);
-        code.SwitchToNearCode();
+            code.SwitchToFarCode();
+            code.L(fallback);
+            code.sub(rsp, 8);
+            ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
+            EmitThreeOpFallbackWithoutRegAlloc(code, ctx, result, operand1, operand2, fallback_fn);
+            ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
+            code.add(rsp, 8);
+            code.jmp(end, code.T_NEAR);
+            code.SwitchToNearCode();
 
-        ctx.reg_alloc.DefineValue(inst, result);
-        return;
+            ctx.reg_alloc.DefineValue(inst, result);
+            return;
+        }
     }
 
     EmitThreeOpFallback(code, ctx, inst, fallback_fn);
 }
 
+void EmitX64::EmitFPVectorRecipStepFused16(EmitContext& ctx, IR::Inst* inst) {
+    EmitRecipStepFused<16>(code, ctx, inst);
+}
+
 void EmitX64::EmitFPVectorRecipStepFused32(EmitContext& ctx, IR::Inst* inst) {
     EmitRecipStepFused<32>(code, ctx, inst);
 }
diff --git a/src/common/fp/info.h b/src/common/fp/info.h
index 357fd0c3..9253a2ee 100644
--- a/src/common/fp/info.h
+++ b/src/common/fp/info.h
@@ -88,7 +88,7 @@ struct FPInfo {
 template<typename FPT, bool sign, int exponent, FPT value>
 constexpr FPT FPValue() {
     if constexpr (value == 0) {
-        return FPInfo<FPT>::Zero(sign);
+        return FPT(FPInfo<FPT>::Zero(sign));
     }
 
     constexpr int point_position = static_cast<int>(FPInfo<FPT>::explicit_mantissa_width);
@@ -100,7 +100,7 @@ constexpr FPT FPValue() {
     constexpr FPT mantissa = (value << offset) & FPInfo<FPT>::mantissa_mask;
     constexpr FPT biased_exponent = static_cast<FPT>(normalized_exponent + FPInfo<FPT>::exponent_bias);
 
-    return FPInfo<FPT>::Zero(sign) | mantissa | (biased_exponent << FPInfo<FPT>::explicit_mantissa_width);
+    return FPT(FPInfo<FPT>::Zero(sign) | mantissa | (biased_exponent << FPInfo<FPT>::explicit_mantissa_width));
 }
 
 } // namespace Dynarmic::FP
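Reviewer note: the new FPT(...) casts in info.h are needed because u16 operands of << and | promote to int, so the un-cast return value narrows implicitly and trips truncation warnings once FPValue is instantiated with FPT = u16. Worked through for the constant the backend loads above, FPValue<u16, false, 0, 2>() == 0x4000, half-precision 2.0 — the literals below restate FPInfo<u16>'s fields and are checked with static_assert (sketch, not repository code):

    #include <cstdint>
    using u16 = std::uint16_t;

    constexpr int point_position = 10;           // FPInfo<u16>::explicit_mantissa_width
    constexpr int highest_bit = 1;               // Common::HighestSetBit(2)
    constexpr int offset = point_position - highest_bit;              // 9
    constexpr int normalized_exponent = 0 - offset + point_position;  // 1
    constexpr u16 mantissa = (2 << offset) & 0x3FF;                   // 0x400 & 0x3FF == 0
    constexpr u16 biased_exponent = normalized_exponent + 15;         // exponent_bias == 15
    static_assert(u16(mantissa | (biased_exponent << point_position)) == 0x4000);
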
diff --git a/src/common/fp/op/FPRecipEstimate.cpp b/src/common/fp/op/FPRecipEstimate.cpp
index 95e02a07..c35a6569 100644
--- a/src/common/fp/op/FPRecipEstimate.cpp
+++ b/src/common/fp/op/FPRecipEstimate.cpp
@@ -31,12 +31,12 @@ FPT FPRecipEstimate(FPT op, FPCR fpcr, FPSR& fpsr) {
     }
 
     if (type == FPType::Infinity) {
-        return FPInfo<FPT>::Zero(sign);
+        return FPT(FPInfo<FPT>::Zero(sign));
     }
 
     if (type == FPType::Zero) {
         FPProcessException(FPExc::DivideByZero, fpcr, fpsr);
-        return FPInfo<FPT>::Infinity(sign);
+        return FPT(FPInfo<FPT>::Infinity(sign));
     }
 
     if (value.exponent < FPInfo<FPT>::exponent_min - 2) {
@@ -58,13 +58,13 @@ FPT FPRecipEstimate(FPT op, FPCR fpcr, FPSR& fpsr) {
 
         FPProcessException(FPExc::Overflow, fpcr, fpsr);
         FPProcessException(FPExc::Inexact, fpcr, fpsr);
-        return overflow_to_inf ? FPInfo<FPT>::Infinity(sign) : FPInfo<FPT>::MaxNormal(sign);
+        return overflow_to_inf ? FPT(FPInfo<FPT>::Infinity(sign)) : FPT(FPInfo<FPT>::MaxNormal(sign));
     }
 
     if ((fpcr.FZ() && !std::is_same_v<FPT, u16>) || (fpcr.FZ16() && std::is_same_v<FPT, u16>)) {
         if (value.exponent >= -FPInfo<FPT>::exponent_min) {
             fpsr.UFC(true);
-            return FPInfo<FPT>::Zero(sign);
+            return FPT(FPInfo<FPT>::Zero(sign));
         }
     }
 
@@ -87,12 +87,13 @@ FPT FPRecipEstimate(FPT op, FPCR fpcr, FPSR& fpsr) {
         }
     }
 
-    const FPT bits_sign = FPInfo<FPT>::Zero(sign);
+    const FPT bits_sign = FPT(FPInfo<FPT>::Zero(sign));
     const FPT bits_exponent = static_cast<FPT>(result_exponent + FPInfo<FPT>::exponent_bias);
     const FPT bits_mantissa = static_cast<FPT>(estimate);
 
-    return (bits_exponent << FPInfo<FPT>::explicit_mantissa_width) | (bits_mantissa & FPInfo<FPT>::mantissa_mask) | bits_sign;
+    return FPT((bits_exponent << FPInfo<FPT>::explicit_mantissa_width) | (bits_mantissa & FPInfo<FPT>::mantissa_mask) | bits_sign);
 }
 
+template u16 FPRecipEstimate<u16>(u16 op, FPCR fpcr, FPSR& fpsr);
 template u32 FPRecipEstimate<u32>(u32 op, FPCR fpcr, FPSR& fpsr);
 template u64 FPRecipEstimate<u64>(u64 op, FPCR fpcr, FPSR& fpsr);
diff --git a/src/common/fp/op/FPRecipStepFused.cpp b/src/common/fp/op/FPRecipStepFused.cpp
index 9ba62212..938650ce 100644
--- a/src/common/fp/op/FPRecipStepFused.cpp
+++ b/src/common/fp/op/FPRecipStepFused.cpp
@@ -21,7 +21,7 @@ FPT FPRecipStepFused(FPT op1, FPT op2, FPCR fpcr, FPSR& fpsr) {
     const auto [type1, sign1, value1] = FPUnpack(op1, fpcr, fpsr);
     const auto [type2, sign2, value2] = FPUnpack(op2, fpcr, fpsr);
-    
+
     if (const auto maybe_nan = FPProcessNaNs(type1, type2, op1, op2, fpcr, fpsr)) {
         return *maybe_nan;
     }
 
@@ -37,18 +37,19 @@ FPT FPRecipStepFused(FPT op1, FPT op2, FPCR fpcr, FPSR& fpsr) {
     }
 
     if (inf1 || inf2) {
-        return FPInfo<FPT>::Infinity(sign1 != sign2);
+        return FPT(FPInfo<FPT>::Infinity(sign1 != sign2));
     }
 
     // result_value = 2.0 + (value1 * value2)
-    FPUnpacked result_value = FusedMulAdd(ToNormalized(false, 0, 2), value1, value2);
+    const FPUnpacked result_value = FusedMulAdd(ToNormalized(false, 0, 2), value1, value2);
 
     if (result_value.mantissa == 0) {
-        return FPInfo<FPT>::Zero(fpcr.RMode() == RoundingMode::TowardsMinusInfinity);
+        return FPT(FPInfo<FPT>::Zero(fpcr.RMode() == RoundingMode::TowardsMinusInfinity));
     }
 
     return FPRound<FPT>(result_value, fpcr, fpsr);
 }
 
+template u16 FPRecipStepFused<u16>(u16 op1, u16 op2, FPCR fpcr, FPSR& fpsr);
 template u32 FPRecipStepFused<u32>(u32 op1, u32 op2, FPCR fpcr, FPSR& fpsr);
 template u64 FPRecipStepFused<u64>(u64 op1, u64 op2, FPCR fpcr, FPSR& fpsr);
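Reviewer note: with the u16 instantiations above, the soft-float path becomes callable for half-precision bit patterns. A usage sketch — the header names mirror this tree, and default-constructed FPCR/FPSR are assumed: FRECPS computes 2.0 - op1 * op2, so feeding it 2.0 (0x4000) and 1.0 (0x3C00) should come back as +0.0 (0x0000) under round-to-nearest:

    #include "common/common_types.h"
    #include "common/fp/fpcr.h"
    #include "common/fp/fpsr.h"
    #include "common/fp/op.h"

    using namespace Dynarmic;

    u16 frecps_h_example() {
        FP::FPCR fpcr;
        FP::FPSR fpsr;
        // 2.0 - (2.0 * 1.0) == 0.0, returned as positive zero.
        return FP::FPRecipStepFused<u16>(0x4000, 0x3C00, fpcr, fpsr);
    }
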
diff --git a/src/frontend/A64/decoder/a64.inc b/src/frontend/A64/decoder/a64.inc
index 865ac8ac..6d44f970 100644
--- a/src/frontend/A64/decoder/a64.inc
+++ b/src/frontend/A64/decoder/a64.inc
@@ -384,7 +384,7 @@ INST(DUP_elt_1,          "DUP (element)",                                 "01011
 INST(FMULX_vec_2,        "FMULX",                                         "010111100z1mmmmm110111nnnnnddddd")
 //INST(FCMEQ_reg_1,      "FCMEQ (register)",                              "01011110010mmmmm001001nnnnnddddd")
 INST(FCMEQ_reg_2,        "FCMEQ (register)",                              "010111100z1mmmmm111001nnnnnddddd")
-//INST(FRECPS_1,         "FRECPS",                                        "01011110010mmmmm001111nnnnnddddd")
+INST(FRECPS_1,           "FRECPS",                                        "01011110010mmmmm001111nnnnnddddd")
 INST(FRECPS_2,           "FRECPS",                                        "010111100z1mmmmm111111nnnnnddddd")
 //INST(FRSQRTS_1,        "FRSQRTS",                                       "01011110110mmmmm001111nnnnnddddd")
 INST(FRSQRTS_2,          "FRSQRTS",                                       "010111101z1mmmmm111111nnnnnddddd")
@@ -418,7 +418,7 @@ INST(FCMLT_2,            "FCMLT (zero)",                                   "01011
 INST(FCVTPS_2,           "FCVTPS (vector)",                               "010111101z100001101010nnnnnddddd")
 //INST(FCVTZS_int_1,     "FCVTZS (vector, integer)",                      "0101111011111001101110nnnnnddddd")
 INST(FCVTZS_int_2,       "FCVTZS (vector, integer)",                      "010111101z100001101110nnnnnddddd")
-//INST(FRECPE_1,         "FRECPE",                                        "0101111011111001110110nnnnnddddd")
+INST(FRECPE_1,           "FRECPE",                                        "0101111011111001110110nnnnnddddd")
 INST(FRECPE_2,           "FRECPE",                                        "010111101z100001110110nnnnnddddd")
 INST(FRECPX_1,           "FRECPX",                                        "0101111011111001111110nnnnnddddd")
 INST(FRECPX_2,           "FRECPX",                                        "010111101z100001111110nnnnnddddd")
@@ -575,7 +575,7 @@ INST(INS_elt,            "INS (element)",                                  "01101
 // Data Processing - FP and SIMD - SIMD Three same
 //INST(FMULX_vec_3,      "FMULX",                                         "0Q001110010mmmmm000111nnnnnddddd")
 //INST(FCMEQ_reg_3,      "FCMEQ (register)",                              "0Q001110010mmmmm001001nnnnnddddd")
-//INST(FRECPS_3,         "FRECPS",                                        "0Q001110010mmmmm001111nnnnnddddd")
+INST(FRECPS_3,           "FRECPS",                                        "0Q001110010mmmmm001111nnnnnddddd")
 //INST(FRSQRTS_3,        "FRSQRTS",                                       "0Q001110110mmmmm001111nnnnnddddd")
 //INST(FCMGE_reg_3,      "FCMGE (register)",                              "0Q101110010mmmmm001001nnnnnddddd")
 //INST(FACGE_3,          "FACGE",                                         "0Q101110010mmmmm001011nnnnnddddd")
@@ -650,7 +650,7 @@ INST(FCVTPS_4,           "FCVTPS (vector)",                                "0Q001
 //INST(FCVTZS_int_3,     "FCVTZS (vector, integer)",                      "0Q00111011111001101110nnnnnddddd")
 INST(FCVTZS_int_4,       "FCVTZS (vector, integer)",                      "0Q0011101z100001101110nnnnnddddd")
 INST(URECPE,             "URECPE",                                        "0Q0011101z100001110010nnnnnddddd")
-//INST(FRECPE_3,         "FRECPE",                                        "0Q00111011111001110110nnnnnddddd")
+INST(FRECPE_3,           "FRECPE",                                        "0Q00111011111001110110nnnnnddddd")
 INST(FRECPE_4,           "FRECPE",                                        "0Q0011101z100001110110nnnnnddddd")
 INST(REV32_asimd,        "REV32 (vector)",                                "0Q101110zz100000000010nnnnnddddd")
 INST(UADDLP,             "UADDLP",                                        "0Q101110zz100000001010nnnnnddddd")
diff --git a/src/frontend/A64/translate/impl/simd_scalar_three_same.cpp b/src/frontend/A64/translate/impl/simd_scalar_three_same.cpp
index 320ea986..f4eaa6e1 100644
--- a/src/frontend/A64/translate/impl/simd_scalar_three_same.cpp
+++ b/src/frontend/A64/translate/impl/simd_scalar_three_same.cpp
@@ -294,6 +294,17 @@ bool TranslatorVisitor::FMULX_vec_2(bool sz, Vec Vm, Vec Vn, Vec Vd) {
     return true;
 }
 
+bool TranslatorVisitor::FRECPS_1(Vec Vm, Vec Vn, Vec Vd) {
+    const size_t esize = 16;
+
+    const IR::U16 operand1 = V_scalar(esize, Vn);
+    const IR::U16 operand2 = V_scalar(esize, Vm);
+    const IR::U16 result = ir.FPRecipStepFused(operand1, operand2);
+
+    V_scalar(esize, Vd, result);
+    return true;
+}
+
 bool TranslatorVisitor::FRECPS_2(bool sz, Vec Vm, Vec Vn, Vec Vd) {
     const size_t esize = sz ? 64 : 32;
 
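Reviewer note: the a64.inc rows flipped on above are the FP16 encodings, which carry no z/sz size field — hence the translators hardcode esize = 16. A quick self-check of the scalar FRECPS_1 pattern against a concrete instruction word; the register choice (Hd=1, Hn=2, Hm=3) and the derived constants are mine, not from the patch:

    #include <cstdint>

    // "01011110010mmmmm001111nnnnnddddd" with the m/n/d fields masked out:
    constexpr bool matches_frecps_1(std::uint32_t inst) {
        constexpr std::uint32_t mask  = 0b11111111'111'00000'111111'00000'00000; // 0xFFE0FC00
        constexpr std::uint32_t value = 0b01011110'010'00000'001111'00000'00000; // 0x5E403C00
        return (inst & mask) == value;
    }
    static_assert(matches_frecps_1(0x5E433C41)); // FRECPS H1, H2, H3
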
diff --git a/src/frontend/A64/translate/impl/simd_scalar_two_register_misc.cpp b/src/frontend/A64/translate/impl/simd_scalar_two_register_misc.cpp
index 5e898caf..17bad70b 100644
--- a/src/frontend/A64/translate/impl/simd_scalar_two_register_misc.cpp
+++ b/src/frontend/A64/translate/impl/simd_scalar_two_register_misc.cpp
@@ -172,6 +172,16 @@ bool TranslatorVisitor::FCVTZU_int_2(bool sz, Vec Vn, Vec Vd) {
     return ScalarFPConvertWithRound(*this, sz, Vn, Vd, FP::RoundingMode::TowardsZero, Signedness::Unsigned);
 }
 
+bool TranslatorVisitor::FRECPE_1(Vec Vn, Vec Vd) {
+    const size_t esize = 16;
+
+    const IR::U16 operand = V_scalar(esize, Vn);
+    const IR::U16 result = ir.FPRecipEstimate(operand);
+
+    V_scalar(esize, Vd, result);
+    return true;
+}
+
 bool TranslatorVisitor::FRECPE_2(bool sz, Vec Vn, Vec Vd) {
     const size_t esize = sz ? 64 : 32;
 
diff --git a/src/frontend/A64/translate/impl/simd_three_same.cpp b/src/frontend/A64/translate/impl/simd_three_same.cpp
index a484dc17..da8939d5 100644
--- a/src/frontend/A64/translate/impl/simd_three_same.cpp
+++ b/src/frontend/A64/translate/impl/simd_three_same.cpp
@@ -939,6 +939,18 @@ bool TranslatorVisitor::FSUB_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
     return true;
 }
 
+bool TranslatorVisitor::FRECPS_3(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+    const size_t esize = 16;
+    const size_t datasize = Q ? 128 : 64;
+
+    const IR::U128 operand1 = V(datasize, Vn);
+    const IR::U128 operand2 = V(datasize, Vm);
+    const IR::U128 result = ir.FPVectorRecipStepFused(esize, operand1, operand2);
+
+    V(datasize, Vd, result);
+    return true;
+}
+
 bool TranslatorVisitor::FRECPS_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
     if (sz && !Q) {
         return ReservedValue();
diff --git a/src/frontend/A64/translate/impl/simd_two_register_misc.cpp b/src/frontend/A64/translate/impl/simd_two_register_misc.cpp
index 0610347c..c31028eb 100644
--- a/src/frontend/A64/translate/impl/simd_two_register_misc.cpp
+++ b/src/frontend/A64/translate/impl/simd_two_register_misc.cpp
@@ -518,6 +518,17 @@ bool TranslatorVisitor::FRINTI_2(bool Q, bool sz, Vec Vn, Vec Vd) {
     return FloatRoundToIntegral(*this, Q, sz, Vn, Vd, ir.current_location->FPCR().RMode(), false);
 }
 
+bool TranslatorVisitor::FRECPE_3(bool Q, Vec Vn, Vec Vd) {
+    const size_t datasize = Q ? 128 : 64;
+    const size_t esize = 16;
+
+    const IR::U128 operand = V(datasize, Vn);
+    const IR::U128 result = ir.FPVectorRecipEstimate(esize, operand);
+
+    V(datasize, Vd, result);
+    return true;
+}
+
 bool TranslatorVisitor::FRECPE_4(bool Q, bool sz, Vec Vn, Vec Vd) {
     if (sz && !Q) {
         return ReservedValue();
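Reviewer note: in the vector translations above, esize is pinned to 16 while Q still selects the vector width, so the IR operation covers 4 or 8 halfword lanes. A tiny check of that arithmetic (sketch only):

    #include <cstddef>

    constexpr std::size_t fp16_lanes(bool Q) {
        const std::size_t datasize = Q ? 128 : 64;
        const std::size_t esize = 16;
        return datasize / esize; // 4H or 8H arrangement
    }
    static_assert(fp16_lanes(false) == 4 && fp16_lanes(true) == 8);
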
diff --git a/src/frontend/ir/ir_emitter.cpp b/src/frontend/ir/ir_emitter.cpp
index f5a28853..e9c655ab 100644
--- a/src/frontend/ir/ir_emitter.cpp
+++ b/src/frontend/ir/ir_emitter.cpp
@@ -1922,11 +1922,18 @@ U16U32U64 IREmitter::FPNeg(const U16U32U64& a) {
     }
 }
 
-U32U64 IREmitter::FPRecipEstimate(const U32U64& a) {
-    if (a.GetType() == Type::U32) {
+U16U32U64 IREmitter::FPRecipEstimate(const U16U32U64& a) {
+    switch (a.GetType()) {
+    case Type::U16:
+        return Inst<U16>(Opcode::FPRecipEstimate16, a);
+    case Type::U32:
         return Inst<U32>(Opcode::FPRecipEstimate32, a);
+    case Type::U64:
+        return Inst<U64>(Opcode::FPRecipEstimate64, a);
+    default:
+        UNREACHABLE();
+        return U16U32U64{};
     }
-    return Inst<U64>(Opcode::FPRecipEstimate64, a);
 }
 
 U16U32U64 IREmitter::FPRecipExponent(const U16U32U64& a) {
@@ -1943,11 +1950,20 @@ U16U32U64 IREmitter::FPRecipExponent(const U16U32U64& a) {
     }
 }
 
-U32U64 IREmitter::FPRecipStepFused(const U32U64& a, const U32U64& b) {
-    if (a.GetType() == Type::U32) {
+U16U32U64 IREmitter::FPRecipStepFused(const U16U32U64& a, const U16U32U64& b) {
+    ASSERT(a.GetType() == b.GetType());
+
+    switch (a.GetType()) {
+    case Type::U16:
+        return Inst<U16>(Opcode::FPRecipStepFused16, a, b);
+    case Type::U32:
         return Inst<U32>(Opcode::FPRecipStepFused32, a, b);
+    case Type::U64:
+        return Inst<U64>(Opcode::FPRecipStepFused64, a, b);
+    default:
+        UNREACHABLE();
+        return U16U32U64{};
     }
-    return Inst<U64>(Opcode::FPRecipStepFused64, a, b);
 }
 
 U16U32U64 IREmitter::FPRoundInt(const U16U32U64& a, FP::RoundingMode rounding, bool exact) {
@@ -2264,6 +2280,8 @@ U128 IREmitter::FPVectorPairedAddLower(size_t esize, const U128& a, const U128&
 
 U128 IREmitter::FPVectorRecipEstimate(size_t esize, const U128& a) {
     switch (esize) {
+    case 16:
+        return Inst<U128>(Opcode::FPVectorRecipEstimate16, a);
     case 32:
         return Inst<U128>(Opcode::FPVectorRecipEstimate32, a);
     case 64:
@@ -2275,6 +2293,8 @@ U128 IREmitter::FPVectorRecipEstimate(size_t esize, const U128& a) {
 
 U128 IREmitter::FPVectorRecipStepFused(size_t esize, const U128& a, const U128& b) {
     switch (esize) {
+    case 16:
+        return Inst<U128>(Opcode::FPVectorRecipStepFused16, a, b);
     case 32:
         return Inst<U128>(Opcode::FPVectorRecipStepFused32, a, b);
     case 64:
diff --git a/src/frontend/ir/ir_emitter.h b/src/frontend/ir/ir_emitter.h
index 27b02776..6fe44dab 100644
--- a/src/frontend/ir/ir_emitter.h
+++ b/src/frontend/ir/ir_emitter.h
@@ -305,9 +305,9 @@ public:
     U16U32U64 FPMulAdd(const U16U32U64& addend, const U16U32U64& op1, const U16U32U64& op2, bool fpcr_controlled);
     U32U64 FPMulX(const U32U64& a, const U32U64& b);
     U16U32U64 FPNeg(const U16U32U64& a);
-    U32U64 FPRecipEstimate(const U32U64& a);
+    U16U32U64 FPRecipEstimate(const U16U32U64& a);
     U16U32U64 FPRecipExponent(const U16U32U64& a);
-    U32U64 FPRecipStepFused(const U32U64& a, const U32U64& b);
+    U16U32U64 FPRecipStepFused(const U16U32U64& a, const U16U32U64& b);
     U16U32U64 FPRoundInt(const U16U32U64& a, FP::RoundingMode rounding, bool exact);
     U16U32U64 FPRSqrtEstimate(const U16U32U64& a);
     U32U64 FPRSqrtStepFused(const U32U64& a, const U32U64& b);
diff --git a/src/frontend/ir/microinstruction.cpp b/src/frontend/ir/microinstruction.cpp
index bd4bb02c..3577fee7 100644
--- a/src/frontend/ir/microinstruction.cpp
+++ b/src/frontend/ir/microinstruction.cpp
@@ -272,11 +272,13 @@ bool Inst::ReadsFromAndWritesToFPSRCumulativeExceptionBits() const {
     case Opcode::FPMulAdd16:
    case Opcode::FPMulAdd32:
     case Opcode::FPMulAdd64:
+    case Opcode::FPRecipEstimate16:
     case Opcode::FPRecipEstimate32:
     case Opcode::FPRecipEstimate64:
     case Opcode::FPRecipExponent16:
     case Opcode::FPRecipExponent32:
     case Opcode::FPRecipExponent64:
+    case Opcode::FPRecipStepFused16:
     case Opcode::FPRecipStepFused32:
     case Opcode::FPRecipStepFused64:
     case Opcode::FPRoundInt16:
@@ -336,8 +338,10 @@ bool Inst::ReadsFromAndWritesToFPSRCumulativeExceptionBits() const {
     case Opcode::FPVectorPairedAddLower64:
     case Opcode::FPVectorPairedAdd32:
     case Opcode::FPVectorPairedAdd64:
+    case Opcode::FPVectorRecipEstimate16:
     case Opcode::FPVectorRecipEstimate32:
     case Opcode::FPVectorRecipEstimate64:
+    case Opcode::FPVectorRecipStepFused16:
     case Opcode::FPVectorRecipStepFused32:
     case Opcode::FPVectorRecipStepFused64:
     case Opcode::FPVectorRoundInt16:
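Reviewer note: the IREmitter changes dispatch on the IR value's type rather than taking a size parameter, so existing 32/64-bit callers compile unchanged while U16 operands now select the new 16-bit opcodes; the added ASSERT pins both FPRecipStepFused operands to the same width. The pattern, reduced to a skeleton with stand-in enums (not dynarmic's actual types):

    enum class Type { U16, U32, U64 };
    enum class Opcode { FPRecipEstimate16, FPRecipEstimate32, FPRecipEstimate64 };

    constexpr Opcode OpcodeFor(Type t) {
        switch (t) {
        case Type::U16: return Opcode::FPRecipEstimate16;
        case Type::U32: return Opcode::FPRecipEstimate32;
        case Type::U64: return Opcode::FPRecipEstimate64;
        }
        return Opcode::FPRecipEstimate64; // unreachable; the real emitter calls UNREACHABLE()
    }
    static_assert(OpcodeFor(Type::U16) == Opcode::FPRecipEstimate16);
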
diff --git a/src/frontend/ir/opcodes.inc b/src/frontend/ir/opcodes.inc
index e1aeec4a..0adab017 100644
--- a/src/frontend/ir/opcodes.inc
+++ b/src/frontend/ir/opcodes.inc
@@ -491,11 +491,13 @@ OPCODE(FPMulX64,                                            U64,            U64,
 OPCODE(FPNeg16,                                             U16,            U16                                             )
 OPCODE(FPNeg32,                                             U32,            U32                                             )
 OPCODE(FPNeg64,                                             U64,            U64                                             )
+OPCODE(FPRecipEstimate16,                                   U16,            U16                                             )
 OPCODE(FPRecipEstimate32,                                   U32,            U32                                             )
 OPCODE(FPRecipEstimate64,                                   U64,            U64                                             )
 OPCODE(FPRecipExponent16,                                   U16,            U16                                             )
 OPCODE(FPRecipExponent32,                                   U32,            U32                                             )
 OPCODE(FPRecipExponent64,                                   U64,            U64                                             )
+OPCODE(FPRecipStepFused16,                                  U16,            U16,            U16                             )
 OPCODE(FPRecipStepFused32,                                  U32,            U32,            U32                             )
 OPCODE(FPRecipStepFused64,                                  U64,            U64,            U64                             )
 OPCODE(FPRoundInt16,                                        U16,            U16,            U8,             U1              )
@@ -571,8 +573,10 @@ OPCODE(FPVectorPairedAdd32,                                 U128,           U128
 OPCODE(FPVectorPairedAdd64,                                 U128,           U128,           U128                            )
 OPCODE(FPVectorPairedAddLower32,                            U128,           U128,           U128                            )
 OPCODE(FPVectorPairedAddLower64,                            U128,           U128,           U128                            )
+OPCODE(FPVectorRecipEstimate16,                             U128,           U128                                            )
 OPCODE(FPVectorRecipEstimate32,                             U128,           U128                                            )
 OPCODE(FPVectorRecipEstimate64,                             U128,           U128                                            )
+OPCODE(FPVectorRecipStepFused16,                            U128,           U128,           U128                            )
 OPCODE(FPVectorRecipStepFused32,                            U128,           U128,           U128                            )
 OPCODE(FPVectorRecipStepFused64,                            U128,           U128,           U128                            )
 OPCODE(FPVectorRoundInt16,                                  U128,           U128,           U8,             U1              )
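Reviewer note: taken together, these pieces give FP16 guests ARM's standard Newton-Raphson reciprocal refinement — FRECPE supplies the seed, and each FRECPS supplies the correction factor 2 - a*x. The guest-visible algorithm, sketched over plain floats for clarity (illustration, not dynarmic code):

    float reciprocal(float a, float frecpe_seed) {
        float x = frecpe_seed;          // x0 ~= 1/a, from FRECPE
        for (int i = 0; i < 2; ++i) {
            x = x * (2.0f - a * x);     // x *= FRECPS(a, x)
        }
        return x;
    }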