From cf188448d4eaabfc0d54927e313312017cc735cf Mon Sep 17 00:00:00 2001
From: Lioncash <mathew1800@gmail.com>
Date: Wed, 23 May 2018 16:28:29 -0400
Subject: [PATCH] emit_x64_vector: Vectorize fallback case in
 EmitVectorMultiply64()

Gets rid of the need to perform a fallback.
---
 src/backend_x64/emit_x64_vector.cpp | 28 ++++++++++++++++++++++++----
 1 file changed, 24 insertions(+), 4 deletions(-)

diff --git a/src/backend_x64/emit_x64_vector.cpp b/src/backend_x64/emit_x64_vector.cpp
index 240b0d24..ef9cab86 100644
--- a/src/backend_x64/emit_x64_vector.cpp
+++ b/src/backend_x64/emit_x64_vector.cpp
@@ -1464,8 +1464,9 @@ void EmitX64::EmitVectorMultiply64(EmitContext& ctx, IR::Inst* inst) {
         return;
     }
 
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
-        auto args = ctx.reg_alloc.GetArgumentInfo(inst);
         Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
         Xbyak::Xmm b = ctx.reg_alloc.UseXmm(args[1]);
         Xbyak::Reg64 tmp1 = ctx.reg_alloc.ScratchGpr();
@@ -1484,9 +1485,28 @@ void EmitX64::EmitVectorMultiply64(EmitContext& ctx, IR::Inst* inst) {
         return;
     }
 
-    EmitTwoArgumentFallback(code, ctx, inst, [](std::array<u64, 2>& result, const std::array<u64, 2>& a, const std::array<u64, 2>& b) {
-        std::transform(a.begin(), a.end(), b.begin(), result.begin(), std::multiplies<>());
-    });
+    const Xbyak::Xmm a = ctx.reg_alloc.UseXmm(args[0]);
+    const Xbyak::Xmm b = ctx.reg_alloc.UseScratchXmm(args[1]);
+    const Xbyak::Xmm tmp1 = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Xmm tmp2 = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Xmm tmp3 = ctx.reg_alloc.ScratchXmm();
+
+    code.movdqa(tmp1, a);
+    code.movdqa(tmp2, a);
+    code.movdqa(tmp3, b);
+
+    code.psrlq(tmp1, 32);
+    code.psrlq(tmp3, 32);
+
+    code.pmuludq(tmp2, b);
+    code.pmuludq(tmp3, a);
+    code.pmuludq(b, tmp1);
+
+    code.paddq(b, tmp3);
+    code.psllq(b, 32);
+    code.paddq(tmp2, b);
+
+    ctx.reg_alloc.DefineValue(inst, tmp2);
 }
 
 void EmitX64::EmitVectorNarrow16(EmitContext& ctx, IR::Inst* inst) {