Revert "block_of_code: Refactor MConst
to Xmm{B}Const
"
This reverts commit 5d9b720189
.
Obscure bugs resulted from the reverted commit due to assumptions regarding zero-extension of the higher bits.
parent fbdcfeab99
commit f33c6f062b

9 changed files with 205 additions and 200 deletions
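The revert reason above is terse, so a short illustration of the hazard may help. The two helpers being swapped differ in what ends up in the upper bits of the 128-bit constant: MConst(frame, lower, upper = 0) leaves the upper quadword zero unless the caller supplies it, while the removed XmmBConst<esize>(frame, value) replicates the value into every esize-bit element of both quadwords (see the header hunk below that deletes XmmBConst). Code that assumed the upper bits of such a constant were zero-extended therefore changed behaviour. Below is a minimal, self-contained sketch of the two semantics; MConstLike and XmmBConstLike are hypothetical stand-ins, not dynarmic's actual Xbyak-based helpers, which return addresses into a constant pool rather than values.

#include <array>
#include <cstddef>
#include <cstdint>

// Hypothetical 128-bit constant, low quadword first.
using Xmm128 = std::array<std::uint64_t, 2>;

// Mirrors MConst(frame, lower, upper = 0): the upper quadword defaults to zero.
Xmm128 MConstLike(std::uint64_t lower, std::uint64_t upper = 0) {
    return {lower, upper};
}

// Mirrors XmmBConst<esize>(frame, value): the value is replicated into every
// esize-bit element of both quadwords (what mcl::bit::replicate_element does).
template<std::size_t esize>
Xmm128 XmmBConstLike(std::uint64_t value) {
    const std::uint64_t mask = esize < 64 ? (std::uint64_t(1) << esize) - 1 : ~std::uint64_t(0);
    std::uint64_t quad = 0;
    for (std::size_t bit = 0; bit < 64; bit += esize) {
        quad |= (value & mask) << bit;
    }
    return {quad, quad};
}

int main() {
    // Scalar code that only reads the low element sees 0x80000000 either way,
    // but anything relying on the upper bits being zero does not:
    const Xmm128 a = MConstLike(0x80000000);         // {0x0000000080000000, 0x0000000000000000}
    const Xmm128 b = XmmBConstLike<32>(0x80000000);  // {0x8000000080000000, 0x8000000080000000}
    return a == b ? 0 : 1;  // returns 1: the two constants are not interchangeable
}

With the revert applied, call sites go back to spelling out both quadwords explicitly or relying on the zero default, as the hunks below show.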
@@ -393,7 +393,7 @@ void BlockOfCode::LookupBlock() {
 cb.LookupBlock->EmitCall(*this);
 }
 
-Xbyak::Address BlockOfCode::XmmConst(const Xbyak::AddressFrame& frame, u64 lower, u64 upper) {
+Xbyak::Address BlockOfCode::MConst(const Xbyak::AddressFrame& frame, u64 lower, u64 upper) {
 return constant_pool.GetConstant(frame, lower, upper);
 }
 
@@ -21,7 +21,6 @@
 #include "dynarmic/backend/x64/jitstate_info.h"
 #include "dynarmic/common/cast_util.h"
 #include "dynarmic/interface/halt_reason.h"
-#include "mcl/bit/bit_field.hpp"
 
 namespace Dynarmic::Backend::X64 {
 
@@ -117,13 +116,7 @@ public:
 }
 }
 
-Xbyak::Address XmmConst(const Xbyak::AddressFrame& frame, u64 lower, u64 upper);
+Xbyak::Address MConst(const Xbyak::AddressFrame& frame, u64 lower, u64 upper = 0);
 
-template<size_t esize>
-Xbyak::Address XmmBConst(const Xbyak::AddressFrame& frame, u64 value) {
-return XmmConst(frame, mcl::bit::replicate_element<u64>(esize, value),
-mcl::bit::replicate_element<u64>(esize, value));
-}
-
 CodePtr GetCodeBegin() const;
 size_t GetTotalCodeSize() const;
@@ -42,7 +42,7 @@ static void EmitCRC32ISO(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, co
 const Xbyak::Xmm xmm_const = ctx.reg_alloc.ScratchXmm();
 const Xbyak::Xmm xmm_tmp = ctx.reg_alloc.ScratchXmm();
 
-code.movdqa(xmm_const, code.XmmConst(xword, 0xb4e5b025'f7011641, 0x00000001'DB710641));
+code.movdqa(xmm_const, code.MConst(xword, 0xb4e5b025'f7011641, 0x00000001'DB710641));
 
 code.movzx(value.cvt32(), value.changeBit(data_size));
 code.xor_(value.cvt32(), crc);
@@ -72,7 +72,7 @@ static void EmitCRC32ISO(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, co
 const Xbyak::Xmm xmm_value = ctx.reg_alloc.ScratchXmm();
 const Xbyak::Xmm xmm_const = ctx.reg_alloc.ScratchXmm();
 
-code.movdqa(xmm_const, code.XmmConst(xword, 0xb4e5b025'f7011641, 0x00000001'DB710641));
+code.movdqa(xmm_const, code.MConst(xword, 0xb4e5b025'f7011641, 0x00000001'DB710641));
 
 code.xor_(crc, value);
 code.shl(crc.cvt64(), 32);
@@ -93,7 +93,7 @@ static void EmitCRC32ISO(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, co
 const Xbyak::Xmm xmm_value = ctx.reg_alloc.ScratchXmm();
 const Xbyak::Xmm xmm_const = ctx.reg_alloc.ScratchXmm();
 
-code.movdqa(xmm_const, code.XmmConst(xword, 0xb4e5b025'f7011641, 0x00000001'DB710641));
+code.movdqa(xmm_const, code.MConst(xword, 0xb4e5b025'f7011641, 0x00000001'DB710641));
 
 code.mov(crc, crc);
 code.xor_(crc.cvt64(), value);
@@ -90,9 +90,10 @@ void DenormalsAreZero(BlockOfCode& code, EmitContext& ctx, std::initializer_list
 FpFixup::Norm_Src,
 FpFixup::Norm_Src,
 FpFixup::Norm_Src);
+constexpr u64 denormal_to_zero64 = mcl::bit::replicate_element<fsize, u64>(denormal_to_zero);
 
 const Xbyak::Xmm tmp = xmm16;
-FCODE(vmovap)(tmp, code.XmmBConst<fsize>(xword, denormal_to_zero));
+FCODE(vmovap)(tmp, code.MConst(xword, u64(denormal_to_zero64), u64(denormal_to_zero64)));
 
 for (const Xbyak::Xmm& xmm : to_daz) {
 FCODE(vfixupimms)(xmm, xmm, tmp, u8(0));
@@ -101,17 +102,17 @@ void DenormalsAreZero(BlockOfCode& code, EmitContext& ctx, std::initializer_list
 }
 
 for (const Xbyak::Xmm& xmm : to_daz) {
-code.movaps(xmm0, code.XmmBConst<fsize>(xword, fsize == 32 ? f32_non_sign_mask : f64_non_sign_mask));
+code.movaps(xmm0, code.MConst(xword, fsize == 32 ? f32_non_sign_mask : f64_non_sign_mask));
 code.andps(xmm0, xmm);
 if constexpr (fsize == 32) {
-code.pcmpgtd(xmm0, code.XmmBConst<32>(xword, f32_smallest_normal - 1));
+code.pcmpgtd(xmm0, code.MConst(xword, f32_smallest_normal - 1));
 } else if (code.HasHostFeature(HostFeature::SSE42)) {
-code.pcmpgtq(xmm0, code.XmmBConst<64>(xword, f64_smallest_normal - 1));
+code.pcmpgtq(xmm0, code.MConst(xword, f64_smallest_normal - 1));
 } else {
-code.pcmpgtd(xmm0, code.XmmBConst<64>(xword, f64_smallest_normal - 1));
+code.pcmpgtd(xmm0, code.MConst(xword, f64_smallest_normal - 1));
 code.pshufd(xmm0, xmm0, 0b11100101);
 }
-code.orps(xmm0, code.XmmBConst<fsize>(xword, fsize == 32 ? f32_negative_zero : f64_negative_zero));
+code.orps(xmm0, code.MConst(xword, fsize == 32 ? f32_negative_zero : f64_negative_zero));
 code.andps(xmm, xmm0);
 }
 }
@@ -122,7 +123,7 @@ void ZeroIfNaN(BlockOfCode& code, Xbyak::Xmm xmm_value, Xbyak::Xmm xmm_scratch)
 if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
 constexpr u32 nan_to_zero = FixupLUT(FpFixup::PosZero,
 FpFixup::PosZero);
-FCODE(vfixupimms)(xmm_value, xmm_value, code.XmmBConst<32>(ptr, nan_to_zero), u8(0));
+FCODE(vfixupimms)(xmm_value, xmm_value, code.MConst(ptr, u64(nan_to_zero)), u8(0));
 } else if (code.HasHostFeature(HostFeature::AVX)) {
 FCODE(vcmpords)(xmm_scratch, xmm_value, xmm_value);
 FCODE(vandp)(xmm_value, xmm_value, xmm_scratch);
@@ -138,15 +139,15 @@ void ForceToDefaultNaN(BlockOfCode& code, Xbyak::Xmm result) {
 if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
 const Xbyak::Opmask nan_mask = k1;
 FCODE(vfpclasss)(nan_mask, result, u8(FpClass::QNaN | FpClass::SNaN));
-FCODE(vblendmp)(result | nan_mask, result, code.XmmBConst<fsize>(ptr_b, fsize == 32 ? f32_nan : f64_nan));
+FCODE(vblendmp)(result | nan_mask, result, code.MConst(ptr_b, fsize == 32 ? f32_nan : f64_nan));
 } else if (code.HasHostFeature(HostFeature::AVX)) {
 FCODE(vcmpunords)(xmm0, result, result);
-FCODE(blendvp)(result, code.XmmBConst<fsize>(xword, fsize == 32 ? f32_nan : f64_nan));
+FCODE(blendvp)(result, code.MConst(xword, fsize == 32 ? f32_nan : f64_nan));
 } else {
 Xbyak::Label end;
 FCODE(ucomis)(result, result);
 code.jnp(end);
-code.movaps(result, code.XmmBConst<fsize>(xword, fsize == 32 ? f32_nan : f64_nan));
+code.movaps(result, code.MConst(xword, fsize == 32 ? f32_nan : f64_nan));
 code.L(end);
 }
 }
@@ -160,7 +161,7 @@ SharedLabel ProcessNaN(BlockOfCode& code, EmitContext& ctx, Xbyak::Xmm a) {
 
 ctx.deferred_emits.emplace_back([=, &code] {
 code.L(*nan);
-code.orps(a, code.XmmBConst<fsize>(xword, fsize == 32 ? 0x00400000 : 0x0008'0000'0000'0000));
+code.orps(a, code.MConst(xword, fsize == 32 ? 0x00400000 : 0x0008'0000'0000'0000));
 code.jmp(*end, code.T_NEAR);
 });
 
@@ -256,10 +257,10 @@ void EmitPostProcessNaNs(BlockOfCode& code, Xbyak::Xmm result, Xbyak::Xmm op1, X
 
 // Silence the SNaN as required by spec.
 if (code.HasHostFeature(HostFeature::AVX)) {
-code.vorps(result, op2, code.XmmBConst<fsize>(xword, mantissa_msb));
+code.vorps(result, op2, code.MConst(xword, mantissa_msb));
 } else {
 code.movaps(result, op2);
-code.orps(result, code.XmmBConst<fsize>(xword, mantissa_msb));
+code.orps(result, code.MConst(xword, mantissa_msb));
 }
 code.jmp(end, code.T_NEAR);
 }
@@ -340,7 +341,7 @@ void FPThreeOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Function fn)
 FCODE(ucomis)(op1, op2);
 code.jp(op_are_nans);
 // Here we must return a positive NaN, because the indefinite value on x86 is a negative NaN!
-code.movaps(result, code.XmmBConst<fsize>(xword, FP::FPInfo<FPT>::DefaultNaN()));
+code.movaps(result, code.MConst(xword, FP::FPInfo<FPT>::DefaultNaN()));
 code.jmp(*end, code.T_NEAR);
 code.L(op_are_nans);
 EmitPostProcessNaNs<fsize>(code, result, op1, op2, tmp, *end);
@@ -358,7 +359,7 @@ void FPAbs(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
 
 auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
-const Xbyak::Address mask = code.XmmBConst<fsize>(xword, non_sign_mask);
+const Xbyak::Address mask = code.MConst(xword, non_sign_mask);
 
 code.andps(result, mask);
 
@@ -384,7 +385,7 @@ void FPNeg(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
 
 auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
-const Xbyak::Address mask = code.XmmBConst<fsize>(xword, u64(sign_mask));
+const Xbyak::Address mask = code.MConst(xword, u64(sign_mask));
 
 code.xorps(result, mask);
 
@@ -455,7 +456,7 @@ static void EmitFPMinMax(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
 
 code.L(nan);
 if (ctx.FPCR().DN()) {
-code.movaps(result, code.XmmBConst<fsize>(xword, fsize == 32 ? f32_nan : f64_nan));
+code.movaps(result, code.MConst(xword, fsize == 32 ? f32_nan : f64_nan));
 code.jmp(*end);
 } else {
 code.movaps(tmp, result);
@@ -487,7 +488,7 @@ static void EmitFPMinMaxNumeric(BlockOfCode& code, EmitContext& ctx, IR::Inst* i
 
 if (ctx.FPCR().DN()) {
 FCODE(vcmps)(k1, op2, op2, Cmp::Unordered_Q);
-FCODE(vmovs)(op2 | k1, code.XmmBConst<fsize>(xword, default_nan));
+FCODE(vmovs)(op2 | k1, code.MConst(xword, default_nan));
 }
 } else {
 Xbyak::Reg tmp = ctx.reg_alloc.ScratchGpr();
@@ -544,12 +545,12 @@ static void EmitFPMinMaxNumeric(BlockOfCode& code, EmitContext& ctx, IR::Inst* i
 code.jc(maybe_both_nan);
 if (ctx.FPCR().DN()) {
 code.L(snan);
-code.movaps(op2, code.XmmBConst<fsize>(xword, default_nan));
+code.movaps(op2, code.MConst(xword, default_nan));
 code.jmp(*end);
 } else {
 code.movaps(op2, op1);
 code.L(snan);
-code.orps(op2, code.XmmBConst<fsize>(xword, FP::FPInfo<FPT>::mantissa_msb));
+code.orps(op2, code.MConst(xword, FP::FPInfo<FPT>::mantissa_msb));
 code.jmp(*end);
 }
 
@@ -649,9 +650,9 @@ static void EmitFPMulAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
 code.movaps(result, operand1);
 FCODE(vfmadd231s)(result, operand2, operand3);
 
-code.movaps(tmp, code.XmmBConst<fsize>(xword, fsize == 32 ? f32_non_sign_mask : f64_non_sign_mask));
+code.movaps(tmp, code.MConst(xword, fsize == 32 ? f32_non_sign_mask : f64_non_sign_mask));
 code.andps(tmp, result);
-FCODE(ucomis)(tmp, code.XmmBConst<fsize>(xword, fsize == 32 ? f32_smallest_normal : f64_smallest_normal));
+FCODE(ucomis)(tmp, code.MConst(xword, fsize == 32 ? f32_smallest_normal : f64_smallest_normal));
 code.jz(*fallback, code.T_NEAR);
 code.L(*end);
 
@@ -761,12 +762,12 @@ static void EmitFPMulX(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
 code.movaps(result, op1);
 code.xorps(result, op2);
 }
-code.andps(result, code.XmmBConst<fsize>(xword, FP::FPInfo<FPT>::sign_mask));
+code.andps(result, code.MConst(xword, FP::FPInfo<FPT>::sign_mask));
-code.orps(result, code.XmmBConst<fsize>(xword, FP::FPValue<FPT, false, 0, 2>()));
+code.orps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 2>()));
 code.jmp(*end, code.T_NEAR);
 code.L(op_are_nans);
 if (do_default_nan) {
-code.movaps(result, code.XmmBConst<fsize>(xword, FP::FPInfo<FPT>::DefaultNaN()));
+code.movaps(result, code.MConst(xword, FP::FPInfo<FPT>::DefaultNaN()));
 code.jmp(*end, code.T_NEAR);
 } else {
 EmitPostProcessNaNs<fsize>(code, result, op1, op2, tmp, *end);
@@ -867,7 +868,7 @@ static void EmitFPRecipStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst*
 const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
 const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
 
-code.movaps(result, code.XmmBConst<fsize>(xword, FP::FPValue<FPT, false, 0, 2>()));
+code.movaps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 2>()));
 FCODE(vfnmadd231s)(result, operand1, operand2);
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -881,7 +882,7 @@ static void EmitFPRecipStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst*
 const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
 const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
 
-code.movaps(result, code.XmmBConst<fsize>(xword, FP::FPValue<FPT, false, 0, 2>()));
+code.movaps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 2>()));
 FCODE(vfnmadd231s)(result, operand1, operand2);
 FCODE(ucomis)(result, result);
 code.jp(*fallback, code.T_NEAR);
@@ -913,7 +914,7 @@ static void EmitFPRecipStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst*
 const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
 const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
 
-code.movaps(result, code.XmmBConst<fsize>(xword, FP::FPValue<FPT, false, 0, 2>()));
+code.movaps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 2>()));
 FCODE(muls)(operand1, operand2);
 FCODE(subs)(result, operand1);
 
@@ -1042,19 +1043,19 @@ static void EmitFPRSqrtEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* i
 
 code.movaps(value, operand);
 
-code.movaps(xmm0, code.XmmBConst<fsize>(xword, fsize == 32 ? 0xFFFF8000 : 0xFFFF'F000'0000'0000));
+code.movaps(xmm0, code.MConst(xword, fsize == 32 ? 0xFFFF8000 : 0xFFFF'F000'0000'0000));
 code.pand(value, xmm0);
-code.por(value, code.XmmBConst<fsize>(xword, fsize == 32 ? 0x00008000 : 0x0000'1000'0000'0000));
+code.por(value, code.MConst(xword, fsize == 32 ? 0x00008000 : 0x0000'1000'0000'0000));
 
 // Detect NaNs, negatives, zeros, denormals and infinities
-FCODE(ucomis)(value, code.XmmBConst<fsize>(xword, FPT(1) << FP::FPInfo<FPT>::explicit_mantissa_width));
+FCODE(ucomis)(value, code.MConst(xword, FPT(1) << FP::FPInfo<FPT>::explicit_mantissa_width));
 code.jna(*bad_values, code.T_NEAR);
 
 FCODE(sqrts)(value, value);
-ICODE(mov)(result, code.XmmBConst<fsize>(xword, FP::FPValue<FPT, false, 0, 1>()));
+ICODE(mov)(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 1>()));
 FCODE(divs)(result, value);
 
-ICODE(padd)(result, code.XmmBConst<fsize>(xword, fsize == 32 ? 0x00004000 : 0x0000'0800'0000'0000));
+ICODE(padd)(result, code.MConst(xword, fsize == 32 ? 0x00004000 : 0x0000'0800'0000'0000));
 code.pand(result, xmm0);
 
 code.L(*end);
@@ -1095,7 +1096,7 @@ static void EmitFPRSqrtEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* i
 }
 
 code.L(default_nan);
-code.movd(result, code.XmmBConst<32>(xword, 0x7FC00000));
+code.movd(result, code.MConst(xword, 0x7FC00000));
 code.jmp(*end, code.T_NEAR);
 } else {
 Xbyak::Label nan, zero;
@@ -1124,26 +1125,26 @@ static void EmitFPRSqrtEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* i
 
 code.L(zero);
 if (code.HasHostFeature(HostFeature::AVX)) {
-code.vpor(result, value, code.XmmBConst<64>(xword, 0x7FF0'0000'0000'0000));
+code.vpor(result, value, code.MConst(xword, 0x7FF0'0000'0000'0000));
 } else {
 code.movaps(result, value);
-code.por(result, code.XmmBConst<64>(xword, 0x7FF0'0000'0000'0000));
+code.por(result, code.MConst(xword, 0x7FF0'0000'0000'0000));
 }
 code.jmp(*end, code.T_NEAR);
 
 code.L(nan);
 if (!ctx.FPCR().DN()) {
 if (code.HasHostFeature(HostFeature::AVX)) {
-code.vpor(result, operand, code.XmmBConst<64>(xword, 0x0008'0000'0000'0000));
+code.vpor(result, operand, code.MConst(xword, 0x0008'0000'0000'0000));
 } else {
 code.movaps(result, operand);
-code.por(result, code.XmmBConst<64>(xword, 0x0008'0000'0000'0000));
+code.por(result, code.MConst(xword, 0x0008'0000'0000'0000));
 }
 code.jmp(*end, code.T_NEAR);
 }
 
 code.L(default_nan);
-code.movq(result, code.XmmBConst<64>(xword, 0x7FF8'0000'0000'0000));
+code.movq(result, code.MConst(xword, 0x7FF8'0000'0000'0000));
 code.jmp(*end, code.T_NEAR);
 }
 
@@ -1196,9 +1197,9 @@ static void EmitFPRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst*
 const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
 const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
 
-code.vmovaps(result, code.XmmBConst<fsize>(xword, FP::FPValue<FPT, false, 0, 3>()));
+code.vmovaps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 3>()));
 FCODE(vfnmadd231s)(result, operand1, operand2);
-FCODE(vmuls)(result, result, code.XmmBConst<fsize>(xword, FP::FPValue<FPT, false, -1, 1>()));
+FCODE(vmuls)(result, result, code.MConst(xword, FP::FPValue<FPT, false, -1, 1>()));
 
 ctx.reg_alloc.DefineValue(inst, result);
 return;
@@ -1211,7 +1212,7 @@ static void EmitFPRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst*
 const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
 const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
 
-code.vmovaps(result, code.XmmBConst<fsize>(xword, FP::FPValue<FPT, false, 0, 3>()));
+code.vmovaps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 3>()));
 FCODE(vfnmadd231s)(result, operand1, operand2);
 
 // Detect if the intermediate result is infinity or NaN or nearly an infinity.
@@ -1226,7 +1227,7 @@ static void EmitFPRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst*
 
 code.jae(*fallback, code.T_NEAR);
 
-FCODE(vmuls)(result, result, code.XmmBConst<fsize>(xword, FP::FPValue<FPT, false, -1, 1>()));
+FCODE(vmuls)(result, result, code.MConst(xword, FP::FPValue<FPT, false, -1, 1>()));
 code.L(*end);
 
 ctx.deferred_emits.emplace_back([=, &code, &ctx] {
@@ -1255,10 +1256,10 @@ static void EmitFPRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst*
 const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
 const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
 
-code.movaps(result, code.XmmBConst<fsize>(xword, FP::FPValue<FPT, false, 0, 3>()));
+code.movaps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 3>()));
 FCODE(muls)(operand1, operand2);
 FCODE(subs)(result, operand1);
-FCODE(muls)(result, code.XmmBConst<fsize>(xword, FP::FPValue<FPT, false, -1, 1>()));
+FCODE(muls)(result, code.MConst(xword, FP::FPValue<FPT, false, -1, 1>()));
 
 ctx.reg_alloc.DefineValue(inst, operand1);
 return;
@@ -1510,7 +1511,7 @@ static void EmitFPToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
 if constexpr (fsize == 64) {
 if (fbits != 0) {
 const u64 scale_factor = static_cast<u64>((fbits + 1023) << 52);
-code.mulsd(src, code.XmmBConst<64>(xword, scale_factor));
+code.mulsd(src, code.MConst(xword, scale_factor));
 }
 
 if (!truncating) {
@@ -1519,7 +1520,7 @@ static void EmitFPToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
 } else {
 if (fbits != 0) {
 const u32 scale_factor = static_cast<u32>((fbits + 127) << 23);
-code.mulss(src, code.XmmBConst<32>(xword, scale_factor));
+code.mulss(src, code.MConst(xword, scale_factor));
 }
 
 if (!truncating) {
@@ -1537,7 +1538,7 @@ static void EmitFPToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
 
 ZeroIfNaN<64>(code, src, scratch);
 
-code.movsd(scratch, code.XmmBConst<64>(xword, f64_max_s64_lim));
+code.movsd(scratch, code.MConst(xword, f64_max_s64_lim));
 code.comisd(scratch, src);
 code.jna(*saturate_max, code.T_NEAR);
 code.cvttsd2si(result, src); // 64 bit gpr
@@ -1556,7 +1557,7 @@ static void EmitFPToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
 code.pxor(xmm0, xmm0);
 
 code.movaps(scratch, src);
-code.subsd(scratch, code.XmmBConst<64>(xword, f64_max_s64_lim));
+code.subsd(scratch, code.MConst(xword, f64_max_s64_lim));
 
 // these both result in zero if src/scratch are NaN
 code.maxsd(src, xmm0);
@@ -1578,21 +1579,21 @@ static void EmitFPToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm scratch = ctx.reg_alloc.ScratchXmm();
 
 ZeroIfNaN<64>(code, src, scratch);
-code.minsd(src, code.XmmBConst<64>(xword, f64_max_s32));
+code.minsd(src, code.MConst(xword, f64_max_s32));
 // maxsd not required as cvttsd2si results in 0x8000'0000 when out of range
 code.cvttsd2si(result.cvt32(), src); // 32 bit gpr
 } else {
 code.pxor(xmm0, xmm0);
 code.maxsd(src, xmm0); // results in a zero if src is NaN
-code.minsd(src, code.XmmBConst<64>(xword, f64_max_u32));
+code.minsd(src, code.MConst(xword, f64_max_u32));
 code.cvttsd2si(result, src); // 64 bit gpr
 }
 } else {
 const Xbyak::Xmm scratch = ctx.reg_alloc.ScratchXmm();
 
 ZeroIfNaN<64>(code, src, scratch);
-code.maxsd(src, code.XmmBConst<64>(xword, unsigned_ ? f64_min_u16 : f64_min_s16));
+code.maxsd(src, code.MConst(xword, unsigned_ ? f64_min_u16 : f64_min_s16));
-code.minsd(src, code.XmmBConst<64>(xword, unsigned_ ? f64_max_u16 : f64_max_s16));
+code.minsd(src, code.MConst(xword, unsigned_ ? f64_max_u16 : f64_max_s16));
 code.cvttsd2si(result, src); // 64 bit gpr
 }
 
@@ -1717,7 +1718,7 @@ void EmitX64::EmitFPFixedS16ToSingle(EmitContext& ctx, IR::Inst* inst) {
 
 if (fbits != 0) {
 const u32 scale_factor = static_cast<u32>((127 - fbits) << 23);
-code.mulss(result, code.XmmBConst<32>(xword, scale_factor));
+code.mulss(result, code.MConst(xword, scale_factor));
 }
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -1737,7 +1738,7 @@ void EmitX64::EmitFPFixedU16ToSingle(EmitContext& ctx, IR::Inst* inst) {
 
 if (fbits != 0) {
 const u32 scale_factor = static_cast<u32>((127 - fbits) << 23);
-code.mulss(result, code.XmmBConst<32>(xword, scale_factor));
+code.mulss(result, code.MConst(xword, scale_factor));
 }
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -1762,7 +1763,7 @@ void EmitX64::EmitFPFixedS32ToSingle(EmitContext& ctx, IR::Inst* inst) {
 
 if (fbits != 0) {
 const u32 scale_factor = static_cast<u32>((127 - fbits) << 23);
-code.mulss(result, code.XmmBConst<32>(xword, scale_factor));
+code.mulss(result, code.MConst(xword, scale_factor));
 }
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -1798,7 +1799,7 @@ void EmitX64::EmitFPFixedU32ToSingle(EmitContext& ctx, IR::Inst* inst) {
 
 if (fbits != 0) {
 const u32 scale_factor = static_cast<u32>((127 - fbits) << 23);
-code.mulss(result, code.XmmBConst<32>(xword, scale_factor));
+code.mulss(result, code.MConst(xword, scale_factor));
 }
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -1818,7 +1819,7 @@ void EmitX64::EmitFPFixedS16ToDouble(EmitContext& ctx, IR::Inst* inst) {
 
 if (fbits != 0) {
 const u64 scale_factor = static_cast<u64>((1023 - fbits) << 52);
-code.mulsd(result, code.XmmBConst<64>(xword, scale_factor));
+code.mulsd(result, code.MConst(xword, scale_factor));
 }
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -1838,7 +1839,7 @@ void EmitX64::EmitFPFixedU16ToDouble(EmitContext& ctx, IR::Inst* inst) {
 
 if (fbits != 0) {
 const u64 scale_factor = static_cast<u64>((1023 - fbits) << 52);
-code.mulsd(result, code.XmmBConst<64>(xword, scale_factor));
+code.mulsd(result, code.MConst(xword, scale_factor));
 }
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -1856,7 +1857,7 @@ void EmitX64::EmitFPFixedS32ToDouble(EmitContext& ctx, IR::Inst* inst) {
 
 if (fbits != 0) {
 const u64 scale_factor = static_cast<u64>((1023 - fbits) << 52);
-code.mulsd(result, code.XmmBConst<64>(xword, scale_factor));
+code.mulsd(result, code.MConst(xword, scale_factor));
 }
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -1881,7 +1882,7 @@ void EmitX64::EmitFPFixedU32ToDouble(EmitContext& ctx, IR::Inst* inst) {
 
 if (fbits != 0) {
 const u64 scale_factor = static_cast<u64>((1023 - fbits) << 52);
-code.mulsd(to, code.XmmBConst<64>(xword, scale_factor));
+code.mulsd(to, code.MConst(xword, scale_factor));
 }
 
 ctx.reg_alloc.DefineValue(inst, to);
@@ -1900,7 +1901,7 @@ void EmitX64::EmitFPFixedS64ToDouble(EmitContext& ctx, IR::Inst* inst) {
 
 if (fbits != 0) {
 const u64 scale_factor = static_cast<u64>((1023 - fbits) << 52);
-code.mulsd(result, code.XmmBConst<64>(xword, scale_factor));
+code.mulsd(result, code.MConst(xword, scale_factor));
 }
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -1919,7 +1920,7 @@ void EmitX64::EmitFPFixedS64ToSingle(EmitContext& ctx, IR::Inst* inst) {
 
 if (fbits != 0) {
 const u32 scale_factor = static_cast<u32>((127 - fbits) << 23);
-code.mulss(result, code.XmmBConst<32>(xword, scale_factor));
+code.mulss(result, code.MConst(xword, scale_factor));
 }
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -1940,18 +1941,18 @@ void EmitX64::EmitFPFixedU64ToDouble(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
 code.movq(tmp, from);
-code.punpckldq(tmp, code.XmmConst(xword, 0x4530000043300000, 0));
+code.punpckldq(tmp, code.MConst(xword, 0x4530000043300000, 0));
-code.subpd(tmp, code.XmmConst(xword, 0x4330000000000000, 0x4530000000000000));
+code.subpd(tmp, code.MConst(xword, 0x4330000000000000, 0x4530000000000000));
 code.pshufd(result, tmp, 0b01001110);
 code.addpd(result, tmp);
 if (ctx.FPCR().RMode() == FP::RoundingMode::TowardsMinusInfinity) {
-code.pand(result, code.XmmBConst<64>(xword, f64_non_sign_mask));
+code.pand(result, code.MConst(xword, f64_non_sign_mask));
 }
 }
 
 if (fbits != 0) {
 const u64 scale_factor = static_cast<u64>((1023 - fbits) << 52);
-code.mulsd(result, code.XmmBConst<64>(xword, scale_factor));
+code.mulsd(result, code.MConst(xword, scale_factor));
 }
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -1995,7 +1996,7 @@ void EmitX64::EmitFPFixedU64ToSingle(EmitContext& ctx, IR::Inst* inst) {
 
 if (fbits != 0) {
 const u32 scale_factor = static_cast<u32>((127 - fbits) << 23);
-code.mulss(result, code.XmmBConst<32>(xword, scale_factor));
+code.mulss(result, code.MConst(xword, scale_factor));
 }
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -94,8 +94,8 @@ void EmitX64::EmitPackedAddU16(EmitContext& ctx, IR::Inst* inst) {
 // !(b <= a+b) == b > a+b
 code.movdqa(tmp_a, xmm_a);
 code.movdqa(tmp_b, xmm_b);
-code.paddw(tmp_a, code.XmmConst(xword, 0x80008000, 0));
+code.paddw(tmp_a, code.MConst(xword, 0x80008000));
-code.paddw(tmp_b, code.XmmConst(xword, 0x80008000, 0));
+code.paddw(tmp_b, code.MConst(xword, 0x80008000));
 code.pcmpgtw(tmp_b, tmp_a); // *Signed* comparison!
 
 ctx.reg_alloc.DefineValue(ge_inst, tmp_b);
@@ -217,8 +217,8 @@ void EmitX64::EmitPackedSubU16(EmitContext& ctx, IR::Inst* inst) {
 
 // (a >= b) == !(b > a)
 code.pcmpeqb(ones, ones);
-code.paddw(xmm_a, code.XmmConst(xword, 0x80008000, 0));
+code.paddw(xmm_a, code.MConst(xword, 0x80008000));
-code.paddw(xmm_b, code.XmmConst(xword, 0x80008000, 0));
+code.paddw(xmm_b, code.MConst(xword, 0x80008000));
 code.movdqa(xmm_ge, xmm_b);
 code.pcmpgtw(xmm_ge, xmm_a); // *Signed* comparison!
 code.pxor(xmm_ge, ones);
@@ -654,7 +654,7 @@ void EmitX64::EmitPackedAbsDiffSumS8(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
 // TODO: Optimize with zero-extension detection
-code.movaps(tmp, code.XmmConst(xword, 0x0000'0000'ffff'ffff, 0));
+code.movaps(tmp, code.MConst(xword, 0x0000'0000'ffff'ffff));
 code.pand(xmm_a, tmp);
 code.pand(xmm_b, tmp);
 code.psadbw(xmm_a, xmm_b);
@@ -458,7 +458,7 @@ static void ArithmeticShiftRightByte(EmitContext& ctx, BlockOfCode& code, const
 const u64 shift_matrix = shift_amount < 8
 ? (0x0102040810204080 << (shift_amount * 8)) | (0x8080808080808080 >> (64 - shift_amount * 8))
 : 0x8080808080808080;
-code.gf2p8affineqb(result, code.XmmBConst<64>(xword, shift_matrix), 0);
+code.gf2p8affineqb(result, code.MConst(xword, shift_matrix, shift_matrix), 0);
 return;
 }
 
@@ -519,7 +519,7 @@ void EmitX64::EmitVectorArithmeticShiftRight64(EmitContext& ctx, IR::Inst* inst)
 
 code.pxor(tmp2, tmp2);
 code.psrlq(result, shift_amount);
-code.movdqa(tmp1, code.XmmBConst<64>(xword, sign_bit));
+code.movdqa(tmp1, code.MConst(xword, sign_bit, sign_bit));
 code.pand(tmp1, result);
 code.psubq(tmp2, tmp1);
 code.por(result, tmp2);
@@ -571,7 +571,7 @@ void EmitX64::EmitVectorArithmeticVShift16(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm right_shift = xmm16;
 const Xbyak::Xmm tmp = xmm17;
 
-code.vmovdqa32(tmp, code.XmmBConst<16>(xword, 0x00FF));
+code.vmovdqa32(tmp, code.MConst(xword, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF));
 code.vpxord(right_shift, right_shift, right_shift);
 code.vpsubw(right_shift, right_shift, left_shift);
 
@@ -606,7 +606,7 @@ void EmitX64::EmitVectorArithmeticVShift32(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm right_shift = ctx.reg_alloc.ScratchXmm();
 const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
-code.vmovdqa(tmp, code.XmmBConst<32>(xword, 0x000000FF));
+code.vmovdqa(tmp, code.MConst(xword, 0x000000FF000000FF, 0x000000FF000000FF));
 code.vpxor(right_shift, right_shift, right_shift);
 code.vpsubd(right_shift, right_shift, left_shift);
 
@@ -637,7 +637,7 @@ void EmitX64::EmitVectorArithmeticVShift64(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm right_shift = xmm16;
 const Xbyak::Xmm tmp = xmm17;
 
-code.vmovdqa32(tmp, code.XmmBConst<64>(xword, 0x00000000000000FF));
+code.vmovdqa32(tmp, code.MConst(xword, 0x00000000000000FF, 0x00000000000000FF));
 code.vpxorq(right_shift, right_shift, right_shift);
 code.vpsubq(right_shift, right_shift, left_shift);
 
@@ -925,15 +925,15 @@ void EmitX64::EmitVectorCountLeadingZeros8(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm tmp1 = ctx.reg_alloc.ScratchXmm();
 const Xbyak::Xmm tmp2 = ctx.reg_alloc.ScratchXmm();
 
-code.movdqa(tmp1, code.XmmConst(xword, 0x0101010102020304, 0x0000000000000000));
+code.movdqa(tmp1, code.MConst(xword, 0x0101010102020304, 0x0000000000000000));
 code.movdqa(tmp2, tmp1);
 
 code.pshufb(tmp2, data);
 code.psrlw(data, 4);
-code.pand(data, code.XmmBConst<8>(xword, 0x0F));
+code.pand(data, code.MConst(xword, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F));
 code.pshufb(tmp1, data);
 
-code.movdqa(data, code.XmmBConst<8>(xword, 0x04));
+code.movdqa(data, code.MConst(xword, 0x0404040404040404, 0x0404040404040404));
 
 code.pcmpeqb(data, tmp1);
 code.pand(data, tmp2);
@@ -966,11 +966,11 @@ void EmitX64::EmitVectorCountLeadingZeros16(EmitContext& ctx, IR::Inst* inst) {
 code.vpcmpeqw(zeros, zeros, zeros);
 code.vpcmpeqw(tmp, tmp, tmp);
 code.vpcmpeqw(zeros, zeros, data);
-code.vpmullw(data, data, code.XmmBConst<16>(xword, 0xf0d3));
+code.vpmullw(data, data, code.MConst(xword, 0xf0d3f0d3f0d3f0d3, 0xf0d3f0d3f0d3f0d3));
 code.vpsllw(tmp, tmp, 15);
 code.vpsllw(zeros, zeros, 7);
 code.vpsrlw(data, data, 12);
-code.vmovdqa(result, code.XmmConst(xword, 0x0903060a040b0c10, 0x0f080e0207050d01));
+code.vmovdqa(result, code.MConst(xword, 0x0903060a040b0c10, 0x0f080e0207050d01));
 code.vpor(tmp, tmp, zeros);
 code.vpor(data, data, tmp);
 code.vpshufb(result, result, data);
@@ -1002,11 +1002,11 @@ void EmitX64::EmitVectorCountLeadingZeros16(EmitContext& ctx, IR::Inst* inst) {
 code.pcmpeqw(zeros, zeros);
 code.pcmpeqw(tmp, tmp);
 code.pcmpeqw(zeros, data);
-code.pmullw(data, code.XmmBConst<16>(xword, 0xf0d3));
+code.pmullw(data, code.MConst(xword, 0xf0d3f0d3f0d3f0d3, 0xf0d3f0d3f0d3f0d3));
 code.psllw(tmp, 15);
 code.psllw(zeros, 7);
 code.psrlw(data, 12);
-code.movdqa(result, code.XmmConst(xword, 0x0903060a040b0c10, 0x0f080e0207050d01));
+code.movdqa(result, code.MConst(xword, 0x0903060a040b0c10, 0x0f080e0207050d01));
 code.por(tmp, zeros);
 code.por(data, tmp);
 code.pshufb(result, data);
@@ -1038,7 +1038,7 @@ void EmitX64::EmitVectorDeinterleaveEven8(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm rhs = ctx.reg_alloc.UseScratchXmm(args[1]);
 const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
-code.movdqa(tmp, code.XmmBConst<16>(xword, 0x00FF));
+code.movdqa(tmp, code.MConst(xword, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF));
 code.pand(lhs, tmp);
 code.pand(rhs, tmp);
 code.packuswb(lhs, rhs);
@@ -1088,7 +1088,7 @@ void EmitX64::EmitVectorDeinterleaveEvenLower8(EmitContext& ctx, IR::Inst* inst)
 const Xbyak::Xmm rhs = ctx.reg_alloc.UseScratchXmm(args[1]);
 const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
-code.movdqa(tmp, code.XmmBConst<16>(xword, 0x00FF));
+code.movdqa(tmp, code.MConst(xword, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF));
 code.pand(lhs, tmp);
 code.pand(rhs, tmp);
 code.packuswb(lhs, rhs);
@@ -1423,13 +1423,13 @@ static void EmitVectorHalvingAddUnsigned(size_t esize, EmitContext& ctx, IR::Ins
 case 8:
 code.pavgb(tmp, a);
 code.pxor(a, b);
-code.pand(a, code.XmmBConst<8>(xword, 0x01));
+code.pand(a, code.MConst(xword, 0x0101010101010101, 0x0101010101010101));
 code.psubb(tmp, a);
 break;
 case 16:
 code.pavgw(tmp, a);
 code.pxor(a, b);
-code.pand(a, code.XmmBConst<16>(xword, 0x0001));
+code.pand(a, code.MConst(xword, 0x0001000100010001, 0x0001000100010001));
 code.psubw(tmp, a);
 break;
 case 32:
@@ -1464,7 +1464,7 @@ static void EmitVectorHalvingSubSigned(size_t esize, EmitContext& ctx, IR::Inst*
 switch (esize) {
 case 8: {
 const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
-code.movdqa(tmp, code.XmmBConst<8>(xword, 0x80));
+code.movdqa(tmp, code.MConst(xword, 0x8080808080808080, 0x8080808080808080));
 code.pxor(a, tmp);
 code.pxor(b, tmp);
 code.pavgb(b, a);
@@ -1473,7 +1473,7 @@ static void EmitVectorHalvingSubSigned(size_t esize, EmitContext& ctx, IR::Inst*
 }
 case 16: {
 const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
-code.movdqa(tmp, code.XmmBConst<16>(xword, 0x8000));
+code.movdqa(tmp, code.MConst(xword, 0x8000800080008000, 0x8000800080008000));
 code.pxor(a, tmp);
 code.pxor(b, tmp);
 code.pavgw(b, a);
@@ -1635,13 +1635,13 @@ void EmitX64::EmitVectorLogicalShiftLeft8(EmitContext& ctx, IR::Inst* inst) {
 code.paddb(result, result);
 } else if (code.HasHostFeature(HostFeature::GFNI)) {
 const u64 shift_matrix = 0x0102040810204080 >> (shift_amount * 8);
-code.gf2p8affineqb(result, code.XmmBConst<64>(xword, shift_matrix), 0);
+code.gf2p8affineqb(result, code.MConst(xword, shift_matrix, shift_matrix), 0);
 } else {
 const u64 replicand = (0xFFULL << shift_amount) & 0xFF;
 const u64 mask = mcl::bit::replicate_element<u8, u64>(replicand);
 
 code.psllw(result, shift_amount);
-code.pand(result, code.XmmBConst<64>(xword, mask));
+code.pand(result, code.MConst(xword, mask, mask));
 }
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -1692,13 +1692,13 @@ void EmitX64::EmitVectorLogicalShiftRight8(EmitContext& ctx, IR::Inst* inst) {
 code.pxor(result, result);
 } else if (code.HasHostFeature(HostFeature::GFNI)) {
 const u64 shift_matrix = 0x0102040810204080 << (shift_amount * 8);
-code.gf2p8affineqb(result, code.XmmBConst<64>(xword, shift_matrix), 0);
+code.gf2p8affineqb(result, code.MConst(xword, shift_matrix, shift_matrix), 0);
 } else {
 const u64 replicand = 0xFEULL >> shift_amount;
 const u64 mask = mcl::bit::replicate_element<u8, u64>(replicand);
 
 code.psrlw(result, shift_amount);
-code.pand(result, code.XmmConst(xword, mask, mask));
+code.pand(result, code.MConst(xword, mask, mask));
 }
 
 ctx.reg_alloc.DefineValue(inst, result);
@@ -1752,7 +1752,7 @@ void EmitX64::EmitVectorLogicalVShift16(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm right_shift = xmm16;
 const Xbyak::Xmm tmp = xmm17;
 
-code.vmovdqa32(tmp, code.XmmBConst<16>(xword, 0x00FF));
+code.vmovdqa32(tmp, code.MConst(xword, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF));
 code.vpxord(right_shift, right_shift, right_shift);
 code.vpsubw(right_shift, right_shift, left_shift);
 code.vpandd(left_shift, left_shift, tmp);
@@ -1780,7 +1780,7 @@ void EmitX64::EmitVectorLogicalVShift32(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm right_shift = ctx.reg_alloc.ScratchXmm();
 const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
-code.vmovdqa(tmp, code.XmmBConst<32>(xword, 0x000000FF));
+code.vmovdqa(tmp, code.MConst(xword, 0x000000FF000000FF, 0x000000FF000000FF));
 code.vpxor(right_shift, right_shift, right_shift);
 code.vpsubd(right_shift, right_shift, left_shift);
 code.vpand(left_shift, left_shift, tmp);
@@ -1808,7 +1808,7 @@ void EmitX64::EmitVectorLogicalVShift64(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm right_shift = ctx.reg_alloc.ScratchXmm();
 const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
-code.vmovdqa(tmp, code.XmmBConst<64>(xword, 0x00000000000000FF));
+code.vmovdqa(tmp, code.MConst(xword, 0x00000000000000FF, 0x00000000000000FF));
 code.vpxor(right_shift, right_shift, right_shift);
 code.vpsubq(right_shift, right_shift, left_shift);
 code.vpand(left_shift, left_shift, tmp);
@@ -1928,7 +1928,7 @@ void EmitX64::EmitVectorMaxU32(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm b = ctx.reg_alloc.UseXmm(args[1]);
 
 const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
-code.movdqa(tmp, code.XmmBConst<32>(xword, 0x80000000));
+code.movdqa(tmp, code.MConst(xword, 0x8000000080000000, 0x8000000080000000));
 
 const Xbyak::Xmm tmp_b = ctx.reg_alloc.ScratchXmm();
 code.movdqa(tmp_b, b);
@@ -1957,7 +1957,7 @@ void EmitX64::EmitVectorMaxU64(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm y = ctx.reg_alloc.UseXmm(args[1]);
 const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
-code.vmovdqa(xmm0, code.XmmBConst<64>(xword, 0x8000000000000000));
+code.vmovdqa(xmm0, code.MConst(xword, 0x8000000000000000, 0x8000000000000000));
 code.vpsubq(tmp, y, xmm0);
 code.vpsubq(xmm0, x, xmm0);
 code.vpcmpgtq(xmm0, tmp, xmm0);
@@ -2076,7 +2076,7 @@ void EmitX64::EmitVectorMinU32(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm b = ctx.reg_alloc.UseXmm(args[1]);
 
 const Xbyak::Xmm sint_max_plus_one = ctx.reg_alloc.ScratchXmm();
-code.movdqa(sint_max_plus_one, code.XmmBConst<32>(xword, 0x80000000));
+code.movdqa(sint_max_plus_one, code.MConst(xword, 0x8000000080000000, 0x8000000080000000));
 
 const Xbyak::Xmm tmp_a = ctx.reg_alloc.ScratchXmm();
 code.movdqa(tmp_a, a);
@@ -2107,7 +2107,7 @@ void EmitX64::EmitVectorMinU64(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm y = ctx.reg_alloc.UseScratchXmm(args[1]);
 const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
-code.vmovdqa(xmm0, code.XmmBConst<64>(xword, 0x8000000000000000));
+code.vmovdqa(xmm0, code.MConst(xword, 0x8000000000000000, 0x8000000000000000));
 code.vpsubq(tmp, y, xmm0);
 code.vpsubq(xmm0, x, xmm0);
 code.vpcmpgtq(xmm0, tmp, xmm0);
@ -2136,7 +2136,7 @@ void EmitX64::EmitVectorMultiply8(EmitContext& ctx, IR::Inst* inst) {
|
||||||
code.psrlw(tmp_a, 8);
|
code.psrlw(tmp_a, 8);
|
||||||
code.psrlw(tmp_b, 8);
|
code.psrlw(tmp_b, 8);
|
||||||
code.pmullw(tmp_a, tmp_b);
|
code.pmullw(tmp_a, tmp_b);
|
||||||
code.pand(a, code.XmmBConst<16>(xword, 0x00FF));
|
code.pand(a, code.MConst(xword, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF));
|
||||||
code.psllw(tmp_a, 8);
|
code.psllw(tmp_a, 8);
|
||||||
code.por(a, tmp_a);
|
code.por(a, tmp_a);
|
||||||
|
|
||||||
|
@ -2238,7 +2238,7 @@ void EmitX64::EmitVectorNarrow16(EmitContext& ctx, IR::Inst* inst) {
|
||||||
const Xbyak::Xmm zeros = ctx.reg_alloc.ScratchXmm();
|
const Xbyak::Xmm zeros = ctx.reg_alloc.ScratchXmm();
|
||||||
|
|
||||||
code.pxor(zeros, zeros);
|
code.pxor(zeros, zeros);
|
||||||
code.pand(a, code.XmmBConst<16>(xword, 0x00FF));
|
code.pand(a, code.MConst(xword, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF));
|
||||||
code.packuswb(a, zeros);
|
code.packuswb(a, zeros);
|
||||||
|
|
||||||
ctx.reg_alloc.DefineValue(inst, a);
|
ctx.reg_alloc.DefineValue(inst, a);
|
||||||
|
@@ -2522,7 +2522,7 @@ void EmitX64::EmitVectorPairedAddSignedWiden32(EmitContext& ctx, IR::Inst* inst)

 code.movdqa(c, a);
 code.psllq(a, 32);
-code.movdqa(tmp1, code.XmmBConst<64>(xword, 0x80000000'00000000));
+code.movdqa(tmp1, code.MConst(xword, 0x80000000'00000000, 0x80000000'00000000));
 code.movdqa(tmp2, tmp1);
 code.pand(tmp1, a);
 code.pand(tmp2, c);
@@ -2674,7 +2674,7 @@ void EmitX64::EmitVectorPairedMaxU32(EmitContext& ctx, IR::Inst* inst) {
 ctx.reg_alloc.DefineValue(inst, x);
 } else {
 const Xbyak::Xmm tmp3 = ctx.reg_alloc.ScratchXmm();
-code.movdqa(tmp3, code.XmmBConst<32>(xword, 0x80000000));
+code.movdqa(tmp3, code.MConst(xword, 0x8000000080000000, 0x8000000080000000));

 const Xbyak::Xmm tmp2 = ctx.reg_alloc.ScratchXmm();
 code.movdqa(tmp2, x);
@@ -2759,7 +2759,7 @@ void EmitX64::EmitVectorPairedMinU32(EmitContext& ctx, IR::Inst* inst) {
 ctx.reg_alloc.DefineValue(inst, x);
 } else {
 const Xbyak::Xmm tmp3 = ctx.reg_alloc.ScratchXmm();
-code.movdqa(tmp3, code.XmmBConst<32>(xword, 0x80000000));
+code.movdqa(tmp3, code.MConst(xword, 0x8000000080000000, 0x8000000080000000));

 const Xbyak::Xmm tmp2 = ctx.reg_alloc.ScratchXmm();
 code.movdqa(tmp2, tmp1);
@@ -2803,7 +2803,7 @@ void EmitX64::EmitVectorPolynomialMultiply8(EmitContext& ctx, IR::Inst* inst) {
 Xbyak::Label loop;

 code.pxor(result, result);
-code.movdqa(mask, code.XmmBConst<8>(xword, 0x01));
+code.movdqa(mask, code.MConst(xword, 0x0101010101010101, 0x0101010101010101));
 code.mov(counter, 8);

 code.L(loop);
@@ -2847,7 +2847,7 @@ void EmitX64::EmitVectorPolynomialMultiplyLong8(EmitContext& ctx, IR::Inst* inst
 code.pmovzxbw(xmm_a, xmm_a);
 code.pmovzxbw(xmm_b, xmm_b);
 code.pxor(result, result);
-code.movdqa(mask, code.XmmBConst<16>(xword, 0x0001));
+code.movdqa(mask, code.MConst(xword, 0x0001000100010001, 0x0001000100010001));
 code.mov(counter, 8);

 code.L(loop);
@@ -2930,11 +2930,11 @@ void EmitX64::EmitVectorPopulationCount(EmitContext& ctx, IR::Inst* inst) {

 code.movdqa(high_a, low_a);
 code.psrlw(high_a, 4);
-code.movdqa(tmp1, code.XmmBConst<8>(xword, 0x0F));
+code.movdqa(tmp1, code.MConst(xword, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F));
 code.pand(high_a, tmp1); // High nibbles
 code.pand(low_a, tmp1); // Low nibbles

-code.movdqa(tmp1, code.XmmConst(xword, 0x0302020102010100, 0x0403030203020201));
+code.movdqa(tmp1, code.MConst(xword, 0x0302020102010100, 0x0403030203020201));
 code.movdqa(tmp2, tmp1);
 code.pshufb(tmp1, low_a);
 code.pshufb(tmp2, high_a);
@@ -2958,10 +2958,10 @@ void EmitX64::EmitVectorReverseBits(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm data = ctx.reg_alloc.UseScratchXmm(args[0]);

 if (code.HasHostFeature(HostFeature::GFNI)) {
-code.gf2p8affineqb(data, code.XmmBConst<64>(xword, 0x8040201008040201), 0);
+code.gf2p8affineqb(data, code.MConst(xword, 0x8040201008040201, 0x8040201008040201), 0);
 } else {
 const Xbyak::Xmm high_nibble_reg = ctx.reg_alloc.ScratchXmm();
-code.movdqa(high_nibble_reg, code.XmmBConst<8>(xword, 0xF0));
+code.movdqa(high_nibble_reg, code.MConst(xword, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0));
 code.pand(high_nibble_reg, data);
 code.pxor(data, high_nibble_reg);
 code.psrld(high_nibble_reg, 4);
@@ -2969,25 +2969,25 @@ void EmitX64::EmitVectorReverseBits(EmitContext& ctx, IR::Inst* inst) {
 if (code.HasHostFeature(HostFeature::SSSE3)) {
 // High lookup
 const Xbyak::Xmm high_reversed_reg = ctx.reg_alloc.ScratchXmm();
-code.movdqa(high_reversed_reg, code.XmmConst(xword, 0xE060A020C0408000, 0xF070B030D0509010));
+code.movdqa(high_reversed_reg, code.MConst(xword, 0xE060A020C0408000, 0xF070B030D0509010));
 code.pshufb(high_reversed_reg, data);

 // Low lookup (low nibble equivalent of the above)
-code.movdqa(data, code.XmmConst(xword, 0x0E060A020C040800, 0x0F070B030D050901));
+code.movdqa(data, code.MConst(xword, 0x0E060A020C040800, 0x0F070B030D050901));
 code.pshufb(data, high_nibble_reg);
 code.por(data, high_reversed_reg);
 } else {
 code.pslld(data, 4);
 code.por(data, high_nibble_reg);

-code.movdqa(high_nibble_reg, code.XmmBConst<8>(xword, 0xCC));
+code.movdqa(high_nibble_reg, code.MConst(xword, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC));
 code.pand(high_nibble_reg, data);
 code.pxor(data, high_nibble_reg);
 code.psrld(high_nibble_reg, 2);
 code.pslld(data, 2);
 code.por(data, high_nibble_reg);

-code.movdqa(high_nibble_reg, code.XmmBConst<8>(xword, 0xAA));
+code.movdqa(high_nibble_reg, code.MConst(xword, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA));
 code.pand(high_nibble_reg, data);
 code.pxor(data, high_nibble_reg);
 code.psrld(high_nibble_reg, 1);
@@ -3037,7 +3037,7 @@ void EmitX64::EmitVectorReduceAdd16(EmitContext& ctx, IR::Inst* inst) {
 code.paddw(data, temp);

 // Add pairs of 16-bit values into 32-bit lanes
-code.movdqa(temp, code.XmmBConst<16>(xword, 0x0001));
+code.movdqa(temp, code.MConst(xword, 0x0001000100010001, 0x0001000100010001));
 code.pmaddwd(data, temp);

 // Sum adjacent 32-bit lanes
@@ -3100,7 +3100,7 @@ static void EmitVectorRoundingHalvingAddSigned(size_t esize, EmitContext& ctx, I
 switch (esize) {
 case 8: {
 const Xbyak::Xmm vec_128 = ctx.reg_alloc.ScratchXmm();
-code.movdqa(vec_128, code.XmmBConst<8>(xword, 0x80));
+code.movdqa(vec_128, code.MConst(xword, 0x8080808080808080, 0x8080808080808080));

 code.paddb(a, vec_128);
 code.paddb(b, vec_128);
@@ -3110,7 +3110,7 @@ static void EmitVectorRoundingHalvingAddSigned(size_t esize, EmitContext& ctx, I
 }
 case 16: {
 const Xbyak::Xmm vec_32768 = ctx.reg_alloc.ScratchXmm();
-code.movdqa(vec_32768, code.XmmBConst<16>(xword, 0x8000));
+code.movdqa(vec_32768, code.MConst(xword, 0x8000800080008000, 0x8000800080008000));

 code.paddw(a, vec_32768);
 code.paddw(b, vec_32768);
@@ -3506,7 +3506,7 @@ void EmitX64::EmitVectorSignedMultiply32(EmitContext& ctx, IR::Inst* inst) {
 code.pand(tmp, y);
 code.pand(sign_correction, x);
 code.paddd(sign_correction, tmp);
-code.pand(sign_correction, code.XmmBConst<32>(xword, 0x7FFFFFFF));
+code.pand(sign_correction, code.MConst(xword, 0x7FFFFFFF7FFFFFFF, 0x7FFFFFFF7FFFFFFF));

 // calculate unsigned multiply
 code.movdqa(tmp, x);
@@ -3547,13 +3547,13 @@ static void EmitVectorSignedSaturatedAbs(size_t esize, BlockOfCode& code, EmitCo
 const Xbyak::Address mask = [esize, &code] {
 switch (esize) {
 case 8:
-return code.XmmBConst<8>(xword, 0x80);
+return code.MConst(xword, 0x8080808080808080, 0x8080808080808080);
 case 16:
-return code.XmmBConst<16>(xword, 0x8000);
+return code.MConst(xword, 0x8000800080008000, 0x8000800080008000);
 case 32:
-return code.XmmBConst<32>(xword, 0x80000000);
+return code.MConst(xword, 0x8000000080000000, 0x8000000080000000);
 case 64:
-return code.XmmBConst<64>(xword, 0x8000000000000000);
+return code.MConst(xword, 0x8000000000000000, 0x8000000000000000);
 default:
 UNREACHABLE();
 }
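The two spellings in the hunk above denote the same 128-bit constant: the element-sized immediate is broadcast across the vector, which is exactly the 64-bit half that is passed twice to MConst. A minimal standalone sketch of that expansion; the ReplicateElement helper here is illustrative only and is not the mcl implementation:

#include <cstdint>

// Illustrative helper: replicate an `esize`-bit element across a 64-bit lane.
template<std::size_t esize>
constexpr std::uint64_t ReplicateElement(std::uint64_t element) {
    std::uint64_t result = 0;
    for (std::size_t shift = 0; shift < 64; shift += esize) {
        result |= element << shift;
    }
    return result;
}

// The sign-bit masks chosen above expand to the same halves MConst receives.
static_assert(ReplicateElement<8>(0x80) == 0x8080808080808080);
static_assert(ReplicateElement<16>(0x8000) == 0x8000800080008000);
static_assert(ReplicateElement<32>(0x80000000) == 0x8000000080000000);
static_assert(ReplicateElement<64>(0x8000000000000000) == 0x8000000000000000);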
@@ -3717,7 +3717,7 @@ static void EmitVectorSignedSaturatedAccumulateUnsigned(BlockOfCode& code, EmitC
 code.vpblendvb(xmm0, tmp, tmp2, xmm0);
 ctx.reg_alloc.Release(tmp2);
 } else {
-code.pand(xmm0, code.XmmBConst<8>(xword, 0x80));
+code.pand(xmm0, code.MConst(xword, 0x8080808080808080, 0x8080808080808080));
 code.movdqa(tmp, xmm0);
 code.psrlw(tmp, 7);
 code.pxor(xmm0, xmm0);
@@ -3835,14 +3835,14 @@ void EmitX64::EmitVectorSignedSaturatedDoublingMultiply16(EmitContext& ctx, IR::
 code.vpsrlw(lower_tmp, lower_tmp, 15);
 code.vpaddw(upper_tmp, upper_tmp, upper_tmp);
 code.vpor(upper_result, upper_tmp, lower_tmp);
-code.vpcmpeqw(upper_tmp, upper_result, code.XmmBConst<16>(xword, 0x8000));
+code.vpcmpeqw(upper_tmp, upper_result, code.MConst(xword, 0x8000800080008000, 0x8000800080008000));
 code.vpxor(upper_result, upper_result, upper_tmp);
 } else {
 code.paddw(upper_tmp, upper_tmp);
 code.psrlw(lower_tmp, 15);
 code.movdqa(upper_result, upper_tmp);
 code.por(upper_result, lower_tmp);
-code.movdqa(upper_tmp, code.XmmBConst<16>(xword, 0x8000));
+code.movdqa(upper_tmp, code.MConst(xword, 0x8000800080008000, 0x8000800080008000));
 code.pcmpeqw(upper_tmp, upper_result);
 code.pxor(upper_result, upper_tmp);
 }
@@ -3888,7 +3888,7 @@ void EmitX64::EmitVectorSignedSaturatedDoublingMultiply32(EmitContext& ctx, IR::
 const Xbyak::Xmm mask = ctx.reg_alloc.ScratchXmm();
 const Xbyak::Reg32 bit = ctx.reg_alloc.ScratchGpr().cvt32();

-code.vpcmpeqd(mask, upper_result, code.XmmBConst<32>(xword, 0x80000000));
+code.vpcmpeqd(mask, upper_result, code.MConst(xword, 0x8000000080000000, 0x8000000080000000));
 code.vpxor(upper_result, upper_result, mask);
 code.pmovmskb(bit, mask);
 code.or_(code.dword[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], bit);
@@ -3957,7 +3957,7 @@ void EmitX64::EmitVectorSignedSaturatedDoublingMultiply32(EmitContext& ctx, IR::

 const Xbyak::Reg32 bit = ctx.reg_alloc.ScratchGpr().cvt32();

-code.movdqa(tmp, code.XmmBConst<32>(xword, 0x80000000));
+code.movdqa(tmp, code.MConst(xword, 0x8000000080000000, 0x8000000080000000));
 code.pcmpeqd(tmp, upper_result);
 code.pxor(upper_result, tmp);
 code.pmovmskb(bit, tmp);
@@ -3984,10 +3984,10 @@ void EmitX64::EmitVectorSignedSaturatedDoublingMultiplyLong16(EmitContext& ctx,
 code.pmaddwd(x, y);

 if (code.HasHostFeature(HostFeature::AVX)) {
-code.vpcmpeqd(y, x, code.XmmBConst<32>(xword, 0x80000000));
+code.vpcmpeqd(y, x, code.MConst(xword, 0x8000000080000000, 0x8000000080000000));
 code.vpxor(x, x, y);
 } else {
-code.movdqa(y, code.XmmBConst<32>(xword, 0x80000000));
+code.movdqa(y, code.MConst(xword, 0x8000000080000000, 0x8000000080000000));
 code.pcmpeqd(y, x);
 code.pxor(x, y);
 }
@@ -4037,11 +4037,11 @@ void EmitX64::EmitVectorSignedSaturatedDoublingMultiplyLong32(EmitContext& ctx,

 const Xbyak::Reg32 bit = ctx.reg_alloc.ScratchGpr().cvt32();
 if (code.HasHostFeature(HostFeature::AVX)) {
-code.vpcmpeqq(y, x, code.XmmBConst<64>(xword, 0x8000000000000000));
+code.vpcmpeqq(y, x, code.MConst(xword, 0x8000000000000000, 0x8000000000000000));
 code.vpxor(x, x, y);
 code.vpmovmskb(bit, y);
 } else {
-code.movdqa(y, code.XmmBConst<64>(xword, 0x8000000000000000));
+code.movdqa(y, code.MConst(xword, 0x8000000000000000, 0x8000000000000000));
 code.pcmpeqd(y, x);
 code.shufps(y, y, 0b11110101);
 code.pxor(x, y);
@@ -4187,13 +4187,13 @@ static void EmitVectorSignedSaturatedNeg(size_t esize, BlockOfCode& code, EmitCo
 const Xbyak::Address mask = [esize, &code] {
 switch (esize) {
 case 8:
-return code.XmmBConst<8>(xword, 0x80);
+return code.MConst(xword, 0x8080808080808080, 0x8080808080808080);
 case 16:
-return code.XmmBConst<16>(xword, 0x8000);
+return code.MConst(xword, 0x8000800080008000, 0x8000800080008000);
 case 32:
-return code.XmmBConst<32>(xword, 0x80000000);
+return code.MConst(xword, 0x8000000080000000, 0x8000000080000000);
 case 64:
-return code.XmmBConst<64>(xword, 0x8000000000000000);
+return code.MConst(xword, 0x8000000000000000, 0x8000000000000000);
 default:
 UNREACHABLE();
 }
@@ -4448,7 +4448,7 @@ void EmitX64::EmitVectorTableLookup64(EmitContext& ctx, IR::Inst* inst) {
 ctx.reg_alloc.Release(xmm_table0_upper);
 }

-code.paddusb(indicies, code.XmmConst(xword, 0x7070707070707070, 0xFFFFFFFFFFFFFFFF));
+code.paddusb(indicies, code.MConst(xword, 0x7070707070707070, 0xFFFFFFFFFFFFFFFF));
 code.pshufb(xmm_table0, indicies);

 ctx.reg_alloc.DefineValue(inst, xmm_table0);
@@ -4467,10 +4467,10 @@ void EmitX64::EmitVectorTableLookup64(EmitContext& ctx, IR::Inst* inst) {
 }

 if (code.HasHostFeature(HostFeature::AVX)) {
-code.vpaddusb(xmm0, indicies, code.XmmConst(xword, sat_const[table_size], 0xFFFFFFFFFFFFFFFF));
+code.vpaddusb(xmm0, indicies, code.MConst(xword, sat_const[table_size], 0xFFFFFFFFFFFFFFFF));
 } else {
 code.movaps(xmm0, indicies);
-code.paddusb(xmm0, code.XmmConst(xword, sat_const[table_size], 0xFFFFFFFFFFFFFFFF));
+code.paddusb(xmm0, code.MConst(xword, sat_const[table_size], 0xFFFFFFFFFFFFFFFF));
 }
 code.pshufb(xmm_table0, indicies);
 code.pblendvb(xmm_table0, defaults);
@@ -4496,12 +4496,12 @@ void EmitX64::EmitVectorTableLookup64(EmitContext& ctx, IR::Inst* inst) {
 }

 if (code.HasHostFeature(HostFeature::AVX)) {
-code.vpaddusb(xmm0, indicies, code.XmmConst(xword, 0x7070707070707070, 0xFFFFFFFFFFFFFFFF));
+code.vpaddusb(xmm0, indicies, code.MConst(xword, 0x7070707070707070, 0xFFFFFFFFFFFFFFFF));
 } else {
 code.movaps(xmm0, indicies);
-code.paddusb(xmm0, code.XmmConst(xword, 0x7070707070707070, 0xFFFFFFFFFFFFFFFF));
+code.paddusb(xmm0, code.MConst(xword, 0x7070707070707070, 0xFFFFFFFFFFFFFFFF));
 }
-code.paddusb(indicies, code.XmmConst(xword, 0x6060606060606060, 0xFFFFFFFFFFFFFFFF));
+code.paddusb(indicies, code.MConst(xword, 0x6060606060606060, 0xFFFFFFFFFFFFFFFF));
 code.pshufb(xmm_table0, xmm0);
 code.pshufb(xmm_table1, indicies);
 code.pblendvb(xmm_table0, xmm_table1);
@@ -4528,19 +4528,19 @@ void EmitX64::EmitVectorTableLookup64(EmitContext& ctx, IR::Inst* inst) {
 }

 if (code.HasHostFeature(HostFeature::AVX)) {
-code.vpaddusb(xmm0, indicies, code.XmmConst(xword, 0x7070707070707070, 0xFFFFFFFFFFFFFFFF));
+code.vpaddusb(xmm0, indicies, code.MConst(xword, 0x7070707070707070, 0xFFFFFFFFFFFFFFFF));
 } else {
 code.movaps(xmm0, indicies);
-code.paddusb(xmm0, code.XmmConst(xword, 0x7070707070707070, 0xFFFFFFFFFFFFFFFF));
+code.paddusb(xmm0, code.MConst(xword, 0x7070707070707070, 0xFFFFFFFFFFFFFFFF));
 }
 code.pshufb(xmm_table0, indicies);
 code.pshufb(xmm_table1, indicies);
 code.pblendvb(xmm_table0, xmm_table1);
 if (code.HasHostFeature(HostFeature::AVX)) {
-code.vpaddusb(xmm0, indicies, code.XmmConst(xword, sat_const[table_size], 0xFFFFFFFFFFFFFFFF));
+code.vpaddusb(xmm0, indicies, code.MConst(xword, sat_const[table_size], 0xFFFFFFFFFFFFFFFF));
 } else {
 code.movaps(xmm0, indicies);
-code.paddusb(xmm0, code.XmmConst(xword, sat_const[table_size], 0xFFFFFFFFFFFFFFFF));
+code.paddusb(xmm0, code.MConst(xword, sat_const[table_size], 0xFFFFFFFFFFFFFFFF));
 }
 code.pblendvb(xmm_table0, defaults);

@@ -4605,7 +4605,7 @@ void EmitX64::EmitVectorTableLookup128(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm xmm_table0 = ctx.reg_alloc.UseScratchXmm(table[0]);
 const Xbyak::Xmm xmm_table1 = ctx.reg_alloc.UseScratchXmm(table[1]);

-code.vptestnmb(write_mask, indicies, code.XmmBConst<8>(xword, 0xE0));
+code.vptestnmb(write_mask, indicies, code.MConst(xword, 0xE0E0E0E0E0E0E0E0, 0xE0E0E0E0E0E0E0E0));
 code.vpermi2b(indicies | write_mask, xmm_table0, xmm_table1);

 ctx.reg_alloc.Release(xmm_table0);
@@ -4619,7 +4619,7 @@ void EmitX64::EmitVectorTableLookup128(EmitContext& ctx, IR::Inst* inst) {

 // Handle vector-table 2,3
 // vpcmpuble
-code.vpcmpub(upper_mask, indicies, code.XmmBConst<8>(xword, 0x3F), CmpInt::LessEqual);
+code.vpcmpub(upper_mask, indicies, code.MConst(xword, 0x3F3F3F3F3F3F3F3F, 0x3F3F3F3F3F3F3F3F), CmpInt::LessEqual);
 code.kandnw(write_mask, write_mask, upper_mask);

 const Xbyak::Xmm xmm_table2 = ctx.reg_alloc.UseScratchXmm(table[2]);
@@ -4639,7 +4639,7 @@ void EmitX64::EmitVectorTableLookup128(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm xmm_table1 = ctx.reg_alloc.UseScratchXmm(table[1]);
 const Xbyak::Opmask write_mask = k1;

-code.vptestnmb(write_mask, indicies, code.XmmBConst<8>(xword, 0xE0));
+code.vptestnmb(write_mask, indicies, code.MConst(xword, 0xE0E0E0E0E0E0E0E0, 0xE0E0E0E0E0E0E0E0));
 code.vpermi2b(indicies, xmm_table0, xmm_table1);

 if (is_defaults_zero) {
@@ -4656,7 +4656,7 @@ void EmitX64::EmitVectorTableLookup128(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm indicies = ctx.reg_alloc.UseScratchXmm(args[2]);
 const Xbyak::Xmm xmm_table0 = ctx.reg_alloc.UseScratchXmm(table[0]);

-code.paddusb(indicies, code.XmmBConst<8>(xword, 0x70));
+code.paddusb(indicies, code.MConst(xword, 0x7070707070707070, 0x7070707070707070));
 code.pshufb(xmm_table0, indicies);

 ctx.reg_alloc.DefineValue(inst, xmm_table0);
@@ -4669,10 +4669,10 @@ void EmitX64::EmitVectorTableLookup128(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm xmm_table0 = ctx.reg_alloc.UseScratchXmm(table[0]);

 if (code.HasHostFeature(HostFeature::AVX)) {
-code.vpaddusb(xmm0, indicies, code.XmmBConst<8>(xword, 0x70));
+code.vpaddusb(xmm0, indicies, code.MConst(xword, 0x7070707070707070, 0x7070707070707070));
 } else {
 code.movaps(xmm0, indicies);
-code.paddusb(xmm0, code.XmmBConst<8>(xword, 0x70));
+code.paddusb(xmm0, code.MConst(xword, 0x7070707070707070, 0x7070707070707070));
 }
 code.pshufb(xmm_table0, indicies);
 code.pblendvb(xmm_table0, defaults);
@@ -4687,12 +4687,12 @@ void EmitX64::EmitVectorTableLookup128(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm xmm_table1 = ctx.reg_alloc.UseScratchXmm(table[1]);

 if (code.HasHostFeature(HostFeature::AVX)) {
-code.vpaddusb(xmm0, indicies, code.XmmBConst<8>(xword, 0x70));
+code.vpaddusb(xmm0, indicies, code.MConst(xword, 0x7070707070707070, 0x7070707070707070));
 } else {
 code.movaps(xmm0, indicies);
-code.paddusb(xmm0, code.XmmBConst<8>(xword, 0x70));
+code.paddusb(xmm0, code.MConst(xword, 0x7070707070707070, 0x7070707070707070));
 }
-code.paddusb(indicies, code.XmmBConst<8>(xword, 0x60));
+code.paddusb(indicies, code.MConst(xword, 0x6060606060606060, 0x6060606060606060));
 code.pshufb(xmm_table0, xmm0);
 code.pshufb(xmm_table1, indicies);
 code.pblendvb(xmm_table0, xmm_table1);
@@ -4706,14 +4706,14 @@ void EmitX64::EmitVectorTableLookup128(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
 const Xbyak::Xmm masked = xmm16;

-code.vpandd(masked, indicies, code.XmmBConst<8>(xword_b, 0xF0));
+code.vpandd(masked, indicies, code.MConst(xword_b, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0));

 for (size_t i = 0; i < table_size; ++i) {
 const Xbyak::Xmm xmm_table = ctx.reg_alloc.UseScratchXmm(table[i]);
 const Xbyak::Opmask table_mask = k1;
-const u8 table_index = u8(i * 16);
+const u64 table_index = mcl::bit::replicate_element<u8, u64>(i * 16);

-code.vpcmpeqb(table_mask, masked, code.XmmBConst<8>(xword, i * 16));
+code.vpcmpeqb(table_mask, masked, code.MConst(xword, table_index, table_index));

 if (table_index == 0 && is_defaults_zero) {
 code.vpshufb(result | table_mask | T_z, xmm_table, indicies);
@@ -4733,21 +4733,21 @@ void EmitX64::EmitVectorTableLookup128(EmitContext& ctx, IR::Inst* inst) {
 const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
 const Xbyak::Xmm masked = ctx.reg_alloc.ScratchXmm();

-code.movaps(masked, code.XmmBConst<8>(xword, 0xF0));
+code.movaps(masked, code.MConst(xword, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0));
 code.pand(masked, indicies);

 for (size_t i = 0; i < table_size; ++i) {
 const Xbyak::Xmm xmm_table = ctx.reg_alloc.UseScratchXmm(table[i]);

-const u8 table_index = u8(i * 16);
+const u64 table_index = mcl::bit::replicate_element<u8, u64>(i * 16);

 if (table_index == 0) {
 code.pxor(xmm0, xmm0);
 code.pcmpeqb(xmm0, masked);
 } else if (code.HasHostFeature(HostFeature::AVX)) {
-code.vpcmpeqb(xmm0, masked, code.XmmBConst<8>(xword, table_index));
+code.vpcmpeqb(xmm0, masked, code.MConst(xword, table_index, table_index));
 } else {
-code.movaps(xmm0, code.XmmBConst<8>(xword, table_index));
+code.movaps(xmm0, code.MConst(xword, table_index, table_index));
 code.pcmpeqb(xmm0, masked);
 }
 code.pshufb(xmm_table, indicies);
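In the restored table-lookup code above, the per-table byte index (i * 16) is broadcast into every byte of a u64 via mcl::bit::replicate_element<u8, u64> so the vpcmpeqb/pcmpeqb constant holds that index in all sixteen lanes. A sketch of the expansion, using an ad-hoc ReplicateByte stand-in rather than the mcl helper itself:

#include <cstdint>

// Illustrative stand-in: broadcast one byte into every byte of a 64-bit value.
constexpr std::uint64_t ReplicateByte(std::uint8_t byte) {
    return static_cast<std::uint64_t>(byte) * 0x0101010101010101ULL;
}

static_assert(ReplicateByte(0x00) == 0x0000000000000000);  // table 0 (i * 16 == 0)
static_assert(ReplicateByte(0x10) == 0x1010101010101010);  // table 1 (i * 16 == 16)
static_assert(ReplicateByte(0x20) == 0x2020202020202020);  // table 2 (i * 16 == 32)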
@@ -4805,11 +4805,11 @@ void EmitX64::EmitVectorTranspose8(EmitContext& ctx, IR::Inst* inst) {
 const bool part = args[2].GetImmediateU1();

 if (!part) {
-code.pand(lower, code.XmmBConst<16>(xword, 0x00FF));
+code.pand(lower, code.MConst(xword, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF));
 code.psllw(upper, 8);
 } else {
 code.psrlw(lower, 8);
-code.pand(upper, code.XmmBConst<16>(xword, 0xFF00));
+code.pand(upper, code.MConst(xword, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00));
 }
 code.por(lower, upper);

@@ -4824,11 +4824,11 @@ void EmitX64::EmitVectorTranspose16(EmitContext& ctx, IR::Inst* inst) {
 const bool part = args[2].GetImmediateU1();

 if (!part) {
-code.pand(lower, code.XmmBConst<32>(xword, 0x0000FFFF));
+code.pand(lower, code.MConst(xword, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF));
 code.pslld(upper, 16);
 } else {
 code.psrld(lower, 16);
-code.pand(upper, code.XmmBConst<32>(xword, 0xFFFF0000));
+code.pand(upper, code.MConst(xword, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000));
 }
 code.por(lower, upper);

@@ -4899,7 +4899,7 @@ static void EmitVectorUnsignedAbsoluteDifference(size_t esize, EmitContext& ctx,
 const Xbyak::Xmm x = ctx.reg_alloc.UseScratchXmm(args[0]);
 const Xbyak::Xmm y = ctx.reg_alloc.UseScratchXmm(args[1]);

-code.movdqa(temp, code.XmmBConst<32>(xword, 0x80000000));
+code.movdqa(temp, code.MConst(xword, 0x8000000080000000, 0x8000000080000000));
 code.pxor(x, temp);
 code.pxor(y, temp);
 code.movdqa(temp, x);
@@ -145,12 +145,20 @@ void HandleNaNs(BlockOfCode& code, EmitContext& ctx, bool fpcr_controlled, std::

 template<size_t fsize>
 Xbyak::Address GetVectorOf(BlockOfCode& code, u64 value) {
-return code.XmmBConst<fsize>(xword, value);
+if constexpr (fsize == 32) {
+return code.MConst(xword, (value << 32) | value, (value << 32) | value);
+} else {
+return code.MConst(xword, value, value);
+}
 }

 template<size_t fsize, u64 value>
 Xbyak::Address GetVectorOf(BlockOfCode& code) {
-return code.XmmBConst<fsize>(xword, value);
+if constexpr (fsize == 32) {
+return code.MConst(xword, (value << 32) | value, (value << 32) | value);
+} else {
+return code.MConst(xword, value, value);
+}
 }

 template<size_t fsize>
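The restored GetVectorOf above builds each 64-bit half of the constant by duplicating the 32-bit pattern with (value << 32) | value before handing it to MConst twice. A worked example of that duplication; the DuplicateU32 helper is ad hoc, not part of the codebase:

#include <cstdint>

// (value << 32) | value turns one 32-bit float bit-pattern into a u64 covering
// two lanes; MConst then receives that u64 for both halves of the XMM constant.
constexpr std::uint64_t DuplicateU32(std::uint64_t value) {
    return (value << 32) | value;
}

static_assert(DuplicateU32(0x80000000) == 0x8000000080000000);  // f32 sign mask
static_assert(DuplicateU32(0x4B000000) == 0x4B0000004B000000);  // bit pattern of 8388608.0f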
@@ -206,7 +214,7 @@ void ZeroIfNaN(BlockOfCode& code, Xbyak::Xmm result) {
 if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
 constexpr u32 nan_to_zero = FixupLUT(FpFixup::PosZero,
 FpFixup::PosZero);
-FCODE(vfixupimmp)(result, result, code.XmmBConst<32>(ptr_b, nan_to_zero), u8(0));
+FCODE(vfixupimmp)(result, result, code.MConst(ptr_b, u64(nan_to_zero)), u8(0));
 } else if (code.HasHostFeature(HostFeature::AVX)) {
 FCODE(vcmpordp)(nan_mask, result, result);
 FCODE(vandp)(result, result, nan_mask);
@@ -230,8 +238,9 @@ void DenormalsAreZero(BlockOfCode& code, FP::FPCR fpcr, std::initializer_list<Xb
 FpFixup::Norm_Src,
 FpFixup::Norm_Src,
 FpFixup::Norm_Src);
+constexpr u64 denormal_to_zero64 = mcl::bit::replicate_element<fsize, u64>(denormal_to_zero);

-FCODE(vmovap)(tmp, code.XmmBConst<fsize>(xword, denormal_to_zero));
+FCODE(vmovap)(tmp, code.MConst(xword, u64(denormal_to_zero64), u64(denormal_to_zero64)));

 for (const Xbyak::Xmm& xmm : to_daz) {
 FCODE(vfixupimmp)(xmm, xmm, tmp, u8(0));
@@ -579,11 +588,12 @@ template<size_t fsize>
 void FPVectorAbs(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
 using FPT = mcl::unsigned_integer_of_size<fsize>;
 constexpr FPT non_sign_mask = FP::FPInfo<FPT>::sign_mask - FPT(1u);
+constexpr u64 non_sign_mask64 = mcl::bit::replicate_element<fsize, u64>(non_sign_mask);

 auto args = ctx.reg_alloc.GetArgumentInfo(inst);

 const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
-const Xbyak::Address mask = code.XmmBConst<fsize>(xword, non_sign_mask);
+const Xbyak::Address mask = code.MConst(xword, non_sign_mask64, non_sign_mask64);

 code.andps(a, mask);

@@ -777,9 +787,9 @@ void EmitX64::EmitFPVectorFromUnsignedFixed32(EmitContext& ctx, IR::Inst* inst)
 if (code.HasHostFeature(HostFeature::AVX512_Ortho)) {
 code.vcvtudq2ps(xmm, xmm);
 } else {
-const Xbyak::Address mem_4B000000 = code.XmmBConst<32>(xword, 0x4B000000);
-const Xbyak::Address mem_53000000 = code.XmmBConst<32>(xword, 0x53000000);
-const Xbyak::Address mem_D3000080 = code.XmmBConst<32>(xword, 0xD3000080);
+const Xbyak::Address mem_4B000000 = code.MConst(xword, 0x4B0000004B000000, 0x4B0000004B000000);
+const Xbyak::Address mem_53000000 = code.MConst(xword, 0x5300000053000000, 0x5300000053000000);
+const Xbyak::Address mem_D3000080 = code.MConst(xword, 0xD3000080D3000080, 0xD3000080D3000080);

 const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();

@@ -790,7 +800,7 @@ void EmitX64::EmitFPVectorFromUnsignedFixed32(EmitContext& ctx, IR::Inst* inst)
 code.vaddps(xmm, xmm, mem_D3000080);
 code.vaddps(xmm, tmp, xmm);
 } else {
-const Xbyak::Address mem_0xFFFF = code.XmmBConst<32>(xword, 0x0000FFFF);
+const Xbyak::Address mem_0xFFFF = code.MConst(xword, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF);

 code.movdqa(tmp, mem_0xFFFF);

@@ -808,7 +818,7 @@ void EmitX64::EmitFPVectorFromUnsignedFixed32(EmitContext& ctx, IR::Inst* inst)
 }

 if (ctx.FPCR(fpcr_controlled).RMode() == FP::RoundingMode::TowardsMinusInfinity) {
-code.pand(xmm, code.XmmBConst<32>(xword, 0x7FFFFFFF));
+code.pand(xmm, code.MConst(xword, 0x7FFFFFFF7FFFFFFF, 0x7FFFFFFF7FFFFFFF));
 }
 });

@@ -827,8 +837,8 @@ void EmitX64::EmitFPVectorFromUnsignedFixed64(EmitContext& ctx, IR::Inst* inst)
 if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
 code.vcvtuqq2pd(xmm, xmm);
 } else {
-const Xbyak::Address unpack = code.XmmConst(xword, 0x4530000043300000, 0);
-const Xbyak::Address subtrahend = code.XmmConst(xword, 0x4330000000000000, 0x4530000000000000);
+const Xbyak::Address unpack = code.MConst(xword, 0x4530000043300000, 0);
+const Xbyak::Address subtrahend = code.MConst(xword, 0x4330000000000000, 0x4530000000000000);

 const Xbyak::Xmm unpack_reg = ctx.reg_alloc.ScratchXmm();
 const Xbyak::Xmm subtrahend_reg = ctx.reg_alloc.ScratchXmm();
@@ -875,7 +885,7 @@ void EmitX64::EmitFPVectorFromUnsignedFixed64(EmitContext& ctx, IR::Inst* inst)
 }

 if (ctx.FPCR(fpcr_controlled).RMode() == FP::RoundingMode::TowardsMinusInfinity) {
-code.pand(xmm, code.XmmBConst<64>(xword, 0x7FFFFFFFFFFFFFFF));
+code.pand(xmm, code.MConst(xword, 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF));
 }
 });

@@ -1244,11 +1254,12 @@ template<size_t fsize>
 void FPVectorNeg(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
 using FPT = mcl::unsigned_integer_of_size<fsize>;
 constexpr FPT sign_mask = FP::FPInfo<FPT>::sign_mask;
+constexpr u64 sign_mask64 = mcl::bit::replicate_element<fsize, u64>(sign_mask);

 auto args = ctx.reg_alloc.GetArgumentInfo(inst);

 const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
-const Xbyak::Address mask = code.XmmBConst<fsize>(xword, sign_mask);
+const Xbyak::Address mask = code.MConst(xword, sign_mask64, sign_mask64);

 code.xorps(a, mask);

@@ -72,7 +72,7 @@ enum class Op {
 template<Op op, size_t esize>
 void EmitVectorSignedSaturated(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
 static_assert(esize == 32 || esize == 64);
-constexpr u64 msb_mask = esize == 32 ? 0x80000000 : 0x8000000000000000;
+constexpr u64 msb_mask = esize == 32 ? 0x8000000080000000 : 0x8000000000000000;

 auto args = ctx.reg_alloc.GetArgumentInfo(inst);

@@ -97,7 +97,7 @@ void EmitVectorSignedSaturated(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
 code.vpmovq2m(k1, xmm0);
 }
 ICODE(vpsra)(result | k1, result, u8(esize - 1));
-ICODE(vpxor)(result | k1, result, code.XmmBConst<esize>(xword_b, msb_mask));
+ICODE(vpxor)(result | k1, result, code.MConst(xword_b, msb_mask, msb_mask));

 code.ktestb(k1, k1);
 code.setnz(overflow);
@@ -148,10 +148,10 @@ void EmitVectorSignedSaturated(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
 if constexpr (esize == 64) {
 code.pshufd(tmp, tmp, 0b11110101);
 }
-code.pxor(tmp, code.XmmBConst<esize>(xword, msb_mask));
+code.pxor(tmp, code.MConst(xword, msb_mask, msb_mask));

 if (code.HasHostFeature(HostFeature::SSE41)) {
-code.ptest(xmm0, code.XmmBConst<esize>(xword, msb_mask));
+code.ptest(xmm0, code.MConst(xword, msb_mask, msb_mask));
 } else {
 FCODE(movmskp)(overflow.cvt32(), xmm0);
 code.test(overflow.cvt32(), overflow.cvt32());
@@ -541,7 +541,7 @@ HostLoc RegAlloc::LoadImmediate(IR::Value imm, HostLoc host_loc) {
 if (imm_value == 0) {
 MAYBE_AVX(xorps, reg, reg);
 } else {
-MAYBE_AVX(movaps, reg, code.XmmBConst<64>(code.xword, imm_value));
+MAYBE_AVX(movaps, reg, code.MConst(code.xword, imm_value));
 }
 return host_loc;
 }