frontend/ir_emitter: Add half-precision variant of FPNeg
This commit is contained in:
parent
f01afc5ae6
commit
c97efcb978
4 changed files with 24 additions and 6 deletions
|
@ -38,6 +38,8 @@ namespace {
|
||||||
|
|
||||||
const Xbyak::Reg64 INVALID_REG = Xbyak::Reg64(-1);
|
const Xbyak::Reg64 INVALID_REG = Xbyak::Reg64(-1);
|
||||||
|
|
||||||
|
constexpr u64 f16_negative_zero = 0x8000;
|
||||||
|
|
||||||
constexpr u64 f32_negative_zero = 0x80000000u;
|
constexpr u64 f32_negative_zero = 0x80000000u;
|
||||||
constexpr u64 f32_nan = 0x7fc00000u;
|
constexpr u64 f32_nan = 0x7fc00000u;
|
||||||
constexpr u64 f32_non_sign_mask = 0x7fffffffu;
|
constexpr u64 f32_non_sign_mask = 0x7fffffffu;
|
||||||
|
@ -341,9 +343,18 @@ void EmitX64::EmitFPAbs64(EmitContext& ctx, IR::Inst* inst) {
|
||||||
ctx.reg_alloc.DefineValue(inst, result);
|
ctx.reg_alloc.DefineValue(inst, result);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Emits x64 code negating a half-precision (16-bit) floating-point value.
// Negation is a pure sign-bit flip, so it is exact for every input,
// including NaNs and infinities: pxor with the f16 sign-bit mask.
void EmitX64::EmitFPNeg16(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);

    // Flip the sign bit (0x8000) of the operand held in the low lanes.
    code.pxor(result, code.MConst(xword, f16_negative_zero));

    ctx.reg_alloc.DefineValue(inst, result);
}
|
||||||
|
|
||||||
// Emits x64 code negating a single-precision (32-bit) floating-point value
// by xoring the sign bit. Exact for all inputs, NaNs included.
void EmitX64::EmitFPNeg32(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);

    // Flip the binary32 sign bit (0x80000000).
    code.pxor(result, code.MConst(xword, f32_negative_zero));

    // NOTE(review): the trailing DefineValue/closing brace are elided in the
    // diff view; reconstructed to match the parallel FPNeg16/FPNeg64 bodies.
    ctx.reg_alloc.DefineValue(inst, result);
}
@ -352,7 +363,7 @@ void EmitX64::EmitFPNeg32(EmitContext& ctx, IR::Inst* inst) {
|
||||||
|
|
||||||
// Emits x64 code negating a double-precision (64-bit) floating-point value
// by xoring the sign bit. Exact for all inputs, NaNs included.
void EmitX64::EmitFPNeg64(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);

    // Flip the binary64 sign bit.
    code.pxor(result, code.MConst(xword, f64_negative_zero));

    // NOTE(review): the function tail is elided in the diff view;
    // reconstructed to match the parallel FPNeg16 body.
    ctx.reg_alloc.DefineValue(inst, result);
}
|
|
|
@ -1880,11 +1880,17 @@ U32U64 IREmitter::FPMulX(const U32U64& a, const U32U64& b) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
U32U64 IREmitter::FPNeg(const U32U64& a) {
|
U16U32U64 IREmitter::FPNeg(const U16U32U64& a) {
|
||||||
if (a.GetType() == Type::U32) {
|
switch (a.GetType()) {
|
||||||
|
case Type::U16:
|
||||||
|
return Inst<U16>(Opcode::FPNeg16, a);
|
||||||
|
case Type::U32:
|
||||||
return Inst<U32>(Opcode::FPNeg32, a);
|
return Inst<U32>(Opcode::FPNeg32, a);
|
||||||
} else {
|
case Type::U64:
|
||||||
return Inst<U64>(Opcode::FPNeg64, a);
|
return Inst<U64>(Opcode::FPNeg64, a);
|
||||||
|
default:
|
||||||
|
UNREACHABLE();
|
||||||
|
return U16U32U64{};
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -303,7 +303,7 @@ public:
|
||||||
U32U64 FPMul(const U32U64& a, const U32U64& b, bool fpcr_controlled);
|
U32U64 FPMul(const U32U64& a, const U32U64& b, bool fpcr_controlled);
|
||||||
U32U64 FPMulAdd(const U32U64& addend, const U32U64& op1, const U32U64& op2, bool fpcr_controlled);
|
U32U64 FPMulAdd(const U32U64& addend, const U32U64& op1, const U32U64& op2, bool fpcr_controlled);
|
||||||
U32U64 FPMulX(const U32U64& a, const U32U64& b);
|
U32U64 FPMulX(const U32U64& a, const U32U64& b);
|
||||||
U32U64 FPNeg(const U32U64& a);
|
U16U32U64 FPNeg(const U16U32U64& a);
|
||||||
U32U64 FPRecipEstimate(const U32U64& a);
|
U32U64 FPRecipEstimate(const U32U64& a);
|
||||||
U16U32U64 FPRecipExponent(const U16U32U64& a);
|
U16U32U64 FPRecipExponent(const U16U32U64& a);
|
||||||
U32U64 FPRecipStepFused(const U32U64& a, const U32U64& b);
|
U32U64 FPRecipStepFused(const U32U64& a, const U32U64& b);
|
||||||
|
|
|
@ -482,6 +482,7 @@ OPCODE(FPMulAdd32, U32, U32,
|
||||||
OPCODE(FPMulAdd64, U64, U64, U64, U64 )
|
OPCODE(FPMulAdd64, U64, U64, U64, U64 )
|
||||||
OPCODE(FPMulX32, U32, U32, U32 )
|
OPCODE(FPMulX32, U32, U32, U32 )
|
||||||
OPCODE(FPMulX64, U64, U64, U64 )
|
OPCODE(FPMulX64, U64, U64, U64 )
|
||||||
|
OPCODE(FPNeg16, U16, U16 )
|
||||||
OPCODE(FPNeg32, U32, U32 )
|
OPCODE(FPNeg32, U32, U32 )
|
||||||
OPCODE(FPNeg64, U64, U64 )
|
OPCODE(FPNeg64, U64, U64 )
|
||||||
OPCODE(FPRecipEstimate32, U32, U32 )
|
OPCODE(FPRecipEstimate32, U32, U32 )
|
||||||
|
|
Loading…
Reference in a new issue