Merge pull request #458 from lioncash/float-op

A64: Handle half-precision floating point in FABS, FNEG, and scalar FMOV
This commit is contained in:
Merry 2019-03-24 11:23:21 +00:00 committed by MerryMage
commit 01bb1cdd88
5 changed files with 56 additions and 21 deletions

View file

@@ -38,6 +38,9 @@ namespace {
const Xbyak::Reg64 INVALID_REG = Xbyak::Reg64(-1);
constexpr u64 f16_negative_zero = 0x8000;
constexpr u64 f16_non_sign_mask = 0x7fff;
constexpr u64 f32_negative_zero = 0x80000000u;
constexpr u64 f32_nan = 0x7fc00000u;
constexpr u64 f32_non_sign_mask = 0x7fffffffu;
@@ -323,9 +326,18 @@ void FPThreeOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Function fn)
} // anonymous namespace
// FABS (half-precision scalar): clears the sign bit of the 16-bit operand,
// leaving the remaining bits (exponent/mantissa) untouched.
void EmitX64::EmitFPAbs16(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);

    // Work in place on a scratch copy of the source register.
    const Xbyak::Xmm operand = ctx.reg_alloc.UseScratchXmm(args[0]);

    // AND with 0x7fff masks off only the f16 sign bit.
    code.pand(operand, code.MConst(xword, f16_non_sign_mask));

    ctx.reg_alloc.DefineValue(inst, operand);
}
void EmitX64::EmitFPAbs32(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
code.pand(result, code.MConst(xword, f32_non_sign_mask));
@@ -334,16 +346,25 @@ void EmitX64::EmitFPAbs32(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitFPAbs64(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
code.pand(result, code.MConst(xword, f64_non_sign_mask));
ctx.reg_alloc.DefineValue(inst, result);
}
// FNEG (half-precision scalar): flips the sign bit of the 16-bit operand.
void EmitX64::EmitFPNeg16(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);

    // Work in place on a scratch copy of the source register.
    const Xbyak::Xmm operand = ctx.reg_alloc.UseScratchXmm(args[0]);

    // XOR with 0x8000 (f16 negative zero) toggles only the sign bit.
    code.pxor(operand, code.MConst(xword, f16_negative_zero));

    ctx.reg_alloc.DefineValue(inst, operand);
}
void EmitX64::EmitFPNeg32(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
code.pxor(result, code.MConst(xword, f32_negative_zero));
@@ -352,7 +373,7 @@ void EmitX64::EmitFPNeg32(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitFPNeg64(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
code.pxor(result, code.MConst(xword, f64_negative_zero));

View file

@@ -12,36 +12,36 @@ namespace Dynarmic::A64 {
bool TranslatorVisitor::FMOV_float(Imm<2> type, Vec Vn, Vec Vd) {
const auto datasize = FPGetDataSize(type);
if (!datasize || *datasize == 16) {
if (!datasize) {
return UnallocatedEncoding();
}
const IR::U128 operand = V(*datasize, Vn);
const IR::U16U32U64 operand = V_scalar(*datasize, Vn);
V(*datasize, Vd, operand);
V_scalar(*datasize, Vd, operand);
return true;
}
bool TranslatorVisitor::FABS_float(Imm<2> type, Vec Vn, Vec Vd) {
const auto datasize = FPGetDataSize(type);
if (!datasize || *datasize == 16) {
if (!datasize) {
return UnallocatedEncoding();
}
const IR::U32U64 operand = V_scalar(*datasize, Vn);
const IR::U32U64 result = ir.FPAbs(operand);
const IR::U16U32U64 operand = V_scalar(*datasize, Vn);
const IR::U16U32U64 result = ir.FPAbs(operand);
V_scalar(*datasize, Vd, result);
return true;
}
bool TranslatorVisitor::FNEG_float(Imm<2> type, Vec Vn, Vec Vd) {
const auto datasize = FPGetDataSize(type);
if (!datasize || *datasize == 16) {
if (!datasize) {
return UnallocatedEncoding();
}
const IR::U32U64 operand = V_scalar(*datasize, Vn);
const IR::U32U64 result = ir.FPNeg(operand);
const IR::U16U32U64 operand = V_scalar(*datasize, Vn);
const IR::U16U32U64 result = ir.FPNeg(operand);
V_scalar(*datasize, Vd, result);
return true;
}

View file

@@ -1773,11 +1773,17 @@ U128 IREmitter::ZeroVector() {
return Inst<U128>(Opcode::ZeroVector);
}
U32U64 IREmitter::FPAbs(const U32U64& a) {
if (a.GetType() == Type::U32) {
U16U32U64 IREmitter::FPAbs(const U16U32U64& a) {
switch (a.GetType()) {
case Type::U16:
return Inst<U16>(Opcode::FPAbs16, a);
case Type::U32:
return Inst<U32>(Opcode::FPAbs32, a);
} else {
case Type::U64:
return Inst<U64>(Opcode::FPAbs64, a);
default:
UNREACHABLE();
return U16U32U64{};
}
}
@@ -1880,11 +1886,17 @@ U32U64 IREmitter::FPMulX(const U32U64& a, const U32U64& b) {
}
}
U32U64 IREmitter::FPNeg(const U32U64& a) {
if (a.GetType() == Type::U32) {
U16U32U64 IREmitter::FPNeg(const U16U32U64& a) {
switch (a.GetType()) {
case Type::U16:
return Inst<U16>(Opcode::FPNeg16, a);
case Type::U32:
return Inst<U32>(Opcode::FPNeg32, a);
} else {
case Type::U64:
return Inst<U64>(Opcode::FPNeg64, a);
default:
UNREACHABLE();
return U16U32U64{};
}
}

View file

@@ -292,7 +292,7 @@ public:
U128 VectorZeroUpper(const U128& a);
U128 ZeroVector();
U32U64 FPAbs(const U32U64& a);
U16U32U64 FPAbs(const U16U32U64& a);
U32U64 FPAdd(const U32U64& a, const U32U64& b, bool fpcr_controlled);
NZCV FPCompare(const U32U64& a, const U32U64& b, bool exc_on_qnan, bool fpcr_controlled);
U32U64 FPDiv(const U32U64& a, const U32U64& b, bool fpcr_controlled);
@@ -303,7 +303,7 @@ public:
U32U64 FPMul(const U32U64& a, const U32U64& b, bool fpcr_controlled);
U32U64 FPMulAdd(const U32U64& addend, const U32U64& op1, const U32U64& op2, bool fpcr_controlled);
U32U64 FPMulX(const U32U64& a, const U32U64& b);
U32U64 FPNeg(const U32U64& a);
U16U32U64 FPNeg(const U16U32U64& a);
U32U64 FPRecipEstimate(const U32U64& a);
U16U32U64 FPRecipExponent(const U16U32U64& a);
U32U64 FPRecipStepFused(const U32U64& a, const U32U64& b);

View file

@@ -460,6 +460,7 @@ OPCODE(VectorZeroUpper, U128, U128
OPCODE(ZeroVector, U128, )
// Floating-point operations
OPCODE(FPAbs16, U16, U16 )
OPCODE(FPAbs32, U32, U32 )
OPCODE(FPAbs64, U64, U64 )
OPCODE(FPAdd32, U32, U32, U32 )
@@ -482,6 +483,7 @@ OPCODE(FPMulAdd32, U32, U32,
OPCODE(FPMulAdd64, U64, U64, U64, U64 )
OPCODE(FPMulX32, U32, U32, U32 )
OPCODE(FPMulX64, U64, U64, U64 )
OPCODE(FPNeg16, U16, U16 )
OPCODE(FPNeg32, U32, U32 )
OPCODE(FPNeg64, U64, U64 )
OPCODE(FPRecipEstimate32, U32, U32 )