VFP: Implement VMOV (all variants)
This commit is contained in:
parent
aba705f6b9
commit
a2c2db277b
11 changed files with 301 additions and 27 deletions
|
@ -1168,6 +1168,34 @@ static void FPTwoOp64(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block,
|
|||
}
|
||||
}
|
||||
|
||||
// Reinterprets the bits of a 32-bit float value as a u32: moves the low 32 bits
// of the source XMM register into a general-purpose register via MOVD.
// Note: DefRegister is called before UseRegister — the allocation order matters
// to the register allocator; do not reorder.
void EmitX64::EmitTransferFromFP32(IR::Block& block, IR::Inst* inst) {
    X64Reg result = reg_alloc.DefRegister(inst, any_gpr);
    X64Reg source = reg_alloc.UseRegister(inst->GetArg(0), any_xmm);
    // TODO: Eliminate this.
    code->MOVD_xmm(R(result), source);
}
|
||||
|
||||
// Reinterprets the bits of a 64-bit float value as a u64: moves all 64 bits
// of the source XMM register into a general-purpose register via MOVQ.
// Note: DefRegister is called before UseRegister — the allocation order matters
// to the register allocator; do not reorder.
void EmitX64::EmitTransferFromFP64(IR::Block& block, IR::Inst* inst) {
    X64Reg result = reg_alloc.DefRegister(inst, any_gpr);
    X64Reg source = reg_alloc.UseRegister(inst->GetArg(0), any_xmm);
    // TODO: Eliminate this.
    code->MOVQ_xmm(R(result), source);
}
|
||||
|
||||
// Reinterprets a u32 as a 32-bit float: moves the GPR's 32 bits into the low
// lane of an XMM register via MOVD (upper lanes are zeroed by MOVD semantics).
// Note: DefRegister is called before UseRegister — the allocation order matters
// to the register allocator; do not reorder.
void EmitX64::EmitTransferToFP32(IR::Block& block, IR::Inst* inst) {
    X64Reg result = reg_alloc.DefRegister(inst, any_xmm);
    X64Reg source = reg_alloc.UseRegister(inst->GetArg(0), any_gpr);
    // TODO: Eliminate this.
    code->MOVD_xmm(result, R(source));
}
|
||||
|
||||
// Reinterprets a u64 as a 64-bit float: moves the GPR's 64 bits into the low
// lane of an XMM register via MOVQ.
// Note: DefRegister is called before UseRegister — the allocation order matters
// to the register allocator; do not reorder.
void EmitX64::EmitTransferToFP64(IR::Block& block, IR::Inst* inst) {
    X64Reg result = reg_alloc.DefRegister(inst, any_xmm);
    X64Reg source = reg_alloc.UseRegister(inst->GetArg(0), any_gpr);
    // TODO: Eliminate this.
    code->MOVQ_xmm(result, R(source));
}
|
||||
|
||||
void EmitX64::EmitFPAbs32(IR::Block&, IR::Inst* inst) {
|
||||
IR::Value a = inst->GetArg(0);
|
||||
|
||||
|
|
|
@ -243,10 +243,14 @@ Gen::X64Reg RegAlloc::UseScratchRegister(IR::Inst* use_inst, HostLocList desired
|
|||
DEBUG_ASSERT(LocInfo(new_location).IsScratch());
|
||||
return HostLocToX64(new_location);
|
||||
} else if (HostLocIsRegister(current_location)) {
|
||||
ASSERT(LocInfo(current_location).IsIdle());
|
||||
ASSERT(LocInfo(current_location).IsIdle()
|
||||
|| LocInfo(current_location).IsUse()
|
||||
|| LocInfo(current_location).IsUseDef());
|
||||
|
||||
if (current_location != new_location) {
|
||||
EmitMove(new_location, current_location);
|
||||
} else {
|
||||
ASSERT(LocInfo(current_location).IsIdle());
|
||||
}
|
||||
|
||||
LocInfo(new_location).is_being_used = true;
|
||||
|
|
|
@ -7,10 +7,10 @@
|
|||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <cassert>
|
||||
#include <tuple>
|
||||
#include <type_traits>
|
||||
|
||||
#include "common/ASSERT.h"
|
||||
#include "common/bit_util.h"
|
||||
#include "common/common_types.h"
|
||||
|
||||
|
@ -37,10 +37,10 @@ enum class Reg {
|
|||
};
|
||||
|
||||
/// Offsets an ARM core register by `number`, asserting the result stays within
/// the valid R0–R15 range. The source span contained both the pre-change
/// `assert(...)` lines and the post-change `ASSERT(...)` lines (diff scrape
/// artifact); only the ASSERT form is kept, matching the project's assert macro
/// used elsewhere in this file.
inline Reg operator+(Reg reg, int number) {
    ASSERT(reg != Reg::INVALID_REG);

    int new_reg = static_cast<int>(reg) + number;
    // R0..R15 are the only valid core registers.
    ASSERT(new_reg >= 0 && new_reg <= 15);

    return static_cast<Reg>(new_reg);
}
|
||||
|
@ -63,6 +63,15 @@ enum class ExtReg {
|
|||
D24, D25, D26, D27, D28, D29, D30, D31,
|
||||
};
|
||||
|
||||
/// Offsets a VFP extended register by `number`. The result must stay in the
/// same bank as the source: S-registers may only step to S-registers and
/// D-registers only to D-registers.
inline ExtReg operator+(ExtReg reg, int number) {
    const ExtReg result = static_cast<ExtReg>(static_cast<int>(reg) + number);

    const bool stays_single = reg >= ExtReg::S0 && reg <= ExtReg::S31 && result >= ExtReg::S0 && result <= ExtReg::S31;
    const bool stays_double = reg >= ExtReg::D0 && reg <= ExtReg::D31 && result >= ExtReg::D0 && result <= ExtReg::D31;
    ASSERT(stays_single || stays_double);

    return result;
}
|
||||
|
||||
using Imm3 = u32;
|
||||
using Imm4 = u32;
|
||||
using Imm5 = u32;
|
||||
|
|
|
@ -64,22 +64,31 @@ boost::optional<const VFP2Matcher<V>&> DecodeVFP2(u32 instruction) {
|
|||
// cccc1110________----101-__-0----
|
||||
|
||||
// Floating-point three-register data processing instructions
|
||||
INST(&V::vfp2_VMLA, "VMLA", "cccc11100D00nnnndddd101zN0M0mmmm"),
|
||||
INST(&V::vfp2_VMLS, "VMLS", "cccc11100D00nnnndddd101zN1M0mmmm"),
|
||||
INST(&V::vfp2_VNMLS, "VNMLS", "cccc11100D01nnnndddd101zN0M0mmmm"),
|
||||
INST(&V::vfp2_VNMLA, "VNMLA", "cccc11100D01nnnndddd101zN1M0mmmm"),
|
||||
INST(&V::vfp2_VMUL, "VMUL", "cccc11100D10nnnndddd101zN0M0mmmm"),
|
||||
INST(&V::vfp2_VNMUL, "VNMUL", "cccc11100D10nnnndddd101zN1M0mmmm"),
|
||||
INST(&V::vfp2_VADD, "VADD", "cccc11100D11nnnndddd101zN0M0mmmm"),
|
||||
INST(&V::vfp2_VSUB, "VSUB", "cccc11100D11nnnndddd101zN1M0mmmm"),
|
||||
INST(&V::vfp2_VDIV, "VDIV", "cccc11101D00nnnndddd101zN0M0mmmm"),
|
||||
INST(&V::vfp2_VMLA, "VMLA", "cccc11100D00nnnndddd101zN0M0mmmm"),
|
||||
INST(&V::vfp2_VMLS, "VMLS", "cccc11100D00nnnndddd101zN1M0mmmm"),
|
||||
INST(&V::vfp2_VNMLS, "VNMLS", "cccc11100D01nnnndddd101zN0M0mmmm"),
|
||||
INST(&V::vfp2_VNMLA, "VNMLA", "cccc11100D01nnnndddd101zN1M0mmmm"),
|
||||
INST(&V::vfp2_VMUL, "VMUL", "cccc11100D10nnnndddd101zN0M0mmmm"),
|
||||
INST(&V::vfp2_VNMUL, "VNMUL", "cccc11100D10nnnndddd101zN1M0mmmm"),
|
||||
INST(&V::vfp2_VADD, "VADD", "cccc11100D11nnnndddd101zN0M0mmmm"),
|
||||
INST(&V::vfp2_VSUB, "VSUB", "cccc11100D11nnnndddd101zN1M0mmmm"),
|
||||
INST(&V::vfp2_VDIV, "VDIV", "cccc11101D00nnnndddd101zN0M0mmmm"),
|
||||
|
||||
// Floating-point move instructions
|
||||
INST(&V::vfp2_VMOV_u32_f64, "VMOV (core to f64)", "cccc11100000ddddtttt1011D0010000"),
|
||||
INST(&V::vfp2_VMOV_f64_u32, "VMOV (f64 to core)", "cccc11100001nnnntttt1011N0010000"),
|
||||
INST(&V::vfp2_VMOV_u32_f32, "VMOV (core to f32)", "cccc11100000nnnntttt1010N0010000"),
|
||||
INST(&V::vfp2_VMOV_f32_u32, "VMOV (f32 to core)", "cccc11100001nnnntttt1010N0010000"),
|
||||
INST(&V::vfp2_VMOV_2u32_2f32, "VMOV (2xcore to 2xf32)", "cccc11000100uuuutttt101000M1mmmm"),
|
||||
INST(&V::vfp2_VMOV_2f32_2u32, "VMOV (2xf32 to 2xcore)", "cccc11000101uuuutttt101000M1mmmm"),
|
||||
INST(&V::vfp2_VMOV_2u32_f64, "VMOV (2xcore to f64)", "cccc11000100uuuutttt101100M1mmmm"),
|
||||
INST(&V::vfp2_VMOV_f64_2u32, "VMOV (f64 to 2xcore)", "cccc11000101uuuutttt101100M1mmmm"),
|
||||
INST(&V::vfp2_VMOV_reg, "VMOV (reg)", "cccc11101D110000dddd101z01M0mmmm"),
|
||||
|
||||
// Floating-point other instructions
|
||||
// VMOV_imm
|
||||
// VMOV_reg
|
||||
INST(&V::vfp2_VABS, "VABS", "cccc11101D110000dddd101z11M0mmmm"),
|
||||
INST(&V::vfp2_VNEG, "VNEG", "cccc11101D110001dddd101z01M0mmmm"),
|
||||
INST(&V::vfp2_VSQRT, "VSQRT", "cccc11101D110001dddd101z11M0mmmm"),
|
||||
INST(&V::vfp2_VABS, "VABS", "cccc11101D110000dddd101z11M0mmmm"),
|
||||
INST(&V::vfp2_VNEG, "VNEG", "cccc11101D110001dddd101z01M0mmmm"),
|
||||
INST(&V::vfp2_VSQRT, "VSQRT", "cccc11101D110001dddd101z11M0mmmm"),
|
||||
// VCMP
|
||||
// VCMPE
|
||||
// VCVT
|
||||
|
|
|
@ -92,6 +92,16 @@ public:
|
|||
return Common::StringFromFormat("%c%zu", dp_operation ? 'd' : 's', reg_num);
|
||||
}
|
||||
|
||||
std::string FPNextRegStr(bool dp_operation, size_t base, bool bit) {
|
||||
size_t reg_num;
|
||||
if (dp_operation) {
|
||||
reg_num = base + (bit ? 16 : 0);
|
||||
} else {
|
||||
reg_num = (base << 1) + (bit ? 1 : 0);
|
||||
}
|
||||
return Common::StringFromFormat("%c%zu", dp_operation ? 'd' : 's', reg_num + 1);
|
||||
}
|
||||
|
||||
// Branch instructions
|
||||
std::string arm_B(Cond cond, Imm24 imm24) {
|
||||
s32 offset = Common::SignExtend<26, s32>(imm24 << 2) + 8;
|
||||
|
@ -596,6 +606,42 @@ public:
|
|||
return Common::StringFromFormat("vdiv%s.%s %s, %s, %s", CondToString(cond), sz ? "f64" : "f32", FPRegStr(sz, Vd, D).c_str(), FPRegStr(sz, Vn, N).c_str(), FPRegStr(sz, Vm, M).c_str());
|
||||
}
|
||||
|
||||
// Disassembles VMOV (core register to low half of a double register).
std::string vfp2_VMOV_u32_f64(Cond cond, size_t Vd, Reg t, bool D){
    const std::string dest = FPRegStr(true, Vd, D);
    return Common::StringFromFormat("vmov%s.32 %s, %s", CondToString(cond), dest.c_str(), RegToString(t));
}
|
||||
|
||||
// Disassembles VMOV (low half of a double register to core register).
std::string vfp2_VMOV_f64_u32(Cond cond, size_t Vn, Reg t, bool N){
    const std::string source = FPRegStr(true, Vn, N);
    return Common::StringFromFormat("vmov%s.32 %s, %s", CondToString(cond), RegToString(t), source.c_str());
}
|
||||
|
||||
// Disassembles VMOV (core register to single-precision register).
std::string vfp2_VMOV_u32_f32(Cond cond, size_t Vn, Reg t, bool N){
    const std::string dest = FPRegStr(false, Vn, N);
    return Common::StringFromFormat("vmov%s.32 %s, %s", CondToString(cond), dest.c_str(), RegToString(t));
}
|
||||
|
||||
// Disassembles VMOV (single-precision register to core register).
std::string vfp2_VMOV_f32_u32(Cond cond, size_t Vn, Reg t, bool N){
    const std::string source = FPRegStr(false, Vn, N);
    return Common::StringFromFormat("vmov%s.32 %s, %s", CondToString(cond), RegToString(t), source.c_str());
}
|
||||
|
||||
// Disassembles VMOV (two core registers to a consecutive pair of
// single-precision registers Sm, Sm+1).
std::string vfp2_VMOV_2u32_2f32(Cond cond, Reg t2, Reg t, bool M, size_t Vm){
    const std::string lo = FPRegStr(false, Vm, M);
    const std::string hi = FPNextRegStr(false, Vm, M);
    return Common::StringFromFormat("vmov%s %s, %s, %s, %s", CondToString(cond), lo.c_str(), hi.c_str(), RegToString(t), RegToString(t2));
}
|
||||
|
||||
// Disassembles VMOV (a consecutive pair of single-precision registers
// Sm, Sm+1 to two core registers).
std::string vfp2_VMOV_2f32_2u32(Cond cond, Reg t2, Reg t, bool M, size_t Vm){
    const std::string lo = FPRegStr(false, Vm, M);
    const std::string hi = FPNextRegStr(false, Vm, M);
    return Common::StringFromFormat("vmov%s %s, %s, %s, %s", CondToString(cond), RegToString(t), RegToString(t2), lo.c_str(), hi.c_str());
}
|
||||
|
||||
// Disassembles VMOV (two core registers to a double-precision register).
std::string vfp2_VMOV_2u32_f64(Cond cond, Reg t2, Reg t, bool M, size_t Vm){
    const std::string dest = FPRegStr(true, Vm, M);
    return Common::StringFromFormat("vmov%s %s, %s, %s", CondToString(cond), dest.c_str(), RegToString(t), RegToString(t2));
}
|
||||
|
||||
// Disassembles VMOV (a double-precision register to two core registers).
std::string vfp2_VMOV_f64_2u32(Cond cond, Reg t2, Reg t, bool M, size_t Vm){
    const std::string source = FPRegStr(true, Vm, M);
    return Common::StringFromFormat("vmov%s %s, %s, %s", CondToString(cond), RegToString(t), RegToString(t2), source.c_str());
}
|
||||
|
||||
// Disassembles VMOV (register form): vmov.f32/.f64 <{S,D}d>, <{S,D}m>.
std::string vfp2_VMOV_reg(Cond cond, bool D, size_t Vd, bool sz, bool M, size_t Vm){
    const char* precision = sz ? "f64" : "f32";
    const std::string dest = FPRegStr(sz, Vd, D);
    const std::string source = FPRegStr(sz, Vm, M);
    return Common::StringFromFormat("vmov%s.%s %s, %s", CondToString(cond), precision, dest.c_str(), source.c_str());
}
|
||||
|
||||
// Disassembles VABS: vabs.f32/.f64 <{S,D}d>, <{S,D}m>.
// Fix: the mnemonic was "vadd" (copy-paste from the VADD disassembler above),
// which made VABS instructions disassemble as additions.
std::string vfp2_VABS(Cond cond, bool D, size_t Vd, bool sz, bool M, size_t Vm) {
    return Common::StringFromFormat("vabs%s.%s %s, %s", CondToString(cond), sz ? "f64" : "f32", FPRegStr(sz, Vd, D).c_str(), FPRegStr(sz, Vm, M).c_str());
}
|
||||
|
|
|
@ -274,6 +274,22 @@ IR::Value IREmitter::ByteReverseDual(const IR::Value& a) {
|
|||
return Inst(IR::Opcode::ByteReverseDual, {a});
|
||||
}
|
||||
|
||||
// Emits an IR op that bit-casts a u32 value to an f32 value (no conversion).
IR::Value IREmitter::TransferToFP32(const IR::Value& a) {
    return Inst(IR::Opcode::TransferToFP32, {a});
}
|
||||
|
||||
// Emits an IR op that bit-casts a u64 value to an f64 value (no conversion).
IR::Value IREmitter::TransferToFP64(const IR::Value& a) {
    return Inst(IR::Opcode::TransferToFP64, {a});
}
|
||||
|
||||
// Emits an IR op that bit-casts an f32 value to a u32 value (no conversion).
IR::Value IREmitter::TransferFromFP32(const IR::Value& a) {
    return Inst(IR::Opcode::TransferFromFP32, {a});
}
|
||||
|
||||
// Emits an IR op that bit-casts an f64 value to a u64 value (no conversion).
IR::Value IREmitter::TransferFromFP64(const IR::Value& a) {
    return Inst(IR::Opcode::TransferFromFP64, {a});
}
|
||||
|
||||
// Emits an IR op computing the absolute value of an f32 operand.
IR::Value IREmitter::FPAbs32(const IR::Value& a) {
    return Inst(IR::Opcode::FPAbs32, {a});
}
|
||||
|
|
|
@ -92,6 +92,10 @@ public:
|
|||
IR::Value ByteReverseHalf(const IR::Value& a);
|
||||
IR::Value ByteReverseDual(const IR::Value& a);
|
||||
|
||||
IR::Value TransferToFP32(const IR::Value& a);
|
||||
IR::Value TransferToFP64(const IR::Value& a);
|
||||
IR::Value TransferFromFP32(const IR::Value& a);
|
||||
IR::Value TransferFromFP64(const IR::Value& a);
|
||||
IR::Value FPAbs32(const IR::Value& a);
|
||||
IR::Value FPAbs64(const IR::Value& a);
|
||||
IR::Value FPAdd32(const IR::Value& a, const IR::Value& b, bool fpscr_controlled);
|
||||
|
|
|
@ -60,6 +60,10 @@ OPCODE(ByteReverseHalf, T::U16, T::U16
|
|||
OPCODE(ByteReverseDual, T::U64, T::U64 )
|
||||
|
||||
// Floating-point
|
||||
OPCODE(TransferToFP32, T::F32, T::U32 )
|
||||
OPCODE(TransferToFP64, T::F64, T::U64 )
|
||||
OPCODE(TransferFromFP32, T::U32, T::F32 )
|
||||
OPCODE(TransferFromFP64, T::U64, T::F64 )
|
||||
OPCODE(FPAbs32, T::F32, T::F32 )
|
||||
OPCODE(FPAbs64, T::F64, T::F64 )
|
||||
OPCODE(FPAdd32, T::F32, T::F32, T::F32 )
|
||||
|
|
|
@ -329,6 +329,17 @@ struct ArmTranslatorVisitor final {
|
|||
bool vfp2_VNMLS(Cond cond, bool D, size_t Vn, size_t Vd, bool sz, bool N, bool M, size_t Vm);
|
||||
bool vfp2_VDIV(Cond cond, bool D, size_t Vn, size_t Vd, bool sz, bool N, bool M, size_t Vm);
|
||||
|
||||
// Floating-point move instructions
|
||||
bool vfp2_VMOV_u32_f64(Cond cond, size_t Vd, Reg t, bool D);
|
||||
bool vfp2_VMOV_f64_u32(Cond cond, size_t Vn, Reg t, bool N);
|
||||
bool vfp2_VMOV_u32_f32(Cond cond, size_t Vn, Reg t, bool N);
|
||||
bool vfp2_VMOV_f32_u32(Cond cond, size_t Vn, Reg t, bool N);
|
||||
bool vfp2_VMOV_2u32_2f32(Cond cond, Reg t2, Reg t, bool M, size_t Vm);
|
||||
bool vfp2_VMOV_2f32_2u32(Cond cond, Reg t2, Reg t, bool M, size_t Vm);
|
||||
bool vfp2_VMOV_2u32_f64(Cond cond, Reg t2, Reg t, bool M, size_t Vm);
|
||||
bool vfp2_VMOV_f64_2u32(Cond cond, Reg t2, Reg t, bool M, size_t Vm);
|
||||
bool vfp2_VMOV_reg(Cond cond, bool D, size_t Vd, bool sz, bool M, size_t Vm);
|
||||
|
||||
// Floating-point misc instructions
|
||||
bool vfp2_VABS(Cond cond, bool D, size_t Vd, bool sz, bool M, size_t Vm);
|
||||
bool vfp2_VNEG(Cond cond, bool D, size_t Vd, bool sz, bool M, size_t Vm);
|
||||
|
|
|
@ -192,6 +192,123 @@ bool ArmTranslatorVisitor::vfp2_VDIV(Cond cond, bool D, size_t Vn, size_t Vd, bo
|
|||
return true;
|
||||
}
|
||||
|
||||
// VMOV.32 <Dd[0]>, <Rt>: writes Rt into the low 32 bits of double register Dd,
// preserving Dd's upper 32 bits. PC as Rt is UNPREDICTABLE.
bool ArmTranslatorVisitor::vfp2_VMOV_u32_f64(Cond cond, size_t Vd, Reg t, bool D) {
    ExtReg d = ToExtReg(true, Vd, D);
    if (t == Reg::PC)
        return UnpredictableInstruction();
    // VMOV.32 <Dd[0]>, <Rt>
    if (ConditionPassed(cond)) {
        auto d_f64 = ir.GetExtendedRegister(d);
        auto t_u32 = ir.GetRegister(t);

        // Reinterpret the double as raw bits, then rebuild it with the low
        // word replaced by Rt and the original high word kept.
        auto d_u64 = ir.TransferFromFP64(d_f64);
        auto result = ir.Pack2x32To1x64(t_u32, ir.MostSignificantWord(d_u64).result);

        ir.SetExtendedRegister(d, ir.TransferToFP64(result));
    }
    return true;
}
|
||||
|
||||
// VMOV.32 <Rt>, <Dn[0]>: reads the low 32 bits of double register Dn into Rt.
// PC as Rt is UNPREDICTABLE.
bool ArmTranslatorVisitor::vfp2_VMOV_f64_u32(Cond cond, size_t Vn, Reg t, bool N) {
    ExtReg n = ToExtReg(true, Vn, N);
    if (t == Reg::PC)
        return UnpredictableInstruction();
    // VMOV.32 <Rt>, <Dn[0]>
    if (ConditionPassed(cond)) {
        auto n_f64 = ir.GetExtendedRegister(n);
        // Bit-cast the double to u64 and keep only the least-significant word.
        ir.SetRegister(t, ir.LeastSignificantWord(ir.TransferFromFP64(n_f64)));
    }
    return true;
}
|
||||
|
||||
// VMOV <Sn>, <Rt>: copies core register Rt into single-precision register Sn
// (bit-for-bit, no conversion). PC as Rt is UNPREDICTABLE.
bool ArmTranslatorVisitor::vfp2_VMOV_u32_f32(Cond cond, size_t Vn, Reg t, bool N) {
    ExtReg n = ToExtReg(false, Vn, N);
    if (t == Reg::PC)
        return UnpredictableInstruction();
    // VMOV <Sn>, <Rt>
    if (ConditionPassed(cond)) {
        ir.SetExtendedRegister(n, ir.TransferToFP32(ir.GetRegister(t)));
    }
    return true;
}
|
||||
|
||||
// VMOV <Rt>, <Sn>: copies single-precision register Sn into core register Rt
// (bit-for-bit, no conversion). PC as Rt is UNPREDICTABLE.
bool ArmTranslatorVisitor::vfp2_VMOV_f32_u32(Cond cond, size_t Vn, Reg t, bool N) {
    ExtReg n = ToExtReg(false, Vn, N);
    if (t == Reg::PC)
        return UnpredictableInstruction();
    // VMOV <Rt>, <Sn>
    if (ConditionPassed(cond)) {
        ir.SetRegister(t, ir.TransferFromFP32(ir.GetExtendedRegister(n)));
    }
    return true;
}
|
||||
|
||||
// VMOV <Sm>, <Sm1>, <Rt>, <Rt2>: copies two core registers into consecutive
// single-precision registers Sm and Sm+1. PC for either core register is
// UNPREDICTABLE; Sm == S31 is rejected because Sm+1 would not exist.
bool ArmTranslatorVisitor::vfp2_VMOV_2u32_2f32(Cond cond, Reg t2, Reg t, bool M, size_t Vm) {
    ExtReg m = ToExtReg(false, Vm, M);
    if (t == Reg::PC || t2 == Reg::PC || m == ExtReg::S31)
        return UnpredictableInstruction();
    // VMOV <Sm>, <Sm1>, <Rt>, <Rt2>
    if (ConditionPassed(cond)) {
        ir.SetExtendedRegister(m, ir.TransferToFP32(ir.GetRegister(t)));
        ir.SetExtendedRegister(m+1, ir.TransferToFP32(ir.GetRegister(t2)));
    }
    return true;
}
|
||||
|
||||
// VMOV <Rt>, <Rt2>, <Sm>, <Sm1>: copies consecutive single-precision registers
// Sm and Sm+1 into two core registers. PC for either core register, Sm == S31
// (no Sm+1), or Rt == Rt2 (two writes to the same register) is UNPREDICTABLE.
bool ArmTranslatorVisitor::vfp2_VMOV_2f32_2u32(Cond cond, Reg t2, Reg t, bool M, size_t Vm) {
    ExtReg m = ToExtReg(false, Vm, M);
    if (t == Reg::PC || t2 == Reg::PC || m == ExtReg::S31)
        return UnpredictableInstruction();
    if (t == t2)
        return UnpredictableInstruction();
    // VMOV <Rt>, <Rt2>, <Sm>, <Sm1>
    if (ConditionPassed(cond)) {
        ir.SetRegister(t, ir.TransferFromFP32(ir.GetExtendedRegister(m)));
        ir.SetRegister(t2, ir.TransferFromFP32(ir.GetExtendedRegister(m+1)));
    }
    return true;
}
|
||||
|
||||
// VMOV<c> <Dm>, <Rt>, <Rt2>: packs Rt (low word) and Rt2 (high word) into
// double register Dm. PC for either core register is UNPREDICTABLE.
bool ArmTranslatorVisitor::vfp2_VMOV_2u32_f64(Cond cond, Reg t2, Reg t, bool M, size_t Vm) {
    ExtReg m = ToExtReg(true, Vm, M);
    // NOTE(review): `m == ExtReg::S31` looks vestigial here — m comes from
    // ToExtReg(true, ...), which presumably yields only D registers, so the
    // comparison would never hold. Likely copied from the 2xf32 variant;
    // TODO confirm against ToExtReg and remove if dead.
    if (t == Reg::PC || t2 == Reg::PC || m == ExtReg::S31)
        return UnpredictableInstruction();
    // VMOV<c> <Dm>, <Rt>, <Rt2>
    if (ConditionPassed(cond)) {
        auto value = ir.Pack2x32To1x64(ir.GetRegister(t), ir.GetRegister(t2));
        ir.SetExtendedRegister(m, ir.TransferToFP64(value));
    }
    return true;
}
|
||||
|
||||
// VMOV<c> <Rt>, <Rt2>, <Dm>: splits double register Dm into Rt (low word) and
// Rt2 (high word). PC for either core register, or Rt == Rt2, is UNPREDICTABLE.
bool ArmTranslatorVisitor::vfp2_VMOV_f64_2u32(Cond cond, Reg t2, Reg t, bool M, size_t Vm) {
    ExtReg m = ToExtReg(true, Vm, M);
    // NOTE(review): `m == ExtReg::S31` looks vestigial here — m comes from
    // ToExtReg(true, ...), which presumably yields only D registers, so the
    // comparison would never hold. Likely copied from the 2xf32 variant;
    // TODO confirm against ToExtReg and remove if dead.
    if (t == Reg::PC || t2 == Reg::PC || m == ExtReg::S31)
        return UnpredictableInstruction();
    if (t == t2)
        return UnpredictableInstruction();
    // VMOV<c> <Rt>, <Rt2>, <Dm>
    if (ConditionPassed(cond)) {
        auto value = ir.TransferFromFP64(ir.GetExtendedRegister(m));
        ir.SetRegister(t, ir.LeastSignificantWord(value));
        ir.SetRegister(t2, ir.MostSignificantWord(value).result);
    }
    return true;
}
|
||||
|
||||
// VMOV.{F32,F64} <{S,D}d>, <{S,D}m>: register-to-register move within the VFP
// bank. Vector mode (FPSCR Len/Stride != 1) is deferred to the interpreter.
bool ArmTranslatorVisitor::vfp2_VMOV_reg(Cond cond, bool D, size_t Vd, bool sz, bool M, size_t Vm) {
    if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1)
        return InterpretThisInstruction(); // TODO: Vectorised floating point instructions

    // sz selects precision: true = D registers (f64), false = S registers (f32).
    ExtReg d = ToExtReg(sz, Vd, D);
    ExtReg m = ToExtReg(sz, Vm, M);
    // VMOV.{F32,F64} <{S,D}d>, <{S,D}m>
    if (ConditionPassed(cond)) {
        ir.SetExtendedRegister(d, ir.GetExtendedRegister(m));
    }
    return true;
}
|
||||
|
||||
bool ArmTranslatorVisitor::vfp2_VABS(Cond cond, bool D, size_t Vd, bool sz, bool M, size_t Vm) {
|
||||
if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1)
|
||||
return InterpretThisInstruction(); // TODO: Vectorised floating point instructions
|
||||
|
|
|
@ -26,6 +26,8 @@
|
|||
#include <signal.h>
|
||||
#endif
|
||||
|
||||
using Dynarmic::Common::Bits;
|
||||
|
||||
struct WriteRecord {
|
||||
size_t size;
|
||||
u32 address;
|
||||
|
@ -393,6 +395,30 @@ TEST_CASE("vfp: vadd", "[vfp]") {
|
|||
}
|
||||
}
|
||||
|
||||
// Fuzzes all eight VMOV transfer encodings (core<->f32, core<->f64, and the
// two-register pair forms) against the reference, one instruction per run.
TEST_CASE("VFP: VMOV", "[JitX64][vfp]") {
    // Filters out encodings the translator treats as UNPREDICTABLE:
    // bits 12-15 / 16-19 are the core-register fields, so 0b1111 (PC) is
    // rejected, as is Rt == Rt2 (equal fields). The first clause excludes
    // certain low-bit patterns — presumably the Vm/M combination that would
    // need a nonexistent Sm+1 (S31) in the pair forms; TODO confirm against
    // the translator's checks.
    const auto is_valid = [](u32 instr) -> bool {
        return Bits<0, 6>(instr) != 0b111111
            && Bits<12, 15>(instr) != 0b1111
            && Bits<16, 19>(instr) != 0b1111
            && Bits<12, 15>(instr) != Bits<16, 19>(instr);
    };

    // One generator per VMOV variant, in the same order as the decoder table.
    const std::array<InstructionGenerator, 8> instructions = {{
        InstructionGenerator("cccc11100000ddddtttt1011D0010000", is_valid),
        InstructionGenerator("cccc11100001nnnntttt1011N0010000", is_valid),
        InstructionGenerator("cccc11100000nnnntttt1010N0010000", is_valid),
        InstructionGenerator("cccc11100001nnnntttt1010N0010000", is_valid),
        InstructionGenerator("cccc11000100uuuutttt101000M1mmmm", is_valid),
        InstructionGenerator("cccc11000101uuuutttt101000M1mmmm", is_valid),
        InstructionGenerator("cccc11000100uuuutttt101100M1mmmm", is_valid),
        InstructionGenerator("cccc11000101uuuutttt101100M1mmmm", is_valid),
    }};

    // 10000 runs of a single random instruction each.
    FuzzJitArm(1, 1, 10000, [&instructions]() -> u32 {
        return instructions[RandInt<size_t>(0, instructions.size() - 1)].Generate();
    });
}
|
||||
|
||||
TEST_CASE("Fuzz ARM data processing instructions", "[JitX64]") {
|
||||
const std::array<InstructionGenerator, 16> imm_instructions = {
|
||||
{
|
||||
|
@ -525,7 +551,7 @@ TEST_CASE("Fuzz ARM data processing instructions", "[JitX64]") {
|
|||
TEST_CASE("Fuzz ARM reversal instructions", "[JitX64]") {
|
||||
const auto is_valid = [](u32 instr) -> bool {
|
||||
// R15 is UNPREDICTABLE
|
||||
return Dynarmic::Common::Bits<0, 3>(instr) != 0b1111 && Dynarmic::Common::Bits<12, 15>(instr) != 0b1111;
|
||||
return Bits<0, 3>(instr) != 0b1111 && Bits<12, 15>(instr) != 0b1111;
|
||||
};
|
||||
|
||||
const std::array<InstructionGenerator, 3> rev_instructions = {
|
||||
|
@ -546,11 +572,11 @@ TEST_CASE("Fuzz ARM reversal instructions", "[JitX64]") {
|
|||
/*
|
||||
TEST_CASE("Fuzz ARM Load/Store instructions", "[JitX64]") {
|
||||
auto forbid_r15 = [](u32 inst) -> bool {
|
||||
return Dynarmic::Common::Bits<12, 15>(inst) != 0b1111;
|
||||
return Bits<12, 15>(inst) != 0b1111;
|
||||
};
|
||||
|
||||
auto forbid_r14_and_r15 = [](u32 inst) -> bool {
|
||||
return Dynarmic::Common::Bits<13, 15>(inst) != 0b111;
|
||||
return Bits<13, 15>(inst) != 0b111;
|
||||
};
|
||||
|
||||
const std::array<InstructionGenerator, 4> doubleword_instructions = {
|
||||
|
@ -674,7 +700,7 @@ TEST_CASE("Fuzz ARM Load/Store instructions", "[JitX64]") {
|
|||
TEST_CASE("Fuzz ARM extension instructions", "[JitX64]") {
|
||||
const auto is_valid = [](u32 instr) -> bool {
|
||||
// R15 as Rd or Rm is UNPREDICTABLE
|
||||
return Dynarmic::Common::Bits<0, 3>(instr) != 0b1111 && Dynarmic::Common::Bits<12, 15>(instr) != 0b1111;
|
||||
return Bits<0, 3>(instr) != 0b1111 && Bits<12, 15>(instr) != 0b1111;
|
||||
};
|
||||
|
||||
const std::array<InstructionGenerator, 6> signed_instructions = {
|
||||
|
@ -714,17 +740,17 @@ TEST_CASE("Fuzz ARM extension instructions", "[JitX64]") {
|
|||
|
||||
TEST_CASE("Fuzz ARM multiply instructions", "[JitX64]") {
|
||||
auto validate_d_m_n = [](u32 inst) -> bool {
|
||||
return Dynarmic::Common::Bits<16, 19>(inst) != 15 &&
|
||||
Dynarmic::Common::Bits<8, 11>(inst) != 15 &&
|
||||
Dynarmic::Common::Bits<0, 3>(inst) != 15;
|
||||
return Bits<16, 19>(inst) != 15 &&
|
||||
Bits<8, 11>(inst) != 15 &&
|
||||
Bits<0, 3>(inst) != 15;
|
||||
};
|
||||
auto validate_d_a_m_n = [&](u32 inst) -> bool {
|
||||
return validate_d_m_n(inst) &&
|
||||
Dynarmic::Common::Bits<12, 15>(inst) != 15;
|
||||
Bits<12, 15>(inst) != 15;
|
||||
};
|
||||
auto validate_h_l_m_n = [&](u32 inst) -> bool {
|
||||
return validate_d_a_m_n(inst) &&
|
||||
Dynarmic::Common::Bits<12, 15>(inst) != Dynarmic::Common::Bits<16, 19>(inst);
|
||||
Bits<12, 15>(inst) != Bits<16, 19>(inst);
|
||||
};
|
||||
|
||||
const std::array<InstructionGenerator, 10> instructions = {
|
||||
|
|
Loading…
Reference in a new issue