A64/translate/*: Apply const where applicable
Just some tidying up for consistency
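The pattern applied throughout the diff below is the same in every function: a local that is initialised once and never reassigned gains a const qualifier, and single-line early returns gain braces. A minimal illustrative sketch of the style (hypothetical function, not taken from the dynarmic sources):

    #include <cstddef>
    #include <cstdint>

    // Before the tidy-up: "std::uint64_t mask = ..." and an unbraced early return.
    // After: const-qualified locals and a braced early return.
    std::uint64_t low_bits(std::uint64_t value, std::size_t count) {
        if (count == 0) {
            return 0;
        }

        const std::uint64_t mask = (count >= 64) ? ~std::uint64_t{0} : ((std::uint64_t{1} << count) - 1);
        const std::uint64_t result = value & mask;
        return result;
    }

Locals that must be reassigned later (for example a result that is conditionally passed through ir.VectorZeroUpper) are deliberately left non-const in the diff.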
parent dddba94c17
commit dd315e89eb

4 changed files with 109 additions and 88 deletions
@@ -9,8 +9,8 @@
 namespace Dynarmic::A64 {
 
 static IR::U32U64 ReplicateBit(IREmitter& ir, const IR::U32U64& value, u8 bit_position_to_replicate) {
-    u8 datasize = value.GetType() == IR::Type::U64 ? 64 : 32;
-    auto bit = ir.LogicalShiftLeft(value, ir.Imm8(datasize - 1 - bit_position_to_replicate));
+    const u8 datasize = value.GetType() == IR::Type::U64 ? 64 : 32;
+    const auto bit = ir.LogicalShiftLeft(value, ir.Imm8(datasize - 1 - bit_position_to_replicate));
     return ir.ArithmeticShiftRight(bit, ir.Imm8(datasize - 1));
 }
 
@@ -9,15 +9,15 @@
 namespace Dynarmic::A64 {
 
 bool TranslatorVisitor::ADR(Imm<2> immlo, Imm<19> immhi, Reg Rd) {
-    u64 imm = concatenate(immhi, immlo).SignExtend<u64>();
-    u64 base = ir.PC();
+    const u64 imm = concatenate(immhi, immlo).SignExtend<u64>();
+    const u64 base = ir.PC();
     X(64, Rd, ir.Imm64(base + imm));
     return true;
 }
 
 bool TranslatorVisitor::ADRP(Imm<2> immlo, Imm<19> immhi, Reg Rd) {
-    u64 imm = concatenate(immhi, immlo).SignExtend<u64>() << 12;
-    u64 base = ir.PC() & ~u64(0xFFF);
+    const u64 imm = concatenate(immhi, immlo).SignExtend<u64>() << 12;
+    const u64 base = ir.PC() & ~u64(0xFFF);
     X(64, Rd, ir.Imm64(base + imm));
     return true;
 }
@@ -15,7 +15,7 @@ bool TranslatorVisitor::MOVI(bool Q, bool op, Imm<1> a, Imm<1> b, Imm<1> c, Imm<
     // MOVI
     // also FMOV (vector, immediate) when cmode == 0b1111
     const auto movi = [&]{
-        u64 imm64 = AdvSIMDExpandImm(op, cmode, concatenate(a, b, c, d, e, f, g, h));
+        const u64 imm64 = AdvSIMDExpandImm(op, cmode, concatenate(a, b, c, d, e, f, g, h));
         const IR::U128 imm = datasize == 64 ? ir.ZeroExtendToQuad(ir.Imm64(imm64)) : ir.VectorBroadcast(64, ir.Imm64(imm64));
         V(128, Vd, imm);
         return true;
@@ -23,7 +23,7 @@ bool TranslatorVisitor::MOVI(bool Q, bool op, Imm<1> a, Imm<1> b, Imm<1> c, Imm<
 
     // MVNI
     const auto mvni = [&]{
-        u64 imm64 = ~AdvSIMDExpandImm(op, cmode, concatenate(a, b, c, d, e, f, g, h));
+        const u64 imm64 = ~AdvSIMDExpandImm(op, cmode, concatenate(a, b, c, d, e, f, g, h));
         const IR::U128 imm = datasize == 64 ? ir.ZeroExtendToQuad(ir.Imm64(imm64)) : ir.VectorBroadcast(64, ir.Imm64(imm64));
         V(128, Vd, imm);
         return true;
@@ -31,7 +31,7 @@ bool TranslatorVisitor::MOVI(bool Q, bool op, Imm<1> a, Imm<1> b, Imm<1> c, Imm<
 
     // ORR (vector, immediate)
     const auto orr = [&]{
-        u64 imm64 = AdvSIMDExpandImm(op, cmode, concatenate(a, b, c, d, e, f, g, h));
+        const u64 imm64 = AdvSIMDExpandImm(op, cmode, concatenate(a, b, c, d, e, f, g, h));
         const IR::U128 imm = datasize == 64 ? ir.ZeroExtendToQuad(ir.Imm64(imm64)) : ir.VectorBroadcast(64, ir.Imm64(imm64));
         const IR::U128 operand = V(datasize, Vd);
         const IR::U128 result = ir.VectorOr(operand, imm);
@@ -41,7 +41,7 @@ bool TranslatorVisitor::MOVI(bool Q, bool op, Imm<1> a, Imm<1> b, Imm<1> c, Imm<
 
     // BIC (vector, immediate)
     const auto bic = [&]{
-        u64 imm64 = ~AdvSIMDExpandImm(op, cmode, concatenate(a, b, c, d, e, f, g, h));
+        const u64 imm64 = ~AdvSIMDExpandImm(op, cmode, concatenate(a, b, c, d, e, f, g, h));
         const IR::U128 imm = datasize == 64 ? ir.ZeroExtendToQuad(ir.Imm64(imm64)) : ir.VectorBroadcast(64, ir.Imm64(imm64));
         const IR::U128 operand = V(datasize, Vd);
         const IR::U128 result = ir.VectorAnd(operand, imm);
@@ -394,19 +394,26 @@ bool SaturatingShiftLeft(TranslatorVisitor& v, bool Q, Imm<2> size, Vec Vm, Vec
 } // Anonymous namespace
 
 bool TranslatorVisitor::CMGT_reg_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
-    if (size == 0b11 && !Q) return ReservedValue();
+    if (size == 0b11 && !Q) {
+        return ReservedValue();
+    }
+
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t datasize = Q ? 128 : 64;
 
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(datasize, Vm);
     const IR::U128 result = ir.VectorGreaterSigned(esize, operand1, operand2);
 
     V(datasize, Vd, result);
     return true;
 }
 
 bool TranslatorVisitor::CMGE_reg_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
-    if (size == 0b11 && !Q) return ReservedValue();
+    if (size == 0b11 && !Q) {
+        return ReservedValue();
+    }
+
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t datasize = Q ? 128 : 64;
+
@@ -478,48 +485,51 @@ bool TranslatorVisitor::SQRDMULH_vec_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec
 }
 
 bool TranslatorVisitor::ADD_vector(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
-    if (size == 0b11 && !Q) return ReservedValue();
+    if (size == 0b11 && !Q) {
+        return ReservedValue();
+    }
+
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t datasize = Q ? 128 : 64;
 
-    auto operand1 = V(datasize, Vn);
-    auto operand2 = V(datasize, Vm);
-
-    auto result = ir.VectorAdd(esize, operand1, operand2);
+    const auto operand1 = V(datasize, Vn);
+    const auto operand2 = V(datasize, Vm);
+    const auto result = ir.VectorAdd(esize, operand1, operand2);
 
     V(datasize, Vd, result);
-
     return true;
 }
 
 bool TranslatorVisitor::MLA_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
-    if (size == 0b11) return ReservedValue();
+    if (size == 0b11) {
+        return ReservedValue();
+    }
+
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t datasize = Q ? 128 : 64;
 
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(datasize, Vm);
     const IR::U128 operand3 = V(datasize, Vd);
-
     const IR::U128 result = ir.VectorAdd(esize, ir.VectorMultiply(esize, operand1, operand2), operand3);
 
     V(datasize, Vd, result);
-
     return true;
 }
 
 bool TranslatorVisitor::MUL_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
-    if (size == 0b11) return ReservedValue();
+    if (size == 0b11) {
+        return ReservedValue();
+    }
+
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t datasize = Q ? 128 : 64;
 
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(datasize, Vm);
-
     const IR::U128 result = ir.VectorMultiply(esize, operand1, operand2);
 
     V(datasize, Vd, result);
-
     return true;
 }
 
@@ -628,17 +638,18 @@ bool TranslatorVisitor::URHADD(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
 }
 
 bool TranslatorVisitor::ADDP_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
-    if (size == 0b11 && !Q) return ReservedValue();
+    if (size == 0b11 && !Q) {
+        return ReservedValue();
+    }
+
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t datasize = Q ? 128 : 64;
 
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(datasize, Vm);
-
     const IR::U128 result = Q ? ir.VectorPairedAdd(esize, operand1, operand2) : ir.VectorPairedAddLower(esize, operand1, operand2);
 
     V(datasize, Vd, result);
-
     return true;
 }
 
@@ -670,12 +681,14 @@ bool TranslatorVisitor::FADD_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
     if (sz && !Q) {
         return ReservedValue();
     }
+
     const size_t esize = sz ? 64 : 32;
     const size_t datasize = Q ? 128 : 64;
+
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(datasize, Vm);
     const IR::U128 result = ir.FPVectorAdd(esize, operand1, operand2);
 
     V(datasize, Vd, result);
     return true;
 }
@@ -755,13 +768,11 @@ bool TranslatorVisitor::FCMGT_reg_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
 bool TranslatorVisitor::AND_asimd(bool Q, Vec Vm, Vec Vn, Vec Vd) {
     const size_t datasize = Q ? 128 : 64;
 
-    auto operand1 = V(datasize, Vn);
-    auto operand2 = V(datasize, Vm);
-
-    auto result = ir.VectorAnd(operand1, operand2);
+    const auto operand1 = V(datasize, Vn);
+    const auto operand2 = V(datasize, Vm);
+    const auto result = ir.VectorAnd(operand1, operand2);
 
     V(datasize, Vd, result);
-
     return true;
 }
 
@@ -772,13 +783,11 @@ bool TranslatorVisitor::BIC_asimd_reg(bool Q, Vec Vm, Vec Vn, Vec Vd) {
     const IR::U128 operand2 = V(datasize, Vm);
 
     IR::U128 result = ir.VectorAnd(operand1, ir.VectorNot(operand2));
-
     if (datasize == 64) {
         result = ir.VectorZeroUpper(result);
     }
 
     V(datasize, Vd, result);
-
     return true;
 }
 
@@ -786,12 +795,14 @@ bool TranslatorVisitor::CMHI_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
     if (size == 0b11 && !Q) {
         return ReservedValue();
     }
+
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t datasize = Q ? 128 : 64;
+
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(datasize, Vm);
     const IR::U128 result = ir.VectorGreaterUnsigned(esize, operand1, operand2);
 
     V(datasize, Vd, result);
     return true;
 }
@@ -800,6 +811,7 @@ bool TranslatorVisitor::CMHS_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
     if (size == 0b11 && !Q) {
         return ReservedValue();
     }
+
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t datasize = Q ? 128 : 64;
 
@@ -809,6 +821,7 @@ bool TranslatorVisitor::CMHS_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
     if (datasize == 64) {
         result = ir.VectorZeroUpper(result);
     }
+
     V(datasize, Vd, result);
     return true;
 }
@@ -842,12 +855,14 @@ bool TranslatorVisitor::SSHL_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
     if (size == 0b11 && !Q) {
         return ReservedValue();
     }
+
     const size_t esize = 8 << size.ZeroExtend();
     const size_t datasize = Q ? 128 : 64;
+
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(datasize, Vm);
     const IR::U128 result = ir.VectorArithmeticVShift(esize, operand1, operand2);
 
     V(datasize, Vd, result);
     return true;
 }
@@ -864,12 +879,14 @@ bool TranslatorVisitor::USHL_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
     if (size == 0b11 && !Q) {
         return ReservedValue();
     }
+
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t datasize = Q ? 128 : 64;
+
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(datasize, Vm);
     const IR::U128 result = ir.VectorLogicalVShift(esize, operand1, operand2);
 
     V(datasize, Vd, result);
     return true;
 }
@@ -929,12 +946,14 @@ bool TranslatorVisitor::FSUB_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
     if (sz && !Q) {
         return ReservedValue();
     }
+
     const size_t esize = sz ? 64 : 32;
     const size_t datasize = Q ? 128 : 64;
+
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(datasize, Vm);
     const IR::U128 result = ir.FPVectorSub(esize, operand1, operand2);
 
     V(datasize, Vd, result);
     return true;
 }
@@ -955,12 +974,14 @@ bool TranslatorVisitor::FRECPS_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
     if (sz && !Q) {
         return ReservedValue();
     }
+
     const size_t esize = sz ? 64 : 32;
     const size_t datasize = Q ? 128 : 64;
+
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(datasize, Vm);
     const IR::U128 result = ir.FPVectorRecipStepFused(esize, operand1, operand2);
 
     V(datasize, Vd, result);
     return true;
 }
@@ -981,12 +1002,14 @@ bool TranslatorVisitor::FRSQRTS_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
     if (sz && !Q) {
         return ReservedValue();
     }
+
     const size_t esize = sz ? 64 : 32;
     const size_t datasize = Q ? 128 : 64;
+
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(datasize, Vm);
     const IR::U128 result = ir.FPVectorRSqrtStepFused(esize, operand1, operand2);
 
     V(datasize, Vd, result);
     return true;
 }
@@ -994,30 +1017,26 @@ bool TranslatorVisitor::FRSQRTS_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
 bool TranslatorVisitor::ORR_asimd_reg(bool Q, Vec Vm, Vec Vn, Vec Vd) {
     const size_t datasize = Q ? 128 : 64;
 
-    auto operand1 = V(datasize, Vn);
-    auto operand2 = V(datasize, Vm);
-
-    auto result = ir.VectorOr(operand1, operand2);
+    const auto operand1 = V(datasize, Vn);
+    const auto operand2 = V(datasize, Vm);
+    const auto result = ir.VectorOr(operand1, operand2);
 
     V(datasize, Vd, result);
-
     return true;
 }
 
 bool TranslatorVisitor::ORN_asimd(bool Q, Vec Vm, Vec Vn, Vec Vd) {
     const size_t datasize = Q ? 128 : 64;
 
-    auto operand1 = V(datasize, Vn);
-    auto operand2 = V(datasize, Vm);
+    const auto operand1 = V(datasize, Vn);
+    const auto operand2 = V(datasize, Vm);
 
     auto result = ir.VectorOr(operand1, ir.VectorNot(operand2));
-
     if (datasize == 64) {
         result = ir.VectorZeroUpper(result);
     }
 
     V(datasize, Vd, result);
-
     return true;
 }
 
@@ -1037,22 +1056,26 @@ bool TranslatorVisitor::PMUL(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
 }
 
 bool TranslatorVisitor::SUB_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
-    if (size == 0b11 && !Q) return ReservedValue();
+    if (size == 0b11 && !Q) {
+        return ReservedValue();
+    }
+
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t datasize = Q ? 128 : 64;
 
-    auto operand1 = V(datasize, Vn);
-    auto operand2 = V(datasize, Vm);
-
-    auto result = ir.VectorSub(esize, operand1, operand2);
+    const auto operand1 = V(datasize, Vn);
+    const auto operand2 = V(datasize, Vm);
+    const auto result = ir.VectorSub(esize, operand1, operand2);
 
     V(datasize, Vd, result);
-
     return true;
 }
 
 bool TranslatorVisitor::CMEQ_reg_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
-    if (size == 0b11 && !Q) return ReservedValue();
+    if (size == 0b11 && !Q) {
+        return ReservedValue();
+    }
+
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t datasize = Q ? 128 : 64;
 
@@ -1060,18 +1083,19 @@ bool TranslatorVisitor::CMEQ_reg_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd)
     const IR::U128 operand2 = V(datasize, Vm);
 
     IR::U128 result = ir.VectorEqual(esize, operand1, operand2);
-
     if (datasize == 64) {
         result = ir.VectorZeroUpper(result);
     }
 
     V(datasize, Vd, result);
-
     return true;
 }
 
 bool TranslatorVisitor::MLS_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
-    if (size == 0b11) return ReservedValue();
+    if (size == 0b11) {
+        return ReservedValue();
+    }
+
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t datasize = Q ? 128 : 64;
 
@@ -1082,20 +1106,17 @@ bool TranslatorVisitor::MLS_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
     const IR::U128 result = ir.VectorSub(esize, operand3, ir.VectorMultiply(esize, operand1, operand2));
 
     V(datasize, Vd, result);
-
     return true;
 }
 
 bool TranslatorVisitor::EOR_asimd(bool Q, Vec Vm, Vec Vn, Vec Vd) {
     const size_t datasize = Q ? 128 : 64;
 
-    auto operand1 = V(datasize, Vn);
-    auto operand2 = V(datasize, Vm);
-
-    auto result = ir.VectorEor(operand1, operand2);
+    const auto operand1 = V(datasize, Vn);
+    const auto operand2 = V(datasize, Vm);
+    const auto result = ir.VectorEor(operand1, operand2);
 
     V(datasize, Vd, result);
-
     return true;
 }
 
@@ -1150,12 +1171,14 @@ bool TranslatorVisitor::FMUL_vec_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
     if (sz && !Q) {
         return ReservedValue();
     }
+
     const size_t esize = sz ? 64 : 32;
     const size_t datasize = Q ? 128 : 64;
+
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(datasize, Vm);
-    IR::U128 result = ir.FPVectorMul(esize, operand1, operand2);
+    const IR::U128 result = ir.FPVectorMul(esize, operand1, operand2);
 
     V(datasize, Vd, result);
     return true;
 }
@@ -1164,12 +1187,14 @@ bool TranslatorVisitor::FMULX_vec_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
     if (sz && !Q) {
         return ReservedValue();
     }
+
     const size_t esize = sz ? 64 : 32;
     const size_t datasize = Q ? 128 : 64;
+
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(datasize, Vm);
-    IR::U128 result = ir.FPVectorMulX(esize, operand1, operand2);
+    const IR::U128 result = ir.FPVectorMulX(esize, operand1, operand2);
 
     V(datasize, Vd, result);
     return true;
 }
@@ -1178,6 +1203,7 @@ bool TranslatorVisitor::FDIV_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
     if (sz && !Q) {
         return ReservedValue();
    }
+
     const size_t esize = sz ? 64 : 32;
     const size_t datasize = Q ? 128 : 64;
 
@@ -1187,50 +1213,45 @@ bool TranslatorVisitor::FDIV_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
     if (datasize == 64) {
         result = ir.VectorZeroUpper(result);
     }
+
     V(datasize, Vd, result);
     return true;
 }
 
 bool TranslatorVisitor::BIF(bool Q, Vec Vm, Vec Vn, Vec Vd) {
     const size_t datasize = Q ? 128 : 64;
 
-    auto operand1 = V(datasize, Vd);
-    auto operand4 = V(datasize, Vn);
-    auto operand3 = ir.VectorNot(V(datasize, Vm));
-
-    auto result = ir.VectorEor(operand1, ir.VectorAnd(ir.VectorEor(operand1, operand4), operand3));
+    const auto operand1 = V(datasize, Vd);
+    const auto operand4 = V(datasize, Vn);
+    const auto operand3 = ir.VectorNot(V(datasize, Vm));
+    const auto result = ir.VectorEor(operand1, ir.VectorAnd(ir.VectorEor(operand1, operand4), operand3));
 
     V(datasize, Vd, result);
-
     return true;
 }
 
 bool TranslatorVisitor::BIT(bool Q, Vec Vm, Vec Vn, Vec Vd) {
     const size_t datasize = Q ? 128 : 64;
 
-    auto operand1 = V(datasize, Vd);
-    auto operand4 = V(datasize, Vn);
-    auto operand3 = V(datasize, Vm);
-
-    auto result = ir.VectorEor(operand1, ir.VectorAnd(ir.VectorEor(operand1, operand4), operand3));
+    const auto operand1 = V(datasize, Vd);
+    const auto operand4 = V(datasize, Vn);
+    const auto operand3 = V(datasize, Vm);
+    const auto result = ir.VectorEor(operand1, ir.VectorAnd(ir.VectorEor(operand1, operand4), operand3));
 
     V(datasize, Vd, result);
-
     return true;
 }
 
 bool TranslatorVisitor::BSL(bool Q, Vec Vm, Vec Vn, Vec Vd) {
     const size_t datasize = Q ? 128 : 64;
 
-    auto operand4 = V(datasize, Vn);
-    auto operand1 = V(datasize, Vm);
-    auto operand3 = V(datasize, Vd);
-
-    auto result = ir.VectorEor(operand1, ir.VectorAnd(ir.VectorEor(operand1, operand4), operand3));
+    const auto operand4 = V(datasize, Vn);
+    const auto operand1 = V(datasize, Vm);
+    const auto operand3 = V(datasize, Vd);
+    const auto result = ir.VectorEor(operand1, ir.VectorAnd(ir.VectorEor(operand1, operand4), operand3));
 
     V(datasize, Vd, result);
-
     return true;
 }
 
 } // namespace Dynarmic::A64