Exclusive fixups

* Incorrect size of exclusive_address
* Disable tests on exclusive memory instructions for now
commit 1f5b3bca43 (parent f3fa4a042f)
4 changed files with 11 additions and 9 deletions
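The core of the first fix: A64 virtual addresses are 64 bits wide, but exclusive_address was a u32, so any two addresses sharing the same low 32 bits aliased in the exclusive monitor. A minimal standalone sketch of that failure mode (hypothetical addresses; plain integer types stand in for the jit state fields):

    #include <cassert>
    #include <cstdint>

    int main() {
        const std::uint64_t marked = 0x0000'0001'0000'1000ull; // address marked exclusive
        const std::uint64_t other  = 0x0000'0002'0000'1000ull; // same low 32 bits, different region

        // Old behaviour: only the low 32 bits were stored and compared,
        // so `other` spuriously matched the reservation.
        const std::uint32_t old_stored = static_cast<std::uint32_t>(marked);
        assert(old_stored == static_cast<std::uint32_t>(other)); // false match

        // New behaviour: the full 64-bit address is stored, so the
        // monitor comparison can tell the two apart.
        const std::uint64_t new_stored = marked;
        assert(new_stored != other);
        return 0;
    }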
@@ -492,10 +492,10 @@ void A64EmitX64::EmitA64ClearExclusive(A64EmitContext&, IR::Inst*) {
 void A64EmitX64::EmitA64SetExclusive(A64EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     ASSERT(args[1].IsImmediate());
-    Xbyak::Reg32 address = ctx.reg_alloc.UseGpr(args[0]).cvt32();
+    Xbyak::Reg64 address = ctx.reg_alloc.UseGpr(args[0]);

     code.mov(code.byte[r15 + offsetof(A64JitState, exclusive_state)], u8(1));
-    code.mov(dword[r15 + offsetof(A64JitState, exclusive_address)], address);
+    code.mov(qword[r15 + offsetof(A64JitState, exclusive_address)], address);
 }

 static Xbyak::RegExp EmitVAddrLookup(const A64::UserConfig& conf, BlockOfCode& code, A64EmitContext& ctx, Xbyak::Label& abort, Xbyak::Reg64 vaddr, boost::optional<Xbyak::Reg64> arg_scratch = {}) {
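Read as plain C++, the code this hunk emits reduces to the following sketch (the struct is a minimal stand-in for the fields changed in the header hunk further down; the function name is hypothetical):

    #include <cstdint>

    // Minimal stand-in for the relevant A64JitState fields.
    struct A64JitState {
        std::uint8_t exclusive_state = 0;
        std::uint64_t exclusive_address = 0;
    };

    // Arm the monitor and record the full 64-bit reservation address.
    void SetExclusive(A64JitState& jit_state, std::uint64_t vaddr) {
        jit_state.exclusive_state = 1;       // the byte store of u8(1)
        jit_state.exclusive_address = vaddr; // now a qword store instead of a dword
    }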
@@ -743,14 +743,14 @@ void A64EmitX64::EmitA64WriteMemory128(A64EmitContext& ctx, IR::Inst* inst) {
 void A64EmitX64::EmitExclusiveWrite(A64EmitContext& ctx, IR::Inst* inst, size_t bitsize, Xbyak::Reg64 vaddr, int value_idx) {
     Xbyak::Label end;
     Xbyak::Reg32 passed = ctx.reg_alloc.ScratchGpr().cvt32();
-    Xbyak::Reg32 tmp = ctx.reg_alloc.ScratchGpr().cvt32();
+    Xbyak::Reg64 tmp = ctx.reg_alloc.ScratchGpr();

     code.mov(passed, u32(1));
     code.cmp(code.byte[r15 + offsetof(A64JitState, exclusive_state)], u8(0));
     code.je(end);
     code.mov(tmp, vaddr);
-    code.xor_(tmp, dword[r15 + offsetof(A64JitState, exclusive_address)]);
-    code.test(tmp, A64JitState::RESERVATION_GRANULE_MASK);
+    code.xor_(tmp, qword[r15 + offsetof(A64JitState, exclusive_address)]);
+    code.test(tmp, static_cast<u32>(A64JitState::RESERVATION_GRANULE_MASK & 0xFFFF'FFFF));
     code.jne(end);
     code.mov(code.byte[r15 + offsetof(A64JitState, exclusive_state)], u8(0));
     code.call(write_fallbacks[std::make_tuple(bitsize, vaddr.getIdx(), value_idx)]);
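Why the static_cast<u32> is safe: x86-64 TEST with a 64-bit register only accepts a 32-bit immediate, which the CPU sign-extends to 64 bits; the low word of the new mask is 0xFFFF'FFF0 with its top bit set, so sign extension reproduces the full 0xFFFF'FFFF'FFFF'FFF0 mask and no bits of the comparison are lost. The emitted control flow itself amounts to this sketch (WriteFallback is a hypothetical stand-in for the registered write fallback; the unshown tail of the function is assumed to clear `passed` once the write has been performed):

    #include <cstdint>

    // Minimal stand-in for the relevant A64JitState fields.
    struct A64JitState {
        static constexpr std::uint64_t RESERVATION_GRANULE_MASK = 0xFFFF'FFFF'FFFF'FFF0ull;
        std::uint8_t exclusive_state = 0;
        std::uint64_t exclusive_address = 0;
    };

    // Hypothetical stand-in for the registered write fallback.
    void WriteFallback(std::uint64_t /*vaddr*/) {}

    // The store only proceeds when the monitor is armed and vaddr falls in
    // the same reservation granule as the recorded address.
    std::uint32_t ExclusiveWrite(A64JitState& jit_state, std::uint64_t vaddr) {
        std::uint32_t passed = 1; // A64 status convention: 1 = store failed
        if (jit_state.exclusive_state != 0 &&
            ((vaddr ^ jit_state.exclusive_address) & A64JitState::RESERVATION_GRANULE_MASK) == 0) {
            jit_state.exclusive_state = 0; // consume the monitor before writing
            WriteFallback(vaddr);
            passed = 0;                    // store performed
        }
        return passed;
    }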
@@ -57,9 +57,9 @@ struct A64JitState {
     bool check_bit = false;

     // Exclusive state
-    static constexpr u32 RESERVATION_GRANULE_MASK = 0xFFFFFFF8;
-    u32 exclusive_state = 0;
-    u32 exclusive_address = 0;
+    static constexpr u64 RESERVATION_GRANULE_MASK = 0xFFFF'FFFF'FFFF'FFF0ull;
+    u8 exclusive_state = 0;
+    u64 exclusive_address = 0;

     static constexpr size_t RSBSize = 8; // MUST be a power of 2.
     static constexpr size_t RSBPtrMask = RSBSize - 1;
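Besides covering the full 64-bit address, the new mask widens the reservation granule from 8 bytes (0xFFFFFFF8) to 16 bytes (low four bits cleared). Two addresses pass the monitor check exactly when they share a granule; a worked check under the new definition:

    #include <cstdint>

    static constexpr std::uint64_t RESERVATION_GRANULE_MASK = 0xFFFF'FFFF'FFFF'FFF0ull;

    // True when both addresses fall within the same 16-byte reservation granule.
    constexpr bool SameGranule(std::uint64_t a, std::uint64_t b) {
        return ((a ^ b) & RESERVATION_GRANULE_MASK) == 0;
    }

    static_assert(SameGranule(0x1000, 0x100F), "same granule, different offset");
    static_assert(!SameGranule(0x1000, 0x1010), "adjacent granule must not match");
    static_assert(!SameGranule(0x1'0000'1000ull, 0x2'0000'1000ull), "high bits now participate");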
@@ -47,7 +47,7 @@ static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter&
     } else if (pair && elsize == 32) {
         data = ir.Pack2x32To1x64(tv.X(32, Rt), tv.X(32, *Rt2));
     } else {
-        data = tv.X(datasize, Rt);
+        data = tv.X(elsize, Rt);
     }
     IR::U32 status = tv.ExclusiveMem(address, dbytes, acctype, data);
     tv.X(32, *Rs, status);
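The datasize → elsize change matters because of the pair forms: there, datasize describes the combined memory access while elsize describes each register, so reading a single register with datasize would request more bits than Rt holds. A standalone sketch of the packing in the branch above, assuming the first operand of Pack2x32To1x64 forms the low half:

    #include <cassert>
    #include <cstdint>

    // Sketch of the pair case: two 32-bit registers become one 64-bit value,
    // so datasize (64) describes the memory access while elsize (32)
    // describes each individual register read.
    std::uint64_t Pack2x32To1x64(std::uint32_t lo, std::uint32_t hi) {
        return static_cast<std::uint64_t>(lo) | (static_cast<std::uint64_t>(hi) << 32);
    }

    int main() {
        assert(Pack2x32To1x64(0x11111111u, 0x22222222u) == 0x2222'2222'1111'1111ull);
        return 0;
    }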
@@ -53,6 +53,8 @@ static u32 GenRandomInst(u64 pc, bool is_last_inst) {
         "STLLR",
         // Unimplemented in QEMU
         "LDLAR",
+        // Dynarmic and QEMU currently differ on how the exclusive monitor's address range works.
+        "STXR", "STLXR", "STXP", "STLXP", "LDXR", "LDAXR", "LDXP", "LDAXP",
     };

     for (const auto& [fn, bitstring] : list) {