backend/arm64: Merge memory handling

Merry 2022-12-05 22:34:00 +00:00
parent 7660da4909
commit ec1f117665
8 changed files with 156 additions and 161 deletions

src/dynarmic/CMakeLists.txt

@@ -386,6 +386,8 @@ elseif(ARCHITECTURE STREQUAL "arm64")
         backend/arm64/emit_arm64_cryptography.cpp
         backend/arm64/emit_arm64_data_processing.cpp
         backend/arm64/emit_arm64_floating_point.cpp
+        backend/arm64/emit_arm64_memory.cpp
+        backend/arm64/emit_arm64_memory.h
         backend/arm64/emit_arm64_packed.cpp
         backend/arm64/emit_arm64_saturation.cpp
         backend/arm64/emit_arm64_vector.cpp

src/dynarmic/backend/arm64/a32_address_space.cpp

@@ -288,6 +288,7 @@ EmitConfig A32AddressSpace::GetEmitConfig() {
         .state_nzcv_offset = offsetof(A32JitState, cpsr_nzcv),
         .state_fpsr_offset = offsetof(A32JitState, fpsr),
+        .state_exclusive_state_offset = offsetof(A32JitState, exclusive_state),
         .coprocessors = conf.coprocessors,
     };

src/dynarmic/backend/arm64/a64_address_space.cpp

@@ -408,6 +408,7 @@ EmitConfig A64AddressSpace::GetEmitConfig() {
        .state_nzcv_offset = offsetof(A64JitState, cpsr_nzcv),
        .state_fpsr_offset = offsetof(A64JitState, fpsr),
+       .state_exclusive_state_offset = offsetof(A64JitState, exclusive_state),
        .coprocessors{},
    };

src/dynarmic/backend/arm64/emit_arm64.h

@@ -120,6 +120,7 @@ struct EmitConfig {
     // State offsets
     size_t state_nzcv_offset;
     size_t state_fpsr_offset;
+    size_t state_exclusive_state_offset;
 
     // A32 specific
     std::array<std::shared_ptr<A32::Coprocessor>, 16> coprocessors{};
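
Note: this new state_exclusive_state_offset field is what allows the memory helpers to be merged below. Instead of two copies of each helper hard-coding their frontend's JIT state layout, the shared code reads the offset from EmitConfig. A minimal before/after sketch, pieced together from the hunks in this commit (illustrative only):

    // Before: one copy per frontend, each hard-coding its own state struct.
    code.STRB(Wscratch0, Xstate, offsetof(A32JitState, exclusive_state));  // A32 copy
    code.STRB(Wscratch0, Xstate, offsetof(A64JitState, exclusive_state));  // A64 copy

    // After: a single shared helper, parameterised through the config.
    code.STRB(Wscratch0, Xstate, ctx.conf.state_exclusive_state_offset);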

src/dynarmic/backend/arm64/emit_arm64_a32_memory.cpp

@@ -8,9 +8,9 @@
 #include "dynarmic/backend/arm64/a32_jitstate.h"
 #include "dynarmic/backend/arm64/abi.h"
 #include "dynarmic/backend/arm64/emit_arm64.h"
+#include "dynarmic/backend/arm64/emit_arm64_memory.h"
 #include "dynarmic/backend/arm64/emit_context.h"
 #include "dynarmic/backend/arm64/reg_alloc.h"
-#include "dynarmic/ir/acc_type.h"
 #include "dynarmic/ir/basic_block.h"
 #include "dynarmic/ir/microinstruction.h"
 #include "dynarmic/ir/opcodes.h"
@@ -19,72 +19,6 @@ namespace Dynarmic::Backend::Arm64 {
 
 using namespace oaknut::util;
 
-static bool IsOrdered(IR::AccType acctype) {
-    return acctype == IR::AccType::ORDERED || acctype == IR::AccType::ORDEREDRW || acctype == IR::AccType::LIMITEDORDERED;
-}
-
-static void EmitReadMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.PrepareForCall({}, args[1]);
-    const bool ordered = IsOrdered(args[2].GetImmediateAccType());
-
-    EmitRelocation(code, ctx, fn);
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-    ctx.reg_alloc.DefineAsRegister(inst, X0);
-}
-
-static void EmitExclusiveReadMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.PrepareForCall({}, args[1]);
-    const bool ordered = IsOrdered(args[2].GetImmediateAccType());
-
-    code.MOV(Wscratch0, 1);
-    code.STRB(Wscratch0, Xstate, offsetof(A32JitState, exclusive_state));
-    EmitRelocation(code, ctx, fn);
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-    ctx.reg_alloc.DefineAsRegister(inst, X0);
-}
-
-static void EmitWriteMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.PrepareForCall({}, args[1], args[2]);
-    const bool ordered = IsOrdered(args[3].GetImmediateAccType());
-
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-    EmitRelocation(code, ctx, fn);
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-}
-
-static void EmitExclusiveWriteMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.PrepareForCall({}, args[1], args[2]);
-    const bool ordered = IsOrdered(args[3].GetImmediateAccType());
-
-    oaknut::Label end;
-
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-    code.MOV(W0, 1);
-    code.LDRB(Wscratch0, Xstate, offsetof(A32JitState, exclusive_state));
-    code.CBZ(Wscratch0, end);
-    code.STRB(WZR, Xstate, offsetof(A32JitState, exclusive_state));
-    EmitRelocation(code, ctx, fn);
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-    code.l(end);
-    ctx.reg_alloc.DefineAsRegister(inst, X0);
-}
-
 template<>
 void EmitIR<IR::Opcode::A32ClearExclusive>(oaknut::CodeGenerator& code, EmitContext&, IR::Inst*) {
     code.STR(WZR, Xstate, offsetof(A32JitState, exclusive_state));
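
With the file-local statics gone, the per-opcode emitters in this file now resolve to the shared functions declared in emit_arm64_memory.h. A hypothetical call site for illustration (such emitters are unchanged context not shown in the diff; this opcode and LinkTarget name are assumed, not confirmed by the hunks):

    template<>
    void EmitIR<IR::Opcode::A32ReadMemory32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
        // Binds to the shared helper from emit_arm64_memory.cpp
        // rather than the deleted file-local static.
        EmitReadMemory(code, ctx, inst, LinkTarget::ReadMemory32);
    }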

src/dynarmic/backend/arm64/emit_arm64_a64_memory.cpp

@@ -8,6 +8,7 @@
 #include "dynarmic/backend/arm64/a64_jitstate.h"
 #include "dynarmic/backend/arm64/abi.h"
 #include "dynarmic/backend/arm64/emit_arm64.h"
+#include "dynarmic/backend/arm64/emit_arm64_memory.h"
 #include "dynarmic/backend/arm64/emit_context.h"
 #include "dynarmic/backend/arm64/reg_alloc.h"
 #include "dynarmic/ir/acc_type.h"
@@ -19,100 +20,6 @@ namespace Dynarmic::Backend::Arm64 {
 
 using namespace oaknut::util;
 
-static bool IsOrdered(IR::AccType acctype) {
-    return acctype == IR::AccType::ORDERED || acctype == IR::AccType::ORDEREDRW || acctype == IR::AccType::LIMITEDORDERED;
-}
-
-static void EmitReadMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.PrepareForCall({}, args[1]);
-    const bool ordered = IsOrdered(args[2].GetImmediateAccType());
-
-    EmitRelocation(code, ctx, fn);
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-    ctx.reg_alloc.DefineAsRegister(inst, X0);
-}
-
-static void EmitReadMemory128(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.PrepareForCall({}, args[1]);
-    const bool ordered = IsOrdered(args[2].GetImmediateAccType());
-
-    EmitRelocation(code, ctx, fn);
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-    code.MOV(Q8.B16(), Q0.B16());
-    ctx.reg_alloc.DefineAsRegister(inst, Q8);
-}
-
-static void EmitExclusiveReadMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.PrepareForCall({}, args[1]);
-    const bool ordered = IsOrdered(args[2].GetImmediateAccType());
-
-    code.MOV(Wscratch0, 1);
-    code.STRB(Wscratch0, Xstate, offsetof(A64JitState, exclusive_state));
-    EmitRelocation(code, ctx, fn);
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-    ctx.reg_alloc.DefineAsRegister(inst, X0);
-}
-
-static void EmitExclusiveReadMemory128(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.PrepareForCall({}, args[1]);
-    const bool ordered = IsOrdered(args[2].GetImmediateAccType());
-
-    code.MOV(Wscratch0, 1);
-    code.STRB(Wscratch0, Xstate, offsetof(A64JitState, exclusive_state));
-    EmitRelocation(code, ctx, fn);
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-    code.MOV(Q8.B16(), Q0.B16());
-    ctx.reg_alloc.DefineAsRegister(inst, Q8);
-}
-
-static void EmitWriteMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.PrepareForCall({}, args[1], args[2]);
-    const bool ordered = IsOrdered(args[3].GetImmediateAccType());
-
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-    EmitRelocation(code, ctx, fn);
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-}
-
-static void EmitExclusiveWriteMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.PrepareForCall({}, args[1], args[2]);
-    const bool ordered = IsOrdered(args[3].GetImmediateAccType());
-
-    oaknut::Label end;
-
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-    code.MOV(W0, 1);
-    code.LDRB(Wscratch0, Xstate, offsetof(A64JitState, exclusive_state));
-    code.CBZ(Wscratch0, end);
-    code.STRB(WZR, Xstate, offsetof(A64JitState, exclusive_state));
-    EmitRelocation(code, ctx, fn);
-    if (ordered) {
-        code.DMB(oaknut::BarrierOp::ISH);
-    }
-    code.l(end);
-    ctx.reg_alloc.DefineAsRegister(inst, X0);
-}
-
 template<>
 void EmitIR<IR::Opcode::A64ClearExclusive>(oaknut::CodeGenerator& code, EmitContext&, IR::Inst*) {
     code.STR(WZR, Xstate, offsetof(A64JitState, exclusive_state));

src/dynarmic/backend/arm64/emit_arm64_memory.cpp

@@ -0,0 +1,117 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2022 MerryMage
+ * SPDX-License-Identifier: 0BSD
+ */
+
+#include "dynarmic/backend/arm64/emit_arm64_memory.h"
+
+#include <oaknut/oaknut.hpp>
+
+#include "dynarmic/backend/arm64/abi.h"
+#include "dynarmic/backend/arm64/emit_arm64.h"
+#include "dynarmic/backend/arm64/emit_context.h"
+#include "dynarmic/backend/arm64/reg_alloc.h"
+#include "dynarmic/ir/acc_type.h"
+#include "dynarmic/ir/basic_block.h"
+#include "dynarmic/ir/microinstruction.h"
+#include "dynarmic/ir/opcodes.h"
+
+namespace Dynarmic::Backend::Arm64 {
+
+using namespace oaknut::util;
+
+bool IsOrdered(IR::AccType acctype) {
+    return acctype == IR::AccType::ORDERED || acctype == IR::AccType::ORDEREDRW || acctype == IR::AccType::LIMITEDORDERED;
+}
+
+void EmitReadMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    ctx.reg_alloc.PrepareForCall({}, args[1]);
+    const bool ordered = IsOrdered(args[2].GetImmediateAccType());
+
+    EmitRelocation(code, ctx, fn);
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+    ctx.reg_alloc.DefineAsRegister(inst, X0);
+}
+
+void EmitReadMemory128(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    ctx.reg_alloc.PrepareForCall({}, args[1]);
+    const bool ordered = IsOrdered(args[2].GetImmediateAccType());
+
+    EmitRelocation(code, ctx, fn);
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+    code.MOV(Q8.B16(), Q0.B16());
+    ctx.reg_alloc.DefineAsRegister(inst, Q8);
+}
+
+void EmitExclusiveReadMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    ctx.reg_alloc.PrepareForCall({}, args[1]);
+    const bool ordered = IsOrdered(args[2].GetImmediateAccType());
+
+    code.MOV(Wscratch0, 1);
+    code.STRB(Wscratch0, Xstate, ctx.conf.state_exclusive_state_offset);
+    EmitRelocation(code, ctx, fn);
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+    ctx.reg_alloc.DefineAsRegister(inst, X0);
+}
+
+void EmitExclusiveReadMemory128(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    ctx.reg_alloc.PrepareForCall({}, args[1]);
+    const bool ordered = IsOrdered(args[2].GetImmediateAccType());
+
+    code.MOV(Wscratch0, 1);
+    code.STRB(Wscratch0, Xstate, ctx.conf.state_exclusive_state_offset);
+    EmitRelocation(code, ctx, fn);
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+    code.MOV(Q8.B16(), Q0.B16());
+    ctx.reg_alloc.DefineAsRegister(inst, Q8);
+}
+
+void EmitWriteMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    ctx.reg_alloc.PrepareForCall({}, args[1], args[2]);
+    const bool ordered = IsOrdered(args[3].GetImmediateAccType());
+
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+    EmitRelocation(code, ctx, fn);
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+}
+
+void EmitExclusiveWriteMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    ctx.reg_alloc.PrepareForCall({}, args[1], args[2]);
+    const bool ordered = IsOrdered(args[3].GetImmediateAccType());
+
+    oaknut::Label end;
+
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+    code.MOV(W0, 1);
+    code.LDRB(Wscratch0, Xstate, ctx.conf.state_exclusive_state_offset);
+    code.CBZ(Wscratch0, end);
+    code.STRB(WZR, Xstate, ctx.conf.state_exclusive_state_offset);
+    EmitRelocation(code, ctx, fn);
+    if (ordered) {
+        code.DMB(oaknut::BarrierOp::ISH);
+    }
+    code.l(end);
+    ctx.reg_alloc.DefineAsRegister(inst, X0);
+}
+
+}  // namespace Dynarmic::Backend::Arm64
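
A note on what the merged exclusive-write helper emits, since the control flow is easy to misread: it pessimistically sets the result to 1 (failure), runs the write callback only if the software exclusive flag is set, and clears the flag before the call; the DMB ISH barriers around the call provide the ordering that IsOrdered detects. Annotated copy of that control flow (comments added here, not present in the commit):

    code.MOV(W0, 1);                                                // pessimistic result: exclusive store failed
    code.LDRB(Wscratch0, Xstate, ctx.conf.state_exclusive_state_offset);
    code.CBZ(Wscratch0, end);                                       // monitor clear: skip the store, keep W0 == 1
    code.STRB(WZR, Xstate, ctx.conf.state_exclusive_state_offset);  // consume the monitor
    EmitRelocation(code, ctx, fn);                                  // call the write handler; its result replaces W0
    code.l(end);
    ctx.reg_alloc.DefineAsRegister(inst, X0);

The 128-bit read variants differ from the scalar ones only in moving the result out of Q0 into Q8 before handing it to the register allocator.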

src/dynarmic/backend/arm64/emit_arm64_memory.h

@@ -0,0 +1,32 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2022 MerryMage
+ * SPDX-License-Identifier: 0BSD
+ */
+
+#pragma once
+
+namespace oaknut {
+struct PointerCodeGeneratorPolicy;
+template<typename>
+class BasicCodeGenerator;
+using CodeGenerator = BasicCodeGenerator<PointerCodeGeneratorPolicy>;
+struct Label;
+}  // namespace oaknut
+
+namespace Dynarmic::IR {
+enum class AccType;
+class Inst;
+}  // namespace Dynarmic::IR
+
+namespace Dynarmic::Backend::Arm64 {
+struct EmitContext;
+enum class LinkTarget;
+
+bool IsOrdered(IR::AccType acctype);
+void EmitReadMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn);
+void EmitReadMemory128(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn);
+void EmitExclusiveReadMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn);
+void EmitExclusiveReadMemory128(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn);
+void EmitWriteMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn);
+void EmitExclusiveWriteMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn);
+}  // namespace Dynarmic::Backend::Arm64
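
The header gets away with forward declarations because every parameter is passed by reference or pointer, or is an enum: an opaque enum declaration such as enum class LinkTarget; yields a complete type, so no oaknut or IR header is needed here. A hypothetical consumer (not part of the commit) that compiles against these declarations alone:

    #include "dynarmic/backend/arm64/emit_arm64_memory.h"

    namespace Dynarmic::Backend::Arm64 {
    // References and enums cross the interface without complete types.
    void ForwardToSharedHelper(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst, LinkTarget fn) {
        EmitReadMemory(code, ctx, inst, fn);
    }
    }  // namespace Dynarmic::Backend::Arm64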