backend/arm64: Initial implementation of register allocator
parent 3bf2b0aba9
commit 7e046357ff
8 changed files with 646 additions and 15 deletions
src/dynarmic/CMakeLists.txt

@@ -368,6 +368,15 @@ if (ARCHITECTURE STREQUAL "x86_64")
elseif(ARCHITECTURE STREQUAL "arm64")
target_link_libraries(dynarmic PRIVATE $<BUILD_INTERFACE:merry::oaknut>)

target_sources(dynarmic PRIVATE
backend/arm64/abi.h
backend/arm64/emit_arm64.cpp
backend/arm64/emit_arm64.h
backend/arm64/reg_alloc.cpp
backend/arm64/reg_alloc.h
backend/arm64/stack_layout.h
)

if ("A32" IN_LIST DYNARMIC_FRONTENDS)
target_sources(dynarmic PRIVATE
backend/arm64/a32_address_space.cpp

@@ -376,8 +385,6 @@ elseif(ARCHITECTURE STREQUAL "arm64")
backend/arm64/a32_interface.cpp
backend/arm64/a32_jitstate.cpp
backend/arm64/a32_jitstate.h
backend/arm64/emit_arm64.cpp
backend/arm64/emit_arm64.h
)
endif()
src/dynarmic/backend/arm64/a32_address_space.cpp

@@ -5,6 +5,7 @@
#include "dynarmic/backend/arm64/a32_address_space.h"

#include "dynarmic/backend/arm64/abi.h"
#include "dynarmic/backend/arm64/emit_arm64.h"
#include "dynarmic/frontend/A32/a32_location_descriptor.h"
#include "dynarmic/frontend/A32/translate/a32_translate.h"

@@ -78,6 +79,8 @@ void A32AddressSpace::EmitPrelude() {
for (int i = 0; i < 32; i += 2) {
code.STP(QReg{i}, QReg{i + 1}, SP, PRE_INDEXED, -32);
}
code.MOV(Xstate, X1);
code.MOV(Xhalt, X2);
code.BR(X0);

prelude_info.return_from_run_code = code.ptr<void*>();

@@ -106,7 +109,7 @@ EmittedBlockInfo A32AddressSpace::Emit(IR::Block block) {
mem.unprotect();

EmittedBlockInfo block_info = EmitArm64(code, std::move(block));
EmittedBlockInfo block_info = EmitArm64(code, std::move(block), {});
Link(block_info);

mem.protect();
src/dynarmic/backend/arm64/abi.h

@@ -5,12 +5,14 @@
#pragma once

#include <array>

#include <mcl/stdint.hpp>
#include <oaknut/oaknut.hpp>

namespace Dynarmic::Backend::Arm64 {

constexpr u32 ABI_ALL_
constexpr oaknut::XReg Xstate{28};
constexpr oaknut::XReg Xhalt{27};

constexpr oaknut::XReg Xscratch0{16}, Xscratch1{17};
constexpr oaknut::WReg Wscratch0{16}, Wscratch1{17};

} // namespace Dynarmic::Backend::Arm64
src/dynarmic/backend/arm64/emit_arm64.cpp

@@ -5,27 +5,75 @@
#include "dynarmic/backend/arm64/emit_arm64.h"

#include <fmt/ostream.h>
#include <oaknut/oaknut.hpp>

#include "dynarmic/backend/arm64/a32_jitstate.h"
#include "dynarmic/backend/arm64/abi.h"
#include "dynarmic/backend/arm64/emit_context.h"
#include "dynarmic/backend/arm64/reg_alloc.h"
#include "dynarmic/ir/basic_block.h"
#include "dynarmic/ir/microinstruction.h"
#include "dynarmic/ir/opcodes.h"

namespace Dynarmic::Backend::Arm64 {

using namespace oaknut::util;

EmittedBlockInfo EmitArm64(oaknut::CodeGenerator& code, IR::Block block) {
(void)block;
template<IR::Opcode op>
void EmitIR(oaknut::CodeGenerator&, EmitContext&, IR::Inst*) {
ASSERT_FALSE("Unimplemented opcode {}", op);
}

template<>
void EmitIR<IR::Opcode::GetCarryFromOp>(oaknut::CodeGenerator&, EmitContext& ctx, IR::Inst* inst) {
ASSERT(ctx.reg_alloc.IsValueLive(inst));
}

EmittedBlockInfo EmitArm64(oaknut::CodeGenerator& code, IR::Block block, const EmitConfig& emit_conf) {
EmittedBlockInfo ebi;

const std::vector<int> gpr_order{19, 20, 21, 22, 23, 24, 25, 26, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8};
const std::vector<int> fpr_order{8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
RegAlloc reg_alloc{code, gpr_order, fpr_order};
EmitContext ctx{block, reg_alloc, emit_conf, ebi};

ebi.entry_point = code.ptr<CodePtr>();

code.MOV(W0, 8);
code.STR(W0, X1, offsetof(A32JitState, regs) + 0 * sizeof(u32));
code.MOV(W0, 2);
code.STR(W0, X1, offsetof(A32JitState, regs) + 1 * sizeof(u32));
code.STR(W0, X1, offsetof(A32JitState, regs) + 15 * sizeof(u32));
for (auto iter = block.begin(); iter != block.end(); ++iter) {
IR::Inst* inst = &*iter;

switch (inst->GetOpcode()) {
#define OPCODE(name, type, ...) \
case IR::Opcode::name: \
EmitIR<IR::Opcode::name>(code, ctx, inst); \
break;
#define A32OPC(name, type, ...) \
case IR::Opcode::A32##name: \
EmitIR<IR::Opcode::A32##name>(code, ctx, inst); \
break;
#define A64OPC(name, type, ...) \
case IR::Opcode::A64##name: \
EmitIR<IR::Opcode::A64##name>(code, ctx, inst); \
break;
#include "dynarmic/ir/opcodes.inc"
#undef OPCODE
#undef A32OPC
#undef A64OPC
default:
ASSERT_FALSE("Invalid opcode: {}", inst->GetOpcode());
break;
}
}

// TODO: Add Cycles

// TODO: Emit Terminal
const auto term = block.GetTerminal();
const IR::Term::LinkBlock* link_block_term = boost::get<IR::Term::LinkBlock>(&term);
ASSERT(link_block_term);
code.MOV(Xscratch0, link_block_term->next.Value());
code.STUR(Xscratch0, Xstate, offsetof(A32JitState, regs) + sizeof(u32) * 15);
ebi.relocations.emplace_back(Relocation{code.ptr<CodePtr>() - ebi.entry_point, LinkTarget::ReturnFromRunCode});
code.NOP();
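
Every opcode except GetCarryFromOp currently falls through to the unimplemented-opcode assertion above. For orientation, a handler written against the new allocator is expected to look roughly like the sketch below; the choice of And32 and the exact instruction emitted are illustrative assumptions, while the GetArgumentInfo/ReadW/WriteW/Realize calls come from the RegAlloc API added in this commit.

    // Hypothetical handler, not part of this commit.
    template<>
    void EmitIR<IR::Opcode::And32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
        auto args = ctx.reg_alloc.GetArgumentInfo(inst);  // records one pending use per operand

        auto Wresult = ctx.reg_alloc.WriteW(inst);  // reserve a 32-bit GPR for the result
        auto Wa = ctx.reg_alloc.ReadW(args[0]);     // constructing a read handle locks the operand's location
        auto Wb = ctx.reg_alloc.ReadW(args[1]);
        RegAlloc::Realize(Wresult, Wa, Wb);         // host registers are assigned here

        code.AND(*Wresult, *Wa, *Wb);               // operator* yields the underlying oaknut::WReg
    }  // RAReg destructors release the registers via RegAlloc::Unlock

Constructing every handle before calling Realize is what keeps the operands safe: the lock taken at construction prevents AllocateRegister/SpillGpr (in reg_alloc.cpp below) from evicting one operand while a register is being chosen for another.
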
src/dynarmic/backend/arm64/emit_arm64.h

@@ -19,6 +19,8 @@ using CodeGenerator = BasicCodeGenerator<PointerCodeGeneratorPolicy>;
namespace Dynarmic::IR {
class Block;
enum class Opcode;
class Inst;
} // namespace Dynarmic::IR

namespace Dynarmic::Backend::Arm64 {

@@ -40,6 +42,14 @@ struct EmittedBlockInfo {
std::vector<Relocation> relocations;
};

EmittedBlockInfo EmitArm64(oaknut::CodeGenerator& code, IR::Block block);
struct EmitConfig {
};

struct EmitContext;

template<IR::Opcode op>
void EmitIR(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst);

EmittedBlockInfo EmitArm64(oaknut::CodeGenerator& code, IR::Block block, const EmitConfig& emit_conf);

} // namespace Dynarmic::Backend::Arm64
src/dynarmic/backend/arm64/reg_alloc.cpp (new file, 278 lines)

@@ -0,0 +1,278 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2022 MerryMage
* SPDX-License-Identifier: 0BSD
*/

#include "dynarmic/backend/arm64/reg_alloc.h"

#include <algorithm>
#include <array>

#include <mcl/assert.hpp>
#include <mcl/stdint.hpp>

namespace Dynarmic::Backend::Arm64 {

using namespace oaknut::util;

constexpr size_t spill_offset = offsetof(StackLayout, spill);
constexpr size_t spill_slot_size = sizeof(decltype(StackLayout::spill)::value_type);

static bool IsValuelessType(IR::Type type) {
switch (type) {
case IR::Type::Table:
return true;
default:
return false;
}
}

IR::Type Argument::GetType() const {
return value.GetType();
}

bool Argument::IsImmediate() const {
return value.IsImmediate();
}

bool Argument::GetImmediateU1() const {
return value.GetU1();
}

u8 Argument::GetImmediateU8() const {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm < 0x100);
return u8(imm);
}

u16 Argument::GetImmediateU16() const {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm < 0x10000);
return u16(imm);
}

u32 Argument::GetImmediateU32() const {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm < 0x100000000);
return u32(imm);
}

u64 Argument::GetImmediateU64() const {
return value.GetImmediateAsU64();
}

IR::Cond Argument::GetImmediateCond() const {
ASSERT(IsImmediate() && GetType() == IR::Type::Cond);
return value.GetCond();
}

IR::AccType Argument::GetImmediateAccType() const {
ASSERT(IsImmediate() && GetType() == IR::Type::AccType);
return value.GetAccType();
}

RegAlloc::ArgumentInfo RegAlloc::GetArgumentInfo(IR::Inst* inst) {
ArgumentInfo ret = {Argument{*this}, Argument{*this}, Argument{*this}, Argument{*this}};
for (size_t i = 0; i < inst->NumArgs(); i++) {
const IR::Value arg = inst->GetArg(i);
ret[i].value = arg;
if (!arg.IsImmediate() && !IsValuelessType(arg.GetType())) {
ASSERT_MSG(ValueLocation(arg.GetInst()), "argument must already have been defined");
ValueInfo(arg.GetInst()).accumulated_uses++;
}
}
return ret;
}

bool RegAlloc::IsValueLive(IR::Inst* inst) const {
return !!ValueLocation(inst);
}

template<bool is_vector>
int RegAlloc::RealizeReadImpl(const IR::Inst* value) {
constexpr HostLoc::Kind required_kind = is_vector ? HostLoc::Kind::Fpr : HostLoc::Kind::Gpr;

const auto current_location = ValueLocation(value);
ASSERT(current_location);

if (current_location->kind == required_kind) {
ValueInfo(*current_location).realized = true;
return current_location->index;
}

ASSERT(!ValueInfo(*current_location).realized);
ASSERT(ValueInfo(*current_location).locked);

if constexpr (is_vector) {
const int new_location_index = AllocateRegister(fprs, fpr_order);
SpillFpr(new_location_index);

switch (current_location->kind) {
case HostLoc::Kind::Gpr:
code.FMOV(oaknut::DReg{new_location_index}, oaknut::XReg{current_location->index});
break;
case HostLoc::Kind::Fpr:
ASSERT_FALSE("Logic error");
break;
case HostLoc::Kind::Spill:
code.LDR(oaknut::QReg{new_location_index}, SP, spill_offset + current_location->index * spill_slot_size);
break;
}

fprs[new_location_index] = std::exchange(ValueInfo(*current_location), {});
fprs[new_location_index].realized = true;
return new_location_index;
} else {
const int new_location_index = AllocateRegister(gprs, gpr_order);
SpillGpr(new_location_index);

switch (current_location->kind) {
case HostLoc::Kind::Gpr:
ASSERT_FALSE("Logic error");
break;
case HostLoc::Kind::Fpr:
code.FMOV(oaknut::XReg{new_location_index}, oaknut::DReg{current_location->index});
// ASSERT size fits
break;
case HostLoc::Kind::Spill:
code.LDR(oaknut::XReg{new_location_index}, SP, spill_offset + current_location->index * spill_slot_size);
break;
}

gprs[new_location_index] = std::exchange(ValueInfo(*current_location), {});
gprs[new_location_index].realized = true;
return new_location_index;
}
}

template<bool is_vector>
int RegAlloc::RealizeWriteImpl(const IR::Inst* value) {
ASSERT(!ValueLocation(value));

const auto setup_location = [&](HostLocInfo& info) {
info = {};
info.values.emplace(value);
info.locked = true;
info.realized = true;
info.expected_uses += value->UseCount();
};

if constexpr (is_vector) {
const int new_location_index = AllocateRegister(fprs, fpr_order);
SpillFpr(new_location_index);
setup_location(fprs[new_location_index]);
return new_location_index;
} else {
const int new_location_index = AllocateRegister(gprs, gpr_order);
SpillGpr(new_location_index);
setup_location(gprs[new_location_index]);
return new_location_index;
}
}

template int RegAlloc::RealizeReadImpl<true>(const IR::Inst* value);
template int RegAlloc::RealizeReadImpl<false>(const IR::Inst* value);
template int RegAlloc::RealizeWriteImpl<true>(const IR::Inst* value);
template int RegAlloc::RealizeWriteImpl<false>(const IR::Inst* value);

void RegAlloc::Unlock(HostLoc host_loc) {
HostLocInfo& info = ValueInfo(host_loc);
if (!info.realized) {
return;
}

if (info.accumulated_uses == info.expected_uses) {
info = {};
} else {
info.realized = false;
info.locked = false;
}
}

int RegAlloc::AllocateRegister(const std::array<HostLocInfo, 32>& regs, const std::vector<int>& order) const {
const auto empty = std::find_if(order.begin(), order.end(), [&](int i) { return regs[i].values.empty() && !regs[i].locked; });
if (empty != order.end()) {
return *empty;
}

std::vector<int> candidates;
std::copy_if(order.begin(), order.end(), std::back_inserter(candidates), [&](int i) { return !regs[i].locked; });

// TODO: LRU
std::uniform_int_distribution<size_t> dis{0, candidates.size() - 1};
return candidates[dis(rand_gen)];
}

void RegAlloc::SpillGpr(int index) {
ASSERT(!gprs[index].locked && !gprs[index].realized);
if (gprs[index].values.empty()) {
return;
}
const int new_location_index = FindFreeSpill();
code.STR(oaknut::XReg{index}, SP, spill_offset + new_location_index * spill_slot_size);
spills[new_location_index] = std::exchange(gprs[index], {});
}

void RegAlloc::SpillFpr(int index) {
ASSERT(!fprs[index].locked && !fprs[index].realized);
if (fprs[index].values.empty()) {
return;
}
const int new_location_index = FindFreeSpill();
code.STR(oaknut::QReg{index}, SP, spill_offset + new_location_index * spill_slot_size);
spills[new_location_index] = std::exchange(fprs[index], {});
}

int RegAlloc::FindFreeSpill() const {
const auto iter = std::find_if(spills.begin(), spills.end(), [](const HostLocInfo& info) { return info.values.empty(); });
ASSERT_MSG(iter != spills.end(), "All spill locations are full");
return static_cast<int>(iter - spills.begin());
}

std::optional<HostLoc> RegAlloc::ValueLocation(const IR::Inst* value) const {
const auto contains_value = [value](const HostLocInfo& info) {
return info.values.contains(value);
};

if (const auto iter = std::find_if(gprs.begin(), gprs.end(), contains_value); iter != gprs.end()) {
return HostLoc{HostLoc::Kind::Gpr, static_cast<int>(iter - gprs.begin())};
}
if (const auto iter = std::find_if(fprs.begin(), fprs.end(), contains_value); iter != fprs.end()) {
return HostLoc{HostLoc::Kind::Fpr, static_cast<int>(iter - fprs.begin())};
}
if (const auto iter = std::find_if(spills.begin(), spills.end(), contains_value); iter != spills.end()) {
return HostLoc{HostLoc::Kind::Spill, static_cast<int>(iter - spills.begin())};
}
return std::nullopt;
}

HostLocInfo& RegAlloc::ValueInfo(HostLoc host_loc) {
switch (host_loc.kind) {
case HostLoc::Kind::Gpr:
return gprs[static_cast<size_t>(host_loc.index)];
case HostLoc::Kind::Fpr:
return fprs[static_cast<size_t>(host_loc.index)];
case HostLoc::Kind::Spill:
return spills[static_cast<size_t>(host_loc.index)];
}
ASSERT_FALSE("RegAlloc::ValueInfo: Invalid HostLoc::Kind");
}

HostLocInfo& RegAlloc::ValueInfo(const IR::Inst* value) {
const auto contains_value = [value](const HostLocInfo& info) {
return info.values.contains(value);
};

if (const auto iter = std::find_if(gprs.begin(), gprs.end(), contains_value); iter != gprs.end()) {
return *iter;
}
if (const auto iter = std::find_if(fprs.begin(), fprs.end(), contains_value); iter != fprs.end()) {
return *iter;
}
if (const auto iter = std::find_if(spills.begin(), spills.end(), contains_value); iter != spills.end()) {
return *iter;
}
ASSERT_FALSE("RegAlloc::ValueInfo: Value not found");
}

} // namespace Dynarmic::Backend::Arm64
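
The retirement rule in RegAlloc::Unlock above is the core of the lifetime tracking: a host location is only freed once every expected use of the value has been observed. The following standalone restatement is for illustration only (not part of the commit); the field names mirror HostLocInfo.

    #include <cstddef>

    // expected_uses is set when the defining instruction is realized for write
    // (RealizeWriteImpl); accumulated_uses grows as consumers call GetArgumentInfo.
    struct UseCounters {
        std::size_t accumulated_uses = 0;
        std::size_t expected_uses = 0;
        bool locked = false;
        bool realized = false;
    };

    // Returns true when the host register may be handed out again, i.e. the same
    // condition RegAlloc::Unlock checks once a realized RAReg handle is destroyed.
    bool Retire(UseCounters& info) {
        if (info.accumulated_uses == info.expected_uses) {
            info = {};  // last consumer has read the value: the location is free
            return true;
        }
        info.locked = false;    // uses remain: keep the value resident,
        info.realized = false;  // but allow it to be spilled under register pressure
        return false;
    }
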
src/dynarmic/backend/arm64/reg_alloc.h (new file, 244 lines)

@@ -0,0 +1,244 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2022 MerryMage
* SPDX-License-Identifier: 0BSD
*/

#pragma once

#include <array>
#include <optional>
#include <random>
#include <utility>
#include <vector>

#include <mcl/assert.hpp>
#include <mcl/stdint.hpp>
#include <mcl/type_traits/is_instance_of_template.hpp>
#include <oaknut/oaknut.hpp>
#include <tsl/robin_set.h>

#include "dynarmic/backend/arm64/stack_layout.h"
#include "dynarmic/ir/cond.h"
#include "dynarmic/ir/microinstruction.h"
#include "dynarmic/ir/value.h"

namespace Dynarmic::Backend::Arm64 {

class RegAlloc;

struct HostLoc {
enum class Kind {
Gpr,
Fpr,
Spill,
} kind;
int index;
};

struct Argument {
public:
using copyable_reference = std::reference_wrapper<Argument>;

IR::Type GetType() const;
bool IsImmediate() const;

bool GetImmediateU1() const;
u8 GetImmediateU8() const;
u16 GetImmediateU16() const;
u32 GetImmediateU32() const;
u64 GetImmediateU64() const;
IR::Cond GetImmediateCond() const;
IR::AccType GetImmediateAccType() const;

private:
friend class RegAlloc;
explicit Argument(RegAlloc& reg_alloc)
: reg_alloc{reg_alloc} {}

bool allocated = false;
RegAlloc& reg_alloc;
IR::Value value;
};

template<typename T>
struct RAReg {
public:
static constexpr bool is_vector = std::is_base_of_v<oaknut::VReg, T>;

operator T() const { return *reg; }

template<typename U = T, typename = std::enable_if_t<std::is_same_v<U, oaknut::WReg> && std::is_same_v<T, U>>>
operator oaknut::WRegWsp() const {
return *reg;
}

template<typename U = T, typename = std::enable_if_t<std::is_same_v<U, oaknut::XReg> && std::is_same_v<T, U>>>
operator oaknut::XRegSp() const {
return *reg;
}

T operator*() const { return *reg; }

~RAReg();

private:
friend class RegAlloc;
explicit RAReg(RegAlloc& reg_alloc, bool write, const IR::Inst* value)
: reg_alloc{reg_alloc}, write{write}, value{value} {}

void Realize();

RegAlloc& reg_alloc;
bool write;
const IR::Inst* value;
std::optional<T> reg;
};

struct HostLocInfo {
tsl::robin_set<const IR::Inst*> values;
bool locked = false;
bool realized = false;
size_t accumulated_uses = 0;
size_t expected_uses = 0;
};

class RegAlloc {
public:
using ArgumentInfo = std::array<Argument, IR::max_arg_count>;

explicit RegAlloc(oaknut::CodeGenerator& code, std::vector<int> gpr_order, std::vector<int> fpr_order)
: code{code}, gpr_order{gpr_order}, fpr_order{fpr_order}, rand_gen{std::random_device{}()} {}

ArgumentInfo GetArgumentInfo(IR::Inst* inst);
bool IsValueLive(IR::Inst* inst) const;

auto ReadX(Argument& arg) { return RAReg<oaknut::XReg>{*this, false, PreReadImpl(arg.value)}; }
auto ReadW(Argument& arg) { return RAReg<oaknut::WReg>{*this, false, PreReadImpl(arg.value)}; }

auto ReadQ(Argument& arg) { return RAReg<oaknut::QReg>{*this, false, PreReadImpl(arg.value)}; }
auto ReadD(Argument& arg) { return RAReg<oaknut::DReg>{*this, false, PreReadImpl(arg.value)}; }
auto ReadS(Argument& arg) { return RAReg<oaknut::SReg>{*this, false, PreReadImpl(arg.value)}; }
auto ReadH(Argument& arg) { return RAReg<oaknut::HReg>{*this, false, PreReadImpl(arg.value)}; }
auto ReadB(Argument& arg) { return RAReg<oaknut::BReg>{*this, false, PreReadImpl(arg.value)}; }

template<size_t size>
auto ReadReg(Argument& arg) {
if constexpr (size == 64) {
return ReadX(arg);
} else if constexpr (size == 32) {
return ReadW(arg);
} else {
ASSERT_FALSE("Invalid size to ReadReg {}", size);
}
}

template<size_t size>
auto ReadVec(Argument& arg) {
if constexpr (size == 128) {
return ReadQ(arg);
} else if constexpr (size == 64) {
return ReadD(arg);
} else if constexpr (size == 32) {
return ReadS(arg);
} else if constexpr (size == 16) {
return ReadH(arg);
} else if constexpr (size == 8) {
return ReadB(arg);
} else {
ASSERT_FALSE("Invalid size to ReadVec {}", size);
}
}

auto WriteX(IR::Inst* inst) { return RAReg<oaknut::XReg>{*this, true, inst}; }
auto WriteW(IR::Inst* inst) { return RAReg<oaknut::WReg>{*this, true, inst}; }

auto WriteQ(IR::Inst* inst) { return RAReg<oaknut::QReg>{*this, true, inst}; }
auto WriteD(IR::Inst* inst) { return RAReg<oaknut::DReg>{*this, true, inst}; }
auto WriteS(IR::Inst* inst) { return RAReg<oaknut::SReg>{*this, true, inst}; }
auto WriteH(IR::Inst* inst) { return RAReg<oaknut::HReg>{*this, true, inst}; }
auto WriteB(IR::Inst* inst) { return RAReg<oaknut::BReg>{*this, true, inst}; }

template<size_t size>
auto WriteReg(IR::Inst* inst) {
if constexpr (size == 64) {
return WriteX(inst);
} else if constexpr (size == 32) {
return WriteW(inst);
} else {
ASSERT_FALSE("Invalid size to WriteReg {}", size);
}
}

template<size_t size>
auto WriteVec(IR::Inst* inst) {
if constexpr (size == 128) {
return WriteQ(inst);
} else if constexpr (size == 64) {
return WriteD(inst);
} else if constexpr (size == 32) {
return WriteS(inst);
} else if constexpr (size == 16) {
return WriteH(inst);
} else if constexpr (size == 8) {
return WriteB(inst);
} else {
ASSERT_FALSE("Invalid size to WriteVec {}", size);
}
}

void SpillAll();

template<typename... Ts>
static void Realize(Ts&... rs) {
static_assert((mcl::is_instance_of_template<RAReg, Ts>() && ...));
(rs.Realize(), ...);
}

private:
template<typename>
friend struct RAReg;

const IR::Inst* PreReadImpl(const IR::Value& value) {
ValueInfo(value.GetInst()).locked = true;
return value.GetInst();
}

template<bool is_vector>
int RealizeReadImpl(const IR::Inst* value);
template<bool is_vector>
int RealizeWriteImpl(const IR::Inst* value);
void Unlock(HostLoc host_loc);

int AllocateRegister(const std::array<HostLocInfo, 32>& regs, const std::vector<int>& order) const;
void SpillGpr(int index);
void SpillFpr(int index);
int FindFreeSpill() const;

std::optional<HostLoc> ValueLocation(const IR::Inst* value) const;
HostLocInfo& ValueInfo(HostLoc host_loc);
HostLocInfo& ValueInfo(const IR::Inst* value);

oaknut::CodeGenerator& code;
std::vector<int> gpr_order;
std::vector<int> fpr_order;

std::array<HostLocInfo, 32> gprs;
std::array<HostLocInfo, 32> fprs;
std::array<HostLocInfo, SpillCount> spills;

mutable std::mt19937 rand_gen;
};

template<typename T>
RAReg<T>::~RAReg() {
if (reg) {
reg_alloc.Unlock(HostLoc{is_vector ? HostLoc::Kind::Fpr : HostLoc::Kind::Gpr, reg->index()});
}
}

template<typename T>
void RAReg<T>::Realize() {
reg = T{write ? reg_alloc.RealizeWriteImpl<is_vector>(value) : reg_alloc.RealizeReadImpl<is_vector>(value)};
}

} // namespace Dynarmic::Backend::Arm64
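
The size-templated ReadReg/WriteReg and ReadVec/WriteVec accessors let a handler choose the register class from an IR value's width at compile time. A small usage sketch follows (assumed usage, not taken from the commit; the helper name and the ADD emission are illustrative):

    // Width-generic helper: doubles a value held in a 32- or 64-bit GPR.
    template<size_t bitsize>
    void EmitDoubleExample(oaknut::CodeGenerator& code, RegAlloc& reg_alloc, Argument& arg, IR::Inst* inst) {
        auto result = reg_alloc.WriteReg<bitsize>(inst);   // RAReg<WReg> when bitsize == 32, RAReg<XReg> when 64
        auto operand = reg_alloc.ReadReg<bitsize>(arg);
        RegAlloc::Realize(result, operand);

        code.ADD(*result, *operand, *operand);  // overload resolution picks the W or X form
    }
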
src/dynarmic/backend/arm64/stack_layout.h (new file, 39 lines)

@@ -0,0 +1,39 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2022 MerryMage
* SPDX-License-Identifier: 0BSD
*/

#pragma once

#include <array>

#include <mcl/stdint.hpp>

namespace Dynarmic::Backend::Arm64 {

constexpr size_t SpillCount = 64;

#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable : 4324) // Structure was padded due to alignment specifier
#endif

struct alignas(16) StackLayout {
s64 cycles_remaining;
s64 cycles_to_run;

std::array<std::array<u64, 2>, SpillCount> spill;

u32 save_host_fpcr;
u32 save_host_fpsr;

bool check_bit;
};

#ifdef _MSC_VER
# pragma warning(pop)
#endif

static_assert(sizeof(StackLayout) % 16 == 0);

} // namespace Dynarmic::Backend::Arm64
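
The spill array above is what reg_alloc.cpp addresses relative to SP, which is assumed to point at a StackLayout while generated code runs (consistent with the SP-relative STR/LDR in SpillGpr/SpillFpr). Below is a short sketch of the offset arithmetic, mirroring the spill_offset and spill_slot_size constants defined there; it assumes this header is included and that the code sits inside the Dynarmic::Backend::Arm64 namespace.

    #include <cstddef>

    // Each spill slot is one std::array<u64, 2> element: 16 bytes, enough to hold a Q register.
    constexpr std::size_t spill_offset = offsetof(StackLayout, spill);
    constexpr std::size_t spill_slot_size = sizeof(decltype(StackLayout::spill)::value_type);

    // Byte offset from SP used when spilling to or reloading from slot `slot`.
    constexpr std::size_t SpillSlotOffset(std::size_t slot) {
        return spill_offset + slot * spill_slot_size;
    }

    static_assert(spill_slot_size == 16);
    static_assert(SpillSlotOffset(SpillCount - 1) + spill_slot_size <= sizeof(StackLayout));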