backend/arm64: First dummy code execution

Merry 2022-07-14 21:33:55 +01:00 committed by merry
parent d877777c50
commit f6e80f1e0e
8 changed files with 172 additions and 7 deletions

View file

@@ -376,6 +376,8 @@ elseif(ARCHITECTURE STREQUAL "arm64")
         backend/arm64/a32_interface.cpp
         backend/arm64/a32_jitstate.cpp
         backend/arm64/a32_jitstate.h
+        backend/arm64/emit_arm64.cpp
+        backend/arm64/emit_arm64.h
     )
 endif()

View file

@@ -5,6 +5,7 @@

 #include "dynarmic/backend/arm64/a32_address_space.h"
+#include "dynarmic/backend/arm64/emit_arm64.h"
 #include "dynarmic/frontend/A32/a32_location_descriptor.h"
 #include "dynarmic/frontend/A32/translate/a32_translate.h"
 #include "dynarmic/ir/opt/passes.h"
@@ -49,19 +50,85 @@ void* A32AddressSpace::GetOrEmit(IR::LocationDescriptor descriptor) {
     }

     IR::Block ir_block = GenerateIR(descriptor);
-    void* block_entry = Emit(std::move(ir_block));
-    block_entries.insert_or_assign(descriptor.Value(), block_entry);
-    return block_entry;
+    const EmittedBlockInfo block_info = Emit(std::move(ir_block));
+
+    block_infos.insert_or_assign(descriptor.Value(), block_info);
+    block_entries.insert_or_assign(descriptor.Value(), block_info.entry_point);
+    return block_info.entry_point;
+}
+
+void A32AddressSpace::ClearCache() {
+    block_entries.clear();
+    block_infos.clear();
+    code.set_ptr(prelude_info.end_of_prelude);
 }

 void A32AddressSpace::EmitPrelude() {
+    using namespace oaknut;
+    using namespace oaknut::util;
+    mem.unprotect();
+
     prelude_info.run_code = code.ptr<PreludeInfo::RunCodeFuncType>();
+
+    // TODO: Minimize this.
+    code.STR(X30, SP, PRE_INDEXED, -16);
+    for (int i = 0; i < 30; i += 2) {
+        code.STP(XReg{i}, XReg{i + 1}, SP, PRE_INDEXED, -16);
+    }
+    for (int i = 0; i < 32; i += 2) {
+        code.STP(QReg{i}, QReg{i + 1}, SP, PRE_INDEXED, -32);
+    }
+    code.BR(X0);
+
+    prelude_info.return_from_run_code = code.ptr<void*>();
+    for (int i = 30; i >= 0; i -= 2) {
+        code.LDP(QReg{i}, QReg{i + 1}, SP, POST_INDEXED, 32);
+    }
+    for (int i = 28; i >= 0; i -= 2) {
+        code.LDP(XReg{i}, XReg{i + 1}, SP, POST_INDEXED, 16);
+    }
+    code.LDR(X30, SP, POST_INDEXED, 16);
+    code.RET();
+
+    mem.protect();
+
     prelude_info.end_of_prelude = code.ptr<u32*>();
 }

-void* A32AddressSpace::Emit(IR::Block) {
-    ASSERT_FALSE("Unimplemented");
+size_t A32AddressSpace::GetRemainingSize() {
+    return conf.code_cache_size - (reinterpret_cast<uintptr_t>(code.ptr<void*>()) - reinterpret_cast<uintptr_t>(mem.ptr()));
+}
+
+EmittedBlockInfo A32AddressSpace::Emit(IR::Block block) {
+    if (GetRemainingSize() < 1024 * 1024) {
+        ClearCache();
+    }
+
+    mem.unprotect();
+
+    EmittedBlockInfo block_info = EmitArm64(code, std::move(block));
+    Link(block_info);
+
+    mem.protect();
+
+    return block_info;
+}
+
+void A32AddressSpace::Link(EmittedBlockInfo& block_info) {
+    using namespace oaknut;
+    using namespace oaknut::util;
+
+    for (auto [ptr_offset, target] : block_info.relocations) {
+        CodeGenerator c{reinterpret_cast<u32*>(reinterpret_cast<char*>(block_info.entry_point) + ptr_offset)};
+
+        switch (target) {
+        case LinkTarget::ReturnFromRunCode:
+            c.B(prelude_info.return_from_run_code);
+            break;
+        default:
+            ASSERT_FALSE("Invalid relocation target");
+        }
+    }
 }

 }  // namespace Dynarmic::Backend::Arm64
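
A note on how this prelude is meant to be driven (nothing below is part of the diff; it is a sketch that only follows the RunCodeFuncType signature declared in a32_address_space.h and the save/restore code above). By my count the thunk spills 16 bytes for LR, 15 pairs of X registers (240 bytes) and 16 pairs of Q registers (512 bytes), 768 bytes of stack in total, then BRs to the block handed to it in X0; the block's terminating branch is later linked to return_from_run_code, which unwinds the same frame and RETs to the caller.

    // Hypothetical call site. The dispatcher is not in this diff; in the tree only the
    // friended A32Core can reach the private prelude_info. 'address_space' and
    // 'descriptor' are assumed to be in scope.
    A32JitState context{};            // guest registers, written through X1 by the block
    volatile u32 halt_reason = 0;     // passed in X2; the dummy block never touches it
    void* entry = address_space.GetOrEmit(descriptor);
    HaltReason hr = address_space.prelude_info.run_code(entry, &context, &halt_reason);
    // Note: the epilogue reloads X0 from the stack, so hr is not meaningful yet.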

View file

@@ -5,10 +5,12 @@

 #pragma once

+#include <mcl/stdint.hpp>
 #include <oaknut/code_block.hpp>
 #include <oaknut/oaknut.hpp>
 #include <tsl/robin_map.h>

+#include "dynarmic/backend/arm64/emit_arm64.h"
 #include "dynarmic/interface/A32/config.h"
 #include "dynarmic/interface/halt_reason.h"
 #include "dynarmic/ir/basic_block.h"
@@ -28,12 +30,16 @@ public:
     void* GetOrEmit(IR::LocationDescriptor descriptor);

+    void ClearCache();
+
 private:
     friend class A32Core;

     void EmitPrelude();
-    void* Emit(IR::Block ir_block);
+    size_t GetRemainingSize();
+    EmittedBlockInfo Emit(IR::Block ir_block);
+    void Link(EmittedBlockInfo& block);

     const A32::UserConfig conf;
@@ -41,12 +47,14 @@
     oaknut::CodeGenerator code;

     tsl::robin_map<u64, void*> block_entries;
+    tsl::robin_map<u64, EmittedBlockInfo> block_infos;

     struct PreludeInfo {
         u32* end_of_prelude;

         using RunCodeFuncType = HaltReason (*)(void* entry_point, A32JitState* context, volatile u32* halt_reason);
         RunCodeFuncType run_code;
+        void* return_from_run_code;
     } prelude_info;
 };

View file

@@ -162,7 +162,7 @@ struct Jit::Impl final {
 private:
     void RequestCacheInvalidation() {
-        ASSERT_FALSE("Unimplemented");
+        // ASSERT_FALSE("Unimplemented");

         invalidate_entire_cache = false;
         invalid_cache_ranges.clear();

View file

View file

@@ -0,0 +1,16 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2022 MerryMage
+ * SPDX-License-Identifier: 0BSD
+ */
+
+#pragma once
+
+#include <array>
+
+#include <mcl/stdint.hpp>
+
+namespace Dynarmic::Backend::Arm64 {
+
+constexpr u32 ABI_ALL_
+
+}  // namespace Dynarmic::Backend::Arm64

View file

@@ -0,0 +1,36 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2022 MerryMage
+ * SPDX-License-Identifier: 0BSD
+ */
+
+#include "dynarmic/backend/arm64/emit_arm64.h"
+
+#include <oaknut/oaknut.hpp>
+
+#include "dynarmic/backend/arm64/a32_jitstate.h"
+#include "dynarmic/ir/basic_block.h"
+
+namespace Dynarmic::Backend::Arm64 {
+
+using namespace oaknut::util;
+
+EmittedBlockInfo EmitArm64(oaknut::CodeGenerator& code, IR::Block block) {
+    (void)block;
+
+    EmittedBlockInfo ebi;
+    ebi.entry_point = code.ptr<void*>();
+
+    code.MOV(W0, 8);
+    code.STR(W0, X1, offsetof(A32JitState, regs) + 0 * sizeof(u32));
+    code.MOV(W0, 2);
+    code.STR(W0, X1, offsetof(A32JitState, regs) + 1 * sizeof(u32));
+    code.STR(W0, X1, offsetof(A32JitState, regs) + 15 * sizeof(u32));
+
+    ebi.relocations[code.ptr<char*>() - reinterpret_cast<char*>(ebi.entry_point)] = LinkTarget::ReturnFromRunCode;
+    code.NOP();
+
+    ebi.size = reinterpret_cast<size_t>(code.ptr<void*>()) - reinterpret_cast<size_t>(ebi.entry_point);
+
+    return ebi;
+}
+
+}  // namespace Dynarmic::Backend::Arm64
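
To make the relocation bookkeeping concrete, a sketch of the emitted block's layout, assuming oaknut lowers each of these small-immediate MOVs to a single MOVZ so that every call above emits exactly one 4-byte instruction. The relocation is recorded at the byte offset of the placeholder NOP, which Link() in a32_address_space.cpp later overwrites with a B to the prelude's return_from_run_code; and because the prelude only consumes X0, X1 still holds the A32JitState* that was passed to run_code, so the three stores land directly in the guest register file.

    // entry+0x00  MOVZ W0, #8
    // entry+0x04  STR  W0, [X1, #(offsetof(A32JitState, regs) + 0)]
    // entry+0x08  MOVZ W0, #2
    // entry+0x0c  STR  W0, [X1, #(offsetof(A32JitState, regs) + 4)]
    // entry+0x10  STR  W0, [X1, #(offsetof(A32JitState, regs) + 60)]
    // entry+0x14  NOP   <- relocations[0x14] = ReturnFromRunCode; patched to B by Link()
    // ebi.size == 0x18
    //
    // After running it through the prelude (see the call-site sketch above):
    assert(context.regs[0] == 8);    // MOV W0, #8  -> regs[0]
    assert(context.regs[1] == 2);    // MOV W0, #2  -> regs[1]
    assert(context.regs[15] == 2);   // same W0 stored to regs[15] (R15/PC)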

View file

@@ -0,0 +1,36 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2022 MerryMage
+ * SPDX-License-Identifier: 0BSD
+ */
+
+#pragma once
+
+#include <mcl/stdint.hpp>
+#include <tsl/robin_map.h>
+
+namespace oaknut {
+struct PointerCodeGeneratorPolicy;
+template<typename>
+class BasicCodeGenerator;
+using CodeGenerator = BasicCodeGenerator<PointerCodeGeneratorPolicy>;
+}  // namespace oaknut
+
+namespace Dynarmic::IR {
+class Block;
+}  // namespace Dynarmic::IR
+
+namespace Dynarmic::Backend::Arm64 {
+
+enum class LinkTarget {
+    ReturnFromRunCode,
+};
+
+struct EmittedBlockInfo {
+    void* entry_point;
+    size_t size;
+    tsl::robin_map<std::ptrdiff_t, LinkTarget> relocations;
+};
+
+EmittedBlockInfo EmitArm64(oaknut::CodeGenerator& code, IR::Block block);
+
+}  // namespace Dynarmic::Backend::Arm64