Add headers

Liam 2022-11-13 11:13:29 -05:00
parent 2d3c8c5724
commit 4a4b00f0b1
4 changed files with 312 additions and 2 deletions

src/dynarmic/backend/arm64/a64_address_space.h

@@ -0,0 +1,94 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2022 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mcl/stdint.hpp>
#include <oaknut/code_block.hpp>
#include <oaknut/oaknut.hpp>
#include <tsl/robin_map.h>
#include <tsl/robin_set.h>
#include "dynarmic/backend/arm64/emit_arm64.h"
#include "dynarmic/interface/A64/config.h"
#include "dynarmic/interface/halt_reason.h"
#include "dynarmic/ir/basic_block.h"
#include "dynarmic/ir/location_descriptor.h"
namespace Dynarmic::Backend::Arm64 {
struct A64JitState;
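// Manages the emitted-code cache for the A64 frontend: lowers guest blocks to
// IR, emits AArch64 host code for them, and hands out entry points keyed by
// location descriptor.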
class A64AddressSpace final {
public:
explicit A64AddressSpace(const A64::UserConfig& conf);
IR::Block GenerateIR(IR::LocationDescriptor) const;
CodePtr Get(IR::LocationDescriptor descriptor);
CodePtr GetOrEmit(IR::LocationDescriptor descriptor);
void ClearCache();
private:
friend class A64Core;
void EmitPrelude();
size_t GetRemainingSize();
EmittedBlockInfo Emit(IR::Block ir_block);
void Link(IR::LocationDescriptor block_descriptor, EmittedBlockInfo& block);
void RelinkForDescriptor(IR::LocationDescriptor target_descriptor);
const A64::UserConfig conf;
oaknut::CodeBlock mem;
oaknut::CodeGenerator code;
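    // Caches keyed by the packed IR::LocationDescriptor value: entry points,
    // per-block emission metadata, and the set of blocks that link to each
    // descriptor, so RelinkForDescriptor can patch their branches.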
tsl::robin_map<u64, CodePtr> block_entries;
tsl::robin_map<u64, EmittedBlockInfo> block_infos;
tsl::robin_map<u64, tsl::robin_set<u64>> block_references;
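    // Entry/exit trampolines and callback thunks emitted once by EmitPrelude();
    // generated code reaches the host runtime only through these pointers.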
struct PreludeInfo {
u32* end_of_prelude;
using RunCodeFuncType = HaltReason (*)(CodePtr entry_point, A64JitState* context, volatile u32* halt_reason);
RunCodeFuncType run_code;
RunCodeFuncType step_code;
void* return_to_dispatcher;
void* return_from_run_code;
void* read_memory_8;
void* read_memory_16;
void* read_memory_32;
void* read_memory_64;
void* read_memory_128;
void* exclusive_read_memory_8;
void* exclusive_read_memory_16;
void* exclusive_read_memory_32;
void* exclusive_read_memory_64;
void* exclusive_read_memory_128;
void* write_memory_8;
void* write_memory_16;
void* write_memory_32;
void* write_memory_64;
void* write_memory_128;
void* exclusive_write_memory_8;
void* exclusive_write_memory_16;
void* exclusive_write_memory_32;
void* exclusive_write_memory_64;
void* exclusive_write_memory_128;
void* call_svc;
void* exception_raised;
void* dc_raised;
void* ic_raised;
void* isb_raised;
void* add_ticks;
void* get_ticks_remaining;
} prelude_info;
};
} // namespace Dynarmic::Backend::Arm64

src/dynarmic/backend/arm64/a64_core.h

@@ -0,0 +1,30 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2022 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include "dynarmic/backend/arm64/a64_address_space.h"
#include "dynarmic/backend/arm64/a64_jitstate.h"
namespace Dynarmic::Backend::Arm64 {
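// Thin executor: resolves the current guest location to an entry point in the
// given address space and enters it through the prelude's run/step thunk.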
class A64Core final {
public:
explicit A64Core(const A64::UserConfig&) {}
HaltReason Run(A64AddressSpace& process, A64JitState& thread_ctx, volatile u32* halt_reason) {
const auto location_descriptor = thread_ctx.GetLocationDescriptor();
const auto entry_point = process.GetOrEmit(location_descriptor);
return process.prelude_info.run_code(entry_point, &thread_ctx, halt_reason);
}
HaltReason Step(A64AddressSpace& process, A64JitState& thread_ctx, volatile u32* halt_reason) {
const auto location_descriptor = A64::LocationDescriptor{thread_ctx.GetLocationDescriptor()}.SetSingleStepping(true);
const auto entry_point = process.GetOrEmit(location_descriptor);
return process.prelude_info.step_code(entry_point, &thread_ctx, halt_reason);
}
};
} // namespace Dynarmic::Backend::Arm64

src/dynarmic/backend/arm64/a64_interface.cpp

@@ -1,5 +1,5 @@
/* This file is part of the dynarmic project.
- * Copyright (c) 2021 MerryMage
+ * Copyright (c) 2022 MerryMage
* SPDX-License-Identifier: 0BSD
*/
@@ -11,13 +11,162 @@
#include <mcl/scope_exit.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/backend/arm64/a64_address_space.h"
#include "dynarmic/backend/arm64/a64_core.h"
#include "dynarmic/backend/arm64/a64_jitstate.h"
#include "dynarmic/common/atomic.h"
#include "dynarmic/interface/A64/a64.h"
#include "dynarmic/interface/A64/config.h"
namespace Dynarmic::A64 {
-struct Jit::Impl {};
using namespace Backend::Arm64;
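// Impl binds together the per-instance pieces behind the public Jit interface:
// guest register state, the code cache (A64AddressSpace), and the core that
// enters emitted code.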
struct Jit::Impl final {
Impl(Jit* jit_interface, A64::UserConfig conf)
: jit_interface(jit_interface)
, conf(conf)
, current_address_space(conf)
, core(conf) {}
HaltReason Run() {
ASSERT(!is_executing);
PerformRequestedCacheInvalidation();
is_executing = true;
SCOPE_EXIT {
is_executing = false;
};
HaltReason hr = core.Run(current_address_space, current_state, &halt_reason);
PerformRequestedCacheInvalidation();
return hr;
}
HaltReason Step() {
ASSERT(!is_executing);
PerformRequestedCacheInvalidation();
is_executing = true;
SCOPE_EXIT {
is_executing = false;
};
HaltReason hr = core.Step(current_address_space, current_state, &halt_reason);
PerformRequestedCacheInvalidation();
return hr;
}
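    // Invalidation is deferred: the two methods below only record the request
    // and raise HaltReason::CacheInvalidation; the actual flush happens in
    // PerformRequestedCacheInvalidation() on the execution thread.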
void ClearCache() {
std::unique_lock lock{invalidation_mutex};
invalidate_entire_cache = true;
HaltExecution(HaltReason::CacheInvalidation);
}
void InvalidateCacheRange(std::uint64_t start_address, std::size_t length) {
std::unique_lock lock{invalidation_mutex};
invalid_cache_ranges.add(boost::icl::discrete_interval<u64>::closed(start_address, start_address + length - 1));
HaltExecution(HaltReason::CacheInvalidation);
}
void Reset() {
current_state = {};
}
void HaltExecution(HaltReason hr) {
Atomic::Or(&halt_reason, static_cast<u32>(hr));
}
void ClearHalt(HaltReason hr) {
Atomic::And(&halt_reason, ~static_cast<u32>(hr));
}
std::array<std::uint64_t, 31>& Regs() {
return current_state.reg;
}
const std::array<std::uint64_t, 31>& Regs() const {
return current_state.reg;
}
std::array<std::uint64_t, 64>& VecRegs() {
return current_state.vec;
}
const std::array<std::uint64_t, 64>& VecRegs() const {
return current_state.vec;
}
std::uint32_t Fpcr() const {
return current_state.fpcr;
}
void SetFpcr(std::uint32_t value) {
current_state.fpcr = value;
}
std::uint32_t Fpsr() const {
return current_state.fpsr;
}
    void SetFpsr(std::uint32_t value) {
current_state.fpsr = value;
}
std::uint32_t Pstate() const {
return current_state.cpsr_nzcv;
}
void SetPstate(std::uint32_t value) {
current_state.cpsr_nzcv = value;
}
void ClearExclusiveState() {
current_state.exclusive_state = false;
}
void DumpDisassembly() const {
ASSERT_FALSE("Unimplemented");
}
private:
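    // Called on the execution thread around Run()/Step(): clears any pending
    // CacheInvalidation halt request and flushes the code cache as recorded by
    // ClearCache()/InvalidateCacheRange().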
void PerformRequestedCacheInvalidation() {
ClearHalt(HaltReason::CacheInvalidation);
if (invalidate_entire_cache) {
current_address_space.ClearCache();
invalidate_entire_cache = false;
invalid_cache_ranges.clear();
return;
}
if (!invalid_cache_ranges.empty()) {
// TODO: Optimize
current_address_space.ClearCache();
invalid_cache_ranges.clear();
return;
}
}
Jit* jit_interface;
A64::UserConfig conf;
A64JitState current_state{};
A64AddressSpace current_address_space;
A64Core core;
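    // Raised asynchronously by HaltExecution(), possibly from another thread;
    // emitted code observes it through the pointer handed to run_code/step_code.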
volatile u32 halt_reason = 0;
std::mutex invalidation_mutex;
boost::icl::interval_set<u64> invalid_cache_ranges;
bool invalidate_entire_cache = false;
bool is_executing = false;
};
-Jit::Jit(UserConfig conf) {
-    (void)conf;

src/dynarmic/backend/arm64/a64_jitstate.h

@@ -0,0 +1,37 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2022 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <mcl/stdint.hpp>
#include "dynarmic/frontend/A64/a64_location_descriptor.h"
namespace Dynarmic::Backend::Arm64 {
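// Guest CPU context in the layout the generated code expects; emitted code
// loads and stores these fields directly.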
struct A64JitState {
    std::array<u64, 31> reg{};  // General-purpose registers X0-X30.
    u64 sp = 0;
    u64 pc = 0;
    u32 cpsr_nzcv = 0;  // NZCV condition flags.
    u32 upper_location_descriptor;
    alignas(16) std::array<u64, 64> vec{};  // Extension registers Q0-Q31, two u64 halves each.
    u32 exclusive_state = 0;  // Exclusive-monitor flag for LDXR/STXR sequences.
    u32 fpsr = 0;  // Floating-point status register.
    u32 fpcr = 0;  // Floating-point control register.
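    // Only the PC is folded into the location descriptor for now; the bits in
    // upper_location_descriptor are not yet incorporated.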
IR::LocationDescriptor GetLocationDescriptor() const {
return IR::LocationDescriptor{pc};
}
};
} // namespace Dynarmic::Backend::Arm64