backend/rv64: Add initial RISC-V framework

The RISC-V target is now compilable.
Yang Liu 2023-12-31 16:48:38 +08:00 committed by Merry
parent add5238180
commit a4b9b431b0
12 changed files with 533 additions and 1 deletion

.gitignore (vendored)

@@ -9,3 +9,4 @@ src/dynarmic/backend/arm64/mig/
src/dynarmic/backend/x64/mig/
# System files
.DS_Store
.vscode

src/dynarmic/CMakeLists.txt

@@ -399,6 +399,24 @@ if ("arm64" IN_LIST ARCHITECTURE)
    endif()
endif()
if ("riscv" IN_LIST ARCHITECTURE)
if ("A32" IN_LIST DYNARMIC_FRONTENDS)
target_sources(dynarmic PRIVATE
backend/riscv64/a32_address_space.cpp
backend/riscv64/a32_address_space.h
backend/riscv64/a32_core.h
backend/riscv64/a32_interface.cpp
backend/riscv64/a32_jitstate.cpp
backend/riscv64/a32_jitstate.h
backend/riscv64/dummy_code_block.h
)
endif()
if ("A64" IN_LIST DYNARMIC_FRONTENDS)
message(FATAL_ERROR "TODO: Unimplemented frontend for this host architecture")
endif()
endif()
if (WIN32)
    target_sources(dynarmic PRIVATE backend/exception_handler_windows.cpp)
elseif (APPLE)

src/dynarmic/backend/exception_handler.h

@@ -20,6 +20,10 @@ class BlockOfCode;
namespace oaknut {
class CodeBlock;
} // namespace oaknut
#elif defined(MCL_ARCHITECTURE_RISCV)
namespace Dynarmic::Backend::RV64 {
class DummyCodeBlock;
} // namespace Dynarmic::Backend::RV64
#else
# error "Invalid architecture"
#endif
@@ -35,6 +39,9 @@ struct FakeCall {
struct FakeCall {
    u64 call_pc;
};
#elif defined(MCL_ARCHITECTURE_RISCV)
struct FakeCall {
};
#else
# error "Invalid architecture"
#endif
@@ -48,6 +55,8 @@
    void Register(X64::BlockOfCode& code);
#elif defined(MCL_ARCHITECTURE_ARM64)
    void Register(oaknut::CodeBlock& mem, std::size_t mem_size);
#elif defined(MCL_ARCHITECTURE_RISCV)
    void Register(RV64::DummyCodeBlock& mem, std::size_t mem_size);
#else
# error "Invalid architecture"
#endif

src/dynarmic/backend/exception_handler_generic.cpp

@@ -21,6 +21,10 @@ void ExceptionHandler::Register(X64::BlockOfCode&) {
void ExceptionHandler::Register(oaknut::CodeBlock&, std::size_t) {
    // Do nothing
}
#elif defined(MCL_ARCHITECTURE_RISCV)
void ExceptionHandler::Register(RV64::DummyCodeBlock&, std::size_t) {
    // Do nothing
}
#else
# error "Invalid architecture"
#endif

src/dynarmic/backend/exception_handler_posix.cpp

@@ -32,6 +32,8 @@
# include <oaknut/code_block.hpp>
# include "dynarmic/backend/arm64/abi.h"
#elif defined(MCL_ARCHITECTURE_RISCV)
# include "dynarmic/backend/riscv64/dummy_code_block.h"
#else
# error "Invalid architecture"
#endif
@@ -141,10 +143,12 @@ void SigHandler::RemoveCodeBlock(u64 host_pc) {
void SigHandler::SigAction(int sig, siginfo_t* info, void* raw_context) {
    ASSERT(sig == SIGSEGV || sig == SIGBUS);
#ifndef MCL_ARCHITECTURE_RISCV
    ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(raw_context);
# ifndef __OpenBSD__
    auto& mctx = ucontext->uc_mcontext;
# endif
#endif
#if defined(MCL_ARCHITECTURE_X86_64)
@@ -243,6 +247,10 @@ void SigHandler::SigAction(int sig, siginfo_t* info, void* raw_context) {
    fmt::print(stderr, "Unhandled {} at pc {:#018x}\n", sig == SIGSEGV ? "SIGSEGV" : "SIGBUS", CTX_PC);
#elif defined(MCL_ARCHITECTURE_RISCV)
    ASSERT_FALSE("Unimplemented");
#else
# error "Invalid architecture"
@@ -304,6 +312,12 @@ void ExceptionHandler::Register(oaknut::CodeBlock& mem, std::size_t size) {
    const u64 code_end = code_begin + size;
    impl = std::make_unique<Impl>(code_begin, code_end);
}
#elif defined(MCL_ARCHITECTURE_RISCV)
void ExceptionHandler::Register(RV64::DummyCodeBlock& mem, std::size_t size) {
    const u64 code_begin = mcl::bit_cast<u64>(mem.ptr());
    const u64 code_end = code_begin + size;
    impl = std::make_unique<Impl>(code_begin, code_end);
}
#else
# error "Invalid architecture"
#endif
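The POSIX handler records each JIT code region with SigHandler so the signal handler can decide whether a faulting PC lies inside emitted code before attempting recovery. A minimal sketch of that containment test, assuming regions are tracked as half-open [code_begin, code_end) pairs (CodeRange and IsJitPc are illustrative names, not dynarmic's internals):

    #include <cstdint>
    #include <vector>

    struct CodeRange {
        std::uint64_t begin;
        std::uint64_t end;  // half-open: [begin, end)
    };

    // True if the faulting pc falls inside any registered JIT region.
    bool IsJitPc(const std::vector<CodeRange>& ranges, std::uint64_t pc) {
        for (const auto& r : ranges) {
            if (pc >= r.begin && pc < r.end) {
                return true;
            }
        }
        return false;
    }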

src/dynarmic/backend/riscv64/a32_address_space.cpp (new)

@@ -0,0 +1,63 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2024 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "dynarmic/backend/riscv64/a32_address_space.h"
#include "dynarmic/frontend/A32/a32_location_descriptor.h"
#include "dynarmic/frontend/A32/translate/a32_translate.h"
#include "dynarmic/ir/opt/passes.h"
namespace Dynarmic::Backend::RV64 {
A32AddressSpace::A32AddressSpace(const A32::UserConfig& conf)
        : conf(conf) {
    EmitPrelude();
}

IR::Block A32AddressSpace::GenerateIR(IR::LocationDescriptor descriptor) const {
    IR::Block ir_block = A32::Translate(A32::LocationDescriptor{descriptor}, conf.callbacks, {conf.arch_version, conf.define_unpredictable_behaviour, conf.hook_hint_instructions});

    Optimization::PolyfillPass(ir_block, {});
    if (conf.HasOptimization(OptimizationFlag::GetSetElimination)) {
        Optimization::A32GetSetElimination(ir_block, {.convert_nzc_to_nz = true});
        Optimization::DeadCodeElimination(ir_block);
    }
    if (conf.HasOptimization(OptimizationFlag::ConstProp)) {
        Optimization::A32ConstantMemoryReads(ir_block, conf.callbacks);
        Optimization::ConstantPropagation(ir_block);
        Optimization::DeadCodeElimination(ir_block);
    }
    Optimization::VerificationPass(ir_block);

    return ir_block;
}

void* A32AddressSpace::Get(IR::LocationDescriptor descriptor) {
    if (const auto iter = block_entries.find(descriptor.Value()); iter != block_entries.end()) {
        return iter->second;
    }
    return nullptr;
}

void* A32AddressSpace::GetOrEmit(IR::LocationDescriptor descriptor) {
    if (void* block_entry = Get(descriptor)) {
        return block_entry;
    }

    IR::Block ir_block = GenerateIR(descriptor);
    void* block_entry = Emit(std::move(ir_block));
    block_entries.insert_or_assign(descriptor.Value(), block_entry);
    return block_entry;
}

void A32AddressSpace::EmitPrelude() {
    ASSERT_FALSE("Unimplemented");
}

void* A32AddressSpace::Emit(IR::Block) {
    ASSERT_FALSE("Unimplemented");
}
} // namespace Dynarmic::Backend::RV64
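GetOrEmit is a translate-on-miss block cache: look the descriptor up, and on a miss translate the guest code to IR, emit host code, and memoize the entry point. A minimal sketch of the pattern, with std::unordered_map standing in for tsl::robin_map and EmitStub as a hypothetical stand-in for GenerateIR plus Emit:

    #include <cstdint>
    #include <unordered_map>

    using EntryPoint = void*;

    std::unordered_map<std::uint64_t, EntryPoint> block_entries;

    // Hypothetical stand-in for translation + code emission.
    EntryPoint EmitStub(std::uint64_t /*descriptor*/) { return nullptr; }

    EntryPoint GetOrEmit(std::uint64_t descriptor) {
        if (auto iter = block_entries.find(descriptor); iter != block_entries.end()) {
            return iter->second;  // cache hit: reuse previously emitted code
        }
        EntryPoint entry = EmitStub(descriptor);  // cache miss: translate and emit
        block_entries.insert_or_assign(descriptor, entry);
        return entry;
    }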

src/dynarmic/backend/riscv64/a32_address_space.h (new)

@@ -0,0 +1,48 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2024 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <tsl/robin_map.h>
#include "dynarmic/interface/A32/config.h"
#include "dynarmic/interface/halt_reason.h"
#include "dynarmic/ir/basic_block.h"
#include "dynarmic/ir/location_descriptor.h"
namespace Dynarmic::Backend::RV64 {
struct A32JitState;
class A32AddressSpace final {
public:
    explicit A32AddressSpace(const A32::UserConfig& conf);

    IR::Block GenerateIR(IR::LocationDescriptor) const;

    void* Get(IR::LocationDescriptor descriptor);

    void* GetOrEmit(IR::LocationDescriptor descriptor);

private:
    friend class A32Core;

    void EmitPrelude();
    void* Emit(IR::Block ir_block);

    const A32::UserConfig conf;

    tsl::robin_map<u64, void*> block_entries;

    struct PreludeInfo {
        u32* end_of_prelude;

        using RunCodeFuncType = HaltReason (*)(void* entry_point, A32JitState* context, volatile u32* halt_reason);
        RunCodeFuncType run_code;
    } prelude_info;
};
} // namespace Dynarmic::Backend::RV64

src/dynarmic/backend/riscv64/a32_core.h (new)

@@ -0,0 +1,24 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2024 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include "dynarmic/backend/riscv64/a32_address_space.h"
#include "dynarmic/backend/riscv64/a32_jitstate.h"
namespace Dynarmic::Backend::RV64 {
class A32Core final {
public:
    explicit A32Core(const A32::UserConfig&) {}

    HaltReason Run(A32AddressSpace& process, A32JitState& thread_ctx, volatile u32* halt_reason) {
        const auto location_descriptor = thread_ctx.GetLocationDescriptor();
        const auto entry_point = process.GetOrEmit(location_descriptor);
        return process.prelude_info.run_code(entry_point, &thread_ctx, halt_reason);
    }
};
} // namespace Dynarmic::Backend::RV64
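A32Core::Run is a thin dispatcher: it resolves the current guest location to an entry point, then enters emitted code through the prelude, which returns a HaltReason once execution stops. A sketch of that function-pointer dispatch, with a stub standing in for the emitted prelude (a real prelude would save callee-saved host registers, load guest state, and jump to entry_point):

    #include <cstdint>

    enum class HaltReason : std::uint32_t { None = 0, CacheInvalidation = 1 };

    struct JitState { /* guest registers, flags, ... */ };

    using RunCodeFuncType = HaltReason (*)(void* entry_point, JitState* ctx, volatile std::uint32_t* halt);

    // Stand-in for the emitted prelude: immediately reports the halt word.
    HaltReason StubRunCode(void* /*entry_point*/, JitState* /*ctx*/, volatile std::uint32_t* halt) {
        return static_cast<HaltReason>(*halt);
    }

    int main() {
        JitState ctx{};
        volatile std::uint32_t halt = 0;
        RunCodeFuncType run_code = &StubRunCode;  // in dynarmic, set up by EmitPrelude()
        return run_code(nullptr, &ctx, &halt) == HaltReason::None ? 0 : 1;
    }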

src/dynarmic/backend/riscv64/a32_interface.cpp (new)

@@ -0,0 +1,217 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2024 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <memory>
#include <mutex>
#include <boost/icl/interval_set.hpp>
#include <mcl/assert.hpp>
#include <mcl/scope_exit.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/backend/riscv64/a32_address_space.h"
#include "dynarmic/backend/riscv64/a32_core.h"
#include "dynarmic/backend/riscv64/a32_jitstate.h"
#include "dynarmic/common/atomic.h"
#include "dynarmic/interface/A32/a32.h"
namespace Dynarmic::A32 {
using namespace Backend::RV64;
struct Jit::Impl final {
    Impl(Jit* jit_interface, A32::UserConfig conf)
            : jit_interface(jit_interface)
            , conf(conf)
            , current_address_space(conf)
            , core(conf) {}

    HaltReason Run() {
        ASSERT(!jit_interface->is_executing);
        jit_interface->is_executing = true;
        SCOPE_EXIT {
            jit_interface->is_executing = false;
        };

        HaltReason hr = core.Run(current_address_space, current_state, &halt_reason);

        RequestCacheInvalidation();

        return hr;
    }

    HaltReason Step() {
        ASSERT(!jit_interface->is_executing);
        jit_interface->is_executing = true;
        SCOPE_EXIT {
            jit_interface->is_executing = false;
        };

        ASSERT_FALSE("Unimplemented");

        RequestCacheInvalidation();

        return HaltReason{};
    }

    void ClearCache() {
        std::unique_lock lock{invalidation_mutex};
        invalidate_entire_cache = true;
        HaltExecution(HaltReason::CacheInvalidation);
    }

    void InvalidateCacheRange(std::uint32_t start_address, std::size_t length) {
        std::unique_lock lock{invalidation_mutex};
        invalid_cache_ranges.add(boost::icl::discrete_interval<u32>::closed(start_address, static_cast<u32>(start_address + length - 1)));
        HaltExecution(HaltReason::CacheInvalidation);
    }

    void Reset() {
        current_state = {};
    }

    void HaltExecution(HaltReason hr) {
        Atomic::Or(&halt_reason, static_cast<u32>(hr));
    }

    void ClearHalt(HaltReason hr) {
        Atomic::And(&halt_reason, ~static_cast<u32>(hr));
    }

    std::array<std::uint32_t, 16>& Regs() {
        return current_state.regs;
    }

    const std::array<std::uint32_t, 16>& Regs() const {
        return current_state.regs;
    }

    std::array<std::uint32_t, 64>& ExtRegs() {
        return current_state.ext_regs;
    }

    const std::array<std::uint32_t, 64>& ExtRegs() const {
        return current_state.ext_regs;
    }

    std::uint32_t Cpsr() const {
        return current_state.Cpsr();
    }

    void SetCpsr(std::uint32_t value) {
        current_state.SetCpsr(value);
    }

    std::uint32_t Fpscr() const {
        return current_state.Fpscr();
    }

    void SetFpscr(std::uint32_t value) {
        current_state.SetFpscr(value);
    }

    void ClearExclusiveState() {
        current_state.exclusive_state = 0;
    }

    void DumpDisassembly() const {
        ASSERT_FALSE("Unimplemented");
    }

private:
    void RequestCacheInvalidation() {
        ASSERT_FALSE("Unimplemented");

        invalidate_entire_cache = false;
        invalid_cache_ranges.clear();
    }

    Jit* jit_interface;
    A32::UserConfig conf;
    A32JitState current_state{};
    A32AddressSpace current_address_space;
    A32Core core;

    volatile u32 halt_reason = 0;

    std::mutex invalidation_mutex;
    boost::icl::interval_set<u32> invalid_cache_ranges;
    bool invalidate_entire_cache = false;
};
Jit::Jit(UserConfig conf)
        : impl(std::make_unique<Impl>(this, conf)) {}

Jit::~Jit() = default;

HaltReason Jit::Run() {
    return impl->Run();
}

HaltReason Jit::Step() {
    return impl->Step();
}

void Jit::ClearCache() {
    impl->ClearCache();
}

void Jit::InvalidateCacheRange(std::uint32_t start_address, std::size_t length) {
    impl->InvalidateCacheRange(start_address, length);
}

void Jit::Reset() {
    impl->Reset();
}

void Jit::HaltExecution(HaltReason hr) {
    impl->HaltExecution(hr);
}

void Jit::ClearHalt(HaltReason hr) {
    impl->ClearHalt(hr);
}

std::array<std::uint32_t, 16>& Jit::Regs() {
    return impl->Regs();
}

const std::array<std::uint32_t, 16>& Jit::Regs() const {
    return impl->Regs();
}

std::array<std::uint32_t, 64>& Jit::ExtRegs() {
    return impl->ExtRegs();
}

const std::array<std::uint32_t, 64>& Jit::ExtRegs() const {
    return impl->ExtRegs();
}

std::uint32_t Jit::Cpsr() const {
    return impl->Cpsr();
}

void Jit::SetCpsr(std::uint32_t value) {
    impl->SetCpsr(value);
}

std::uint32_t Jit::Fpscr() const {
    return impl->Fpscr();
}

void Jit::SetFpscr(std::uint32_t value) {
    impl->SetFpscr(value);
}

void Jit::ClearExclusiveState() {
    impl->ClearExclusiveState();
}

void Jit::DumpDisassembly() const {
    impl->DumpDisassembly();
}

} // namespace Dynarmic::A32
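halt_reason is a bitmask polled by the JIT between blocks: HaltExecution sets a bit with an atomic OR, and ClearHalt clears it with an atomic AND of the complement. A sketch of the same semantics using std::atomic in place of dynarmic's Atomic helpers:

    #include <atomic>
    #include <cstdint>

    constexpr std::uint32_t CacheInvalidation = 1u << 0;

    std::atomic<std::uint32_t> halt_reason{0};

    void HaltExecution(std::uint32_t hr) { halt_reason.fetch_or(hr); }  // set the bit
    void ClearHalt(std::uint32_t hr) { halt_reason.fetch_and(~hr); }    // clear the bit

    int main() {
        HaltExecution(CacheInvalidation);
        const bool halted = (halt_reason.load() & CacheInvalidation) != 0;  // true
        ClearHalt(CacheInvalidation);
        return (halted && halt_reason.load() == 0) ? 0 : 1;
    }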

src/dynarmic/backend/riscv64/a32_jitstate.cpp (new)

@@ -0,0 +1,73 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2024 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "dynarmic/backend/riscv64/a32_jitstate.h"
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
namespace Dynarmic::Backend::RV64 {
u32 A32JitState::Cpsr() const {
    u32 cpsr = 0;

    // NZCV flags
    cpsr |= cpsr_nzcv;
    // Q flag
    cpsr |= cpsr_q;
    // GE flags
    cpsr |= mcl::bit::get_bit<31>(cpsr_ge) ? 1 << 19 : 0;
    cpsr |= mcl::bit::get_bit<23>(cpsr_ge) ? 1 << 18 : 0;
    cpsr |= mcl::bit::get_bit<15>(cpsr_ge) ? 1 << 17 : 0;
    cpsr |= mcl::bit::get_bit<7>(cpsr_ge) ? 1 << 16 : 0;
    // E flag, T flag
    cpsr |= mcl::bit::get_bit<1>(upper_location_descriptor) ? 1 << 9 : 0;
    cpsr |= mcl::bit::get_bit<0>(upper_location_descriptor) ? 1 << 5 : 0;
    // IT state
    cpsr |= static_cast<u32>(upper_location_descriptor & 0b11111100'00000000);
    cpsr |= static_cast<u32>(upper_location_descriptor & 0b00000011'00000000) << 17;
    // Other flags
    cpsr |= cpsr_jaifm;

    return cpsr;
}

void A32JitState::SetCpsr(u32 cpsr) {
    // NZCV flags
    cpsr_nzcv = cpsr & 0xF0000000;
    // Q flag
    cpsr_q = cpsr & (1 << 27);
    // GE flags
    cpsr_ge = 0;
    cpsr_ge |= mcl::bit::get_bit<19>(cpsr) ? 0xFF000000 : 0;
    cpsr_ge |= mcl::bit::get_bit<18>(cpsr) ? 0x00FF0000 : 0;
    cpsr_ge |= mcl::bit::get_bit<17>(cpsr) ? 0x0000FF00 : 0;
    cpsr_ge |= mcl::bit::get_bit<16>(cpsr) ? 0x000000FF : 0;

    upper_location_descriptor &= 0xFFFF0000;
    // E flag, T flag
    upper_location_descriptor |= mcl::bit::get_bit<9>(cpsr) ? 2 : 0;
    upper_location_descriptor |= mcl::bit::get_bit<5>(cpsr) ? 1 : 0;
    // IT state
    upper_location_descriptor |= (cpsr >> 0) & 0b11111100'00000000;
    upper_location_descriptor |= (cpsr >> 17) & 0b00000011'00000000;

    // Other flags
    cpsr_jaifm = cpsr & 0x010001DF;
}

constexpr u32 FPCR_MASK = A32::LocationDescriptor::FPSCR_MODE_MASK;
constexpr u32 FPSR_MASK = 0xF800009F;

u32 A32JitState::Fpscr() const {
    return (upper_location_descriptor & 0xffff0000) | fpsr;
}

void A32JitState::SetFpscr(u32 fpscr) {
    fpsr = fpscr & FPSR_MASK;
    upper_location_descriptor = (upper_location_descriptor & 0x0000ffff) | (fpscr & FPCR_MASK);
}
} // namespace Dynarmic::Backend::RV64
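The GE flags are stored byte-replicated: each of CPSR.GE[3:0] expands to a full 0x00/0xFF byte in cpsr_ge, presumably so emitted SIMD code can consume it directly as a byte mask. A self-contained round-trip sketch of that packing, mirroring the shifts in SetCpsr/Cpsr above:

    #include <cassert>
    #include <cstdint>

    // Expand CPSR.GE[3:0] (bits 19:16) into one 0x00/0xFF byte per flag.
    std::uint32_t PackGe(std::uint32_t cpsr) {
        std::uint32_t ge = 0;
        ge |= (cpsr & (1u << 19)) ? 0xFF000000 : 0;
        ge |= (cpsr & (1u << 18)) ? 0x00FF0000 : 0;
        ge |= (cpsr & (1u << 17)) ? 0x0000FF00 : 0;
        ge |= (cpsr & (1u << 16)) ? 0x000000FF : 0;
        return ge;
    }

    // Compress the byte mask back into CPSR bits 19:16 (reads each byte's MSB).
    std::uint32_t UnpackGe(std::uint32_t ge) {
        std::uint32_t cpsr = 0;
        cpsr |= (ge & 0x80000000) ? 1u << 19 : 0;
        cpsr |= (ge & 0x00800000) ? 1u << 18 : 0;
        cpsr |= (ge & 0x00008000) ? 1u << 17 : 0;
        cpsr |= (ge & 0x00000080) ? 1u << 16 : 0;
        return cpsr;
    }

    int main() {
        const std::uint32_t ge_bits = (1u << 19) | (1u << 17);
        assert(UnpackGe(PackGe(ge_bits)) == ge_bits);  // round-trips exactly
        return 0;
    }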

src/dynarmic/backend/riscv64/a32_jitstate.h (new)

@@ -0,0 +1,45 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2024 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <mcl/stdint.hpp>
#include "dynarmic/frontend/A32/a32_location_descriptor.h"
#include "dynarmic/ir/location_descriptor.h"
namespace Dynarmic::Backend::RV64 {
struct A32JitState {
    u32 cpsr_nzcv = 0;
    u32 cpsr_q = 0;
    u32 cpsr_jaifm = 0;
    u32 cpsr_ge = 0;

    u32 fpsr = 0;
    u32 fpsr_nzcv = 0;

    std::array<u32, 16> regs{};

    u32 upper_location_descriptor = 0;

    alignas(16) std::array<u32, 64> ext_regs{};

    u32 exclusive_state = 0;

    u32 Cpsr() const;
    void SetCpsr(u32 cpsr);

    u32 Fpscr() const;
    void SetFpscr(u32 fpscr);

    IR::LocationDescriptor GetLocationDescriptor() const {
        return IR::LocationDescriptor{regs[15] | (static_cast<u64>(upper_location_descriptor) << 32)};
    }
};
} // namespace Dynarmic::Backend::RV64
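GetLocationDescriptor packs the guest PC (regs[15]) into the low 32 bits and upper_location_descriptor (T/E bits, IT state, FPSCR mode) into the high 32 bits, yielding the u64 key that A32AddressSpace uses for block_entries. A small sketch of composing and decomposing that key:

    #include <cassert>
    #include <cstdint>

    std::uint64_t MakeDescriptor(std::uint32_t pc, std::uint32_t upper) {
        return static_cast<std::uint64_t>(pc) | (static_cast<std::uint64_t>(upper) << 32);
    }

    int main() {
        const std::uint64_t desc = MakeDescriptor(0x00008000, /*upper=*/1);  // T bit set
        assert(static_cast<std::uint32_t>(desc) == 0x00008000);  // PC in the low half
        assert((desc >> 32) == 1);                               // upper half intact
        return 0;
    }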

src/dynarmic/backend/riscv64/dummy_code_block.h (new)

@@ -0,0 +1,16 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2024 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
namespace Dynarmic::Backend::RV64 {
class DummyCodeBlock {
public:
    DummyCodeBlock() {}

    void* ptr() { return nullptr; }
};
} // namespace Dynarmic::Backend::RV64