backend/rv64: Add a dummy code generation

Author: Yang Liu, 2023-12-31 22:14:52 +08:00 (committed by Merry)
Parent: 4324b262aa
Commit: d743fe8a2a

8 changed files with 166 additions and 14 deletions

src/dynarmic/CMakeLists.txt

@@ -410,6 +410,9 @@ if ("riscv" IN_LIST ARCHITECTURE)
         backend/riscv64/a32_interface.cpp
         backend/riscv64/a32_jitstate.cpp
         backend/riscv64/a32_jitstate.h
+        backend/riscv64/code_block.h
+        backend/riscv64/emit_riscv64.cpp
+        backend/riscv64/emit_riscv64.h
     )
 endif()

src/dynarmic/backend/riscv64/a32_address_space.cpp

@@ -5,6 +5,7 @@
 #include "dynarmic/backend/riscv64/a32_address_space.h"
+#include "dynarmic/backend/riscv64/emit_riscv64.h"
 #include "dynarmic/frontend/A32/a32_location_descriptor.h"
 #include "dynarmic/frontend/A32/translate/a32_translate.h"
 #include "dynarmic/ir/opt/passes.h"
@@ -12,7 +13,9 @@
 namespace Dynarmic::Backend::RV64 {
 
 A32AddressSpace::A32AddressSpace(const A32::UserConfig& conf)
-        : conf(conf) {
+        : conf(conf)
+        , cb(conf.code_cache_size)
+        , as(cb.ptr(), conf.code_cache_size) {
     EmitPrelude();
 }
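
The initializer order above matters: cb has to be alive before as, since the assembler is handed cb.ptr() as its output buffer, and C++ constructs members in declaration order rather than initializer-list order. A minimal compile-and-run sketch of that dependency, with stand-in types instead of CodeBlock and biscuit::Assembler:

#include <cstddef>
#include <cstdio>
#include <vector>

// Stand-ins: Buffer owns the memory, Writer only borrows a pointer into it.
struct Buffer {
    explicit Buffer(std::size_t size) : storage(size) {}
    unsigned char* ptr() { return storage.data(); }
    std::vector<unsigned char> storage;
};

struct Writer {
    Writer(unsigned char* out, std::size_t size) : out(out), size(size) {}
    unsigned char* out;
    std::size_t size;
};

class AddressSpaceLike {
public:
    explicit AddressSpaceLike(std::size_t cache_size)
            : cb(cache_size)                // constructed first: declared first
            , as(cb.ptr(), cache_size) {}   // safe: cb is already alive here

private:
    Buffer cb;  // must be declared before 'as'
    Writer as;
};

int main() {
    AddressSpaceLike space(4096);
    std::puts("ok");
}
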
@@ -47,17 +50,81 @@ void* A32AddressSpace::GetOrEmit(IR::LocationDescriptor descriptor) {
     }
 
     IR::Block ir_block = GenerateIR(descriptor);
-    void* block_entry = Emit(std::move(ir_block));
-    block_entries.insert_or_assign(descriptor.Value(), block_entry);
-    return block_entry;
+    const EmittedBlockInfo block_info = Emit(std::move(ir_block));
+    block_infos.insert_or_assign(descriptor.Value(), block_info);
+    block_entries.insert_or_assign(descriptor.Value(), block_info.entry_point);
+    return block_info.entry_point;
 }
 
 void A32AddressSpace::ClearCache() {
     block_entries.clear();
+    block_infos.clear();
+    as.RewindBuffer(reinterpret_cast<char*>(prelude_info.end_of_prelude) - reinterpret_cast<char*>(as.GetBufferPointer(0)));
 }
 
 void A32AddressSpace::EmitPrelude() {
-    ASSERT_FALSE("Unimplemented");
+    using namespace biscuit;
+    prelude_info.run_code = reinterpret_cast<PreludeInfo::RunCodeFuncType>(as.GetCursorPointer());
+
+    // TODO: Minimize this.
+    as.ADDI(sp, sp, -64 * 8);
+    for (std::uint32_t i = 1; i < 32; i += 1) {
+        if (GPR{i} == sp || GPR{i} == tp)
+            continue;
+        as.SD(GPR{i}, i * 8, sp);
+    }
+    for (std::uint32_t i = 0; i < 32; i += 1) {
+        as.FSD(FPR{i}, 32 + i * 8, sp);
+    }
+
+    as.JALR(x0, 0, a0);
+
+    prelude_info.return_from_run_code = reinterpret_cast<void*>(as.GetCursorPointer());
+
+    for (std::uint32_t i = 1; i < 32; i += 1) {
+        if (GPR{i} == sp || GPR{i} == tp)
+            continue;
+        as.LD(GPR{i}, i * 8, sp);
+    }
+    for (std::uint32_t i = 0; i < 32; i += 1) {
+        as.FLD(FPR{i}, 32 + i * 8, sp);
+    }
+    as.ADDI(sp, sp, 64 * 8);
+    as.JALR(ra);
+
+    prelude_info.end_of_prelude = reinterpret_cast<u32*>(as.GetCursorPointer());
 }
 
-void* A32AddressSpace::Emit(IR::Block) {
-    ASSERT_FALSE("Unimplemented");
+size_t A32AddressSpace::GetRemainingSize() {
+    return conf.code_cache_size - (reinterpret_cast<uintptr_t>(as.GetCursorPointer()) - reinterpret_cast<uintptr_t>(as.GetBufferPointer(0)));
+}
+
+EmittedBlockInfo A32AddressSpace::Emit(IR::Block block) {
+    if (GetRemainingSize() < 1024 * 1024) {
+        ClearCache();
+    }
+
+    EmittedBlockInfo block_info = EmitRV64(as, std::move(block));
+
+    Link(block_info);
+
+    return block_info;
+}
+
+void A32AddressSpace::Link(EmittedBlockInfo& block_info) {
+    using namespace biscuit;
+    for (auto [ptr_offset, target] : block_info.relocations) {
+        Assembler a(reinterpret_cast<u8*>(reinterpret_cast<char*>(block_info.entry_point) + ptr_offset), 4);
+        switch (target) {
+        case LinkTarget::ReturnFromRunCode: {
+            std::ptrdiff_t off = reinterpret_cast<char*>(prelude_info.return_from_run_code) - reinterpret_cast<char*>(a.GetCursorPointer());
+            a.JAL(x0, off);
+            break;
+        }
+        default:
+            ASSERT_FALSE("Invalid relocation target");
+        }
+    }
 }
 
 } // namespace Dynarmic::Backend::RV64
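
Link is the only point where an emitted block and the prelude meet: each recorded (offset, LinkTarget) pair becomes a PC-relative jump back to return_from_run_code. A standalone sketch of that arithmetic, with plain integers standing in for the real code-cache pointers and no actual instruction encoding (the real code re-runs the assembler over those 4 bytes to emit the JAL):

#include <cstdint>
#include <cstdio>
#include <map>

enum class LinkTarget { ReturnFromRunCode };

int main() {
    // Hypothetical addresses: where a block was emitted and where the prelude's
    // register-restore sequence (return_from_run_code) lives.
    const std::uintptr_t entry_point = 0x4000;
    const std::uintptr_t return_from_run_code = 0x1040;

    // Offsets recorded by the emitter, relative to the block's entry point.
    const std::map<std::ptrdiff_t, LinkTarget> relocations{
        {20, LinkTarget::ReturnFromRunCode},
    };

    for (const auto& [offset, target] : relocations) {
        const std::uintptr_t patch_site = entry_point + offset;
        // JAL is PC-relative, so the immediate is "destination minus patch site".
        const auto imm = static_cast<std::ptrdiff_t>(return_from_run_code) -
                         static_cast<std::ptrdiff_t>(patch_site);
        std::printf("patch at entry+%td: jal x0, %td\n", offset, imm);
        (void)target;
    }
}
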

src/dynarmic/backend/riscv64/a32_address_space.h

@@ -5,8 +5,11 @@
 #pragma once
 
 #include <biscuit/assembler.hpp>
 #include <tsl/robin_map.h>
 
+#include "dynarmic/backend/riscv64/code_block.h"
+#include "dynarmic/backend/riscv64/emit_riscv64.h"
 #include "dynarmic/interface/A32/config.h"
 #include "dynarmic/interface/halt_reason.h"
 #include "dynarmic/ir/basic_block.h"
@@ -26,22 +29,31 @@ public:
     void* GetOrEmit(IR::LocationDescriptor descriptor);
 
     void ClearCache();
 
 private:
     friend class A32Core;
 
     void EmitPrelude();
-    void* Emit(IR::Block ir_block);
+
+    size_t GetRemainingSize();
+    EmittedBlockInfo Emit(IR::Block ir_block);
+    void Link(EmittedBlockInfo& block);
 
     const A32::UserConfig conf;
+    CodeBlock cb;
+    biscuit::Assembler as;
+
     tsl::robin_map<u64, void*> block_entries;
+    tsl::robin_map<u64, EmittedBlockInfo> block_infos;
+
+    struct PreludeInfo {
+        u32* end_of_prelude;
+
+        using RunCodeFuncType = HaltReason (*)(void* entry_point, A32JitState* context, volatile u32* halt_reason);
+        RunCodeFuncType run_code;
+        void* return_from_run_code;
+    } prelude_info;
 };
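
PreludeInfo::run_code is the host-side entry into JIT-compiled code: the dispatcher hands it the block's entry point, the guest register state, and a halt-reason word that generated code can poll. A compile-only sketch of that call shape, with stand-in types instead of the real A32JitState and HaltReason:

#include <cstdint>

// Stand-ins for the dynarmic types referenced by RunCodeFuncType; only the
// call signature matters for this sketch.
struct A32JitState {
    std::uint32_t regs[16];
};
enum class HaltReason : std::uint32_t {};

using RunCodeFuncType = HaltReason (*)(void* entry_point, A32JitState* context, volatile std::uint32_t* halt_reason);

// A hypothetical dispatcher step: enter the block through the prelude and hand
// back whatever reason made the generated code return.
HaltReason RunOne(RunCodeFuncType run_code, void* entry_point, A32JitState& state) {
    volatile std::uint32_t halt_reason = 0;
    return run_code(entry_point, &state, &halt_reason);
}

int main() {
    // No JIT present here, so there is nothing safe to call; this only has to compile.
    return 0;
}
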

src/dynarmic/backend/riscv64/a32_interface.cpp

@@ -122,7 +122,7 @@ struct Jit::Impl final {
 
 private:
     void RequestCacheInvalidation() {
-        ASSERT_FALSE("Unimplemented");
+        // ASSERT_FALSE("Unimplemented");
 
         invalidate_entire_cache = false;
         invalid_cache_ranges.clear();

src/dynarmic/backend/riscv64/code_block.h

@@ -8,7 +8,6 @@
 #include <cstdint>
 #include <new>
 
-#include <biscuit/assembler.hpp>
 #include <sys/mman.h>
 
 namespace Dynarmic::Backend::RV64 {
@@ -17,7 +16,7 @@ class CodeBlock {
 public:
     explicit CodeBlock(std::size_t size)
             : memsize(size) {
-        mem = (std::uint32_t*)mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANON | MAP_PRIVATE, -1, 0);
+        mem = (std::uint8_t*)mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANON | MAP_PRIVATE, -1, 0);
 
         if (mem == nullptr)
             throw std::bad_alloc{};
@@ -30,13 +29,12 @@ public:
         munmap(mem, memsize);
     }
 
-    std::uint32_t* ptr() const {
+    std::uint8_t* ptr() const {
         return mem;
     }
 
 protected:
-    std::uint32_t* mem;
+    std::uint8_t* mem;
     std::size_t memsize = 0;
-    biscuit::Assembler as;
 };
 
 } // namespace Dynarmic::Backend::RV64
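
CodeBlock is the RAII owner of the read/write/execute mapping the assembler writes into; switching mem from std::uint32_t* to std::uint8_t* makes the byte-granular cursor and rewind arithmetic natural. A self-contained POSIX sketch of the same idea (this variant checks MAP_FAILED, which is what mmap actually returns on failure):

#include <cstddef>
#include <cstdint>
#include <new>
#include <sys/mman.h>

// Minimal stand-in for CodeBlock: one RWX mapping whose lifetime matches the
// JIT code cache.
class RwxBuffer {
public:
    explicit RwxBuffer(std::size_t size)
            : memsize(size) {
        void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANON | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED)
            throw std::bad_alloc{};
        mem = static_cast<std::uint8_t*>(p);
    }

    ~RwxBuffer() {
        munmap(mem, memsize);
    }

    RwxBuffer(const RwxBuffer&) = delete;
    RwxBuffer& operator=(const RwxBuffer&) = delete;

    std::uint8_t* ptr() const {
        return mem;
    }

private:
    std::uint8_t* mem;
    std::size_t memsize;
};

int main() {
    RwxBuffer cache(16 * 1024 * 1024);  // e.g. a 16 MiB code cache
    return cache.ptr() == nullptr;      // expected: 0
}
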

src/dynarmic/backend/riscv64/emit_riscv64.cpp

@@ -0,0 +1,35 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2024 MerryMage
+ * SPDX-License-Identifier: 0BSD
+ */
+
+#include "dynarmic/backend/riscv64/emit_riscv64.h"
+
+#include <biscuit/assembler.hpp>
+
+#include "dynarmic/backend/riscv64/a32_jitstate.h"
+#include "dynarmic/ir/basic_block.h"
+
+namespace Dynarmic::Backend::RV64 {
+
+EmittedBlockInfo EmitRV64(biscuit::Assembler& as, [[maybe_unused]] IR::Block block) {
+    using namespace biscuit;
+
+    EmittedBlockInfo ebi;
+    ebi.entry_point = reinterpret_cast<void*>(as.GetCursorPointer());
+
+    as.ADDIW(a0, zero, 8);
+    as.SW(a0, offsetof(A32JitState, regs) + 0 * sizeof(u32), a1);
+
+    as.ADDIW(a0, zero, 2);
+    as.SW(a0, offsetof(A32JitState, regs) + 1 * sizeof(u32), a1);
+    as.SW(a0, offsetof(A32JitState, regs) + 15 * sizeof(u32), a1);
+
+    ebi.relocations[reinterpret_cast<char*>(as.GetCursorPointer()) - reinterpret_cast<char*>(ebi.entry_point)] = LinkTarget::ReturnFromRunCode;
+    as.NOP();
+
+    ebi.size = reinterpret_cast<size_t>(as.GetCursorPointer()) - reinterpret_cast<size_t>(ebi.entry_point);
+    return ebi;
+}
+
+} // namespace Dynarmic::Backend::RV64
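
The dummy block ignores the IR entirely; in guest terms it sets r0 = 8, r1 = 2 and r15 = 2 in the jit state whose pointer arrives in a1, then falls through to the relocated jump back to the prelude. A standalone sketch of that effect and of the byte offsets the SW instructions encode (the struct here is a trimmed stand-in; the real A32JitState has more fields):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Trimmed stand-in for A32JitState; only the guest register array matters here.
struct A32JitStateShape {
    std::uint32_t regs[16];
};

// What the emitted RISC-V does, expressed in C++: a1 points at the jit state.
void DummyBlock(A32JitStateShape& state) {
    state.regs[0] = 8;
    state.regs[1] = 2;
    state.regs[15] = 2;  // r15 is the guest PC
}

int main() {
    A32JitStateShape state{};
    DummyBlock(state);
    std::printf("r0=%u r1=%u r15=%u\n", state.regs[0], state.regs[1], state.regs[15]);

    // The SW instructions address these fields by byte offset from a1:
    std::printf("offset of r0:  %zu\n", offsetof(A32JitStateShape, regs) + 0 * sizeof(std::uint32_t));
    std::printf("offset of r15: %zu\n", offsetof(A32JitStateShape, regs) + 15 * sizeof(std::uint32_t));
}
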

src/dynarmic/backend/riscv64/emit_riscv64.h

@@ -0,0 +1,33 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2024 MerryMage
+ * SPDX-License-Identifier: 0BSD
+ */
+
+#pragma once
+
+#include <mcl/stdint.hpp>
+#include <tsl/robin_map.h>
+
+namespace biscuit {
+class Assembler;
+} // namespace biscuit
+
+namespace Dynarmic::IR {
+class Block;
+} // namespace Dynarmic::IR
+
+namespace Dynarmic::Backend::RV64 {
+
+enum class LinkTarget {
+    ReturnFromRunCode,
+};
+
+struct EmittedBlockInfo {
+    void* entry_point;
+    size_t size;
+    tsl::robin_map<std::ptrdiff_t, LinkTarget> relocations;
+};
+
+EmittedBlockInfo EmitRV64(biscuit::Assembler& as, IR::Block block);
+
+} // namespace Dynarmic::Backend::RV64
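
EmittedBlockInfo is what the address space caches per guest location: the entry point for dispatch, the size for later invalidation, and the relocations so a block can be re-linked. A sketch of the GetOrEmit-style lookup built around it, with std::unordered_map standing in for tsl::robin_map and a placeholder emit step:

#include <cstdint>
#include <cstdio>
#include <unordered_map>

enum class LinkTarget { ReturnFromRunCode };

struct EmittedBlockInfo {
    void* entry_point;
    std::size_t size;
    std::unordered_map<std::ptrdiff_t, LinkTarget> relocations;  // robin_map stand-in
};

// Hypothetical cache keyed by IR::LocationDescriptor::Value(), mirroring GetOrEmit.
class BlockCache {
public:
    void* GetOrEmit(std::uint64_t descriptor_value) {
        if (const auto iter = block_infos.find(descriptor_value); iter != block_infos.end()) {
            return iter->second.entry_point;
        }
        EmittedBlockInfo info = EmitPlaceholder();  // real backend: EmitRV64 + Link
        block_infos.insert_or_assign(descriptor_value, info);
        return info.entry_point;
    }

private:
    EmittedBlockInfo EmitPlaceholder() {
        static std::uint32_t fake_code[4] = {};
        return EmittedBlockInfo{fake_code, sizeof(fake_code), {}};
    }

    std::unordered_map<std::uint64_t, EmittedBlockInfo> block_infos;
};

int main() {
    BlockCache cache;
    std::printf("first:  %p\n", cache.GetOrEmit(0x100));
    std::printf("cached: %p\n", cache.GetOrEmit(0x100));  // same pointer, no re-emit
}
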

tests/CMakeLists.txt

@@ -60,6 +60,10 @@ if (DYNARMIC_TESTS_USE_UNICORN)
     endif()
 endif()
 
+if ("riscv" IN_LIST ARCHITECTURE)
+    target_link_libraries(dynarmic_tests PRIVATE biscuit::biscuit)
+endif()
+
 if ("x86_64" IN_LIST ARCHITECTURE)
     target_link_libraries(dynarmic_tests PRIVATE xbyak::xbyak)