Add optimization flags to disable specific optimizations

parent 3eed024caf
commit 4ba1f8b9e7

13 changed files with 160 additions and 96 deletions
@@ -10,6 +10,8 @@
 #include <cstdint>
 #include <memory>
 
+#include <dynarmic/optimization_flags.h>
+
 namespace Dynarmic {
 class ExclusiveMonitor;
 } // namespace Dynarmic
@@ -99,13 +101,17 @@ struct UserConfig {
     size_t processor_id = 0;
     ExclusiveMonitor* global_monitor = nullptr;
 
-    /// When set to false, this disables all optimizations than can't otherwise be disabled
-    /// by setting other configuration options. This includes:
+    /// This selects other optimizations than can't otherwise be disabled by setting other
+    /// configuration options. This includes:
     /// - IR optimizations
     /// - Block linking optimizations
     /// - RSB optimizations
     /// This is intended to be used for debugging.
-    bool enable_optimizations = true;
+    OptimizationFlag optimizations = all_optimizations;
+
+    bool HasOptimization(OptimizationFlag f) const {
+        return (f & optimizations) != no_optimizations;
+    }
 
     // Page Table
     // The page table is used for faster memory access. If an entry in the table is nullptr,
@@ -156,9 +162,6 @@ struct UserConfig {
     /// to avoid writting certain unnecessary code only needed for cycle timers.
     bool wall_clock_cntpct = false;
 
-    /// This enables the fast dispatcher.
-    bool enable_fast_dispatch = true;
-
     /// This option relates to the CPSR.E flag. Enabling this option disables modification
     /// of CPSR.E by the emulated program, forcing it to 0.
    /// NOTE: Calling Jit::SetCpsr with CPSR.E=1 while this option is enabled may result
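As an illustration of the new interface (not part of this commit): an embedder that previously flipped the enable_optimizations and enable_fast_dispatch booleans would now mask flags off the optimizations field. The helper name and callback wiring below are assumptions for the example; the types and the optimizations field come from the headers in this diff.

    #include <dynarmic/A32/config.h>
    #include <dynarmic/optimization_flags.h>

    // Hypothetical helper: build a config with selected optimizations masked off.
    Dynarmic::A32::UserConfig MakeDebugConfig(Dynarmic::A32::UserCallbacks* callbacks) {
        Dynarmic::A32::UserConfig config;
        config.callbacks = callbacks;
        // Individual passes can now be cleared instead of the old all-or-nothing booleans.
        config.optimizations &= ~Dynarmic::OptimizationFlag::FastDispatch;
        config.optimizations &= ~Dynarmic::OptimizationFlag::ConstProp;
        return config;
    }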
@@ -10,6 +10,8 @@
 #include <cstdint>
 #include <memory>
 
+#include <dynarmic/optimization_flags.h>
+
 namespace Dynarmic {
 class ExclusiveMonitor;
 } // namespace Dynarmic
@@ -124,13 +126,17 @@ struct UserConfig {
     size_t processor_id = 0;
     ExclusiveMonitor* global_monitor = nullptr;
 
-    /// When set to false, this disables all optimizations than can't otherwise be disabled
-    /// by setting other configuration options. This includes:
+    /// This selects other optimizations than can't otherwise be disabled by setting other
+    /// configuration options. This includes:
     /// - IR optimizations
     /// - Block linking optimizations
     /// - RSB optimizations
     /// This is intended to be used for debugging.
-    bool enable_optimizations = true;
+    OptimizationFlag optimizations = all_optimizations;
+
+    bool HasOptimization(OptimizationFlag f) const {
+        return (f & optimizations) != no_optimizations;
+    }
 
     /// When set to true, UserCallbacks::DataCacheOperationRaised will be called when any
     /// data cache instruction is executed. Notably DC ZVA will not implicitly do anything.
@@ -206,9 +212,6 @@ struct UserConfig {
     /// to avoid writting certain unnecessary code only needed for cycle timers.
     bool wall_clock_cntpct = false;
 
-    /// This enables the fast dispatcher.
-    bool enable_fast_dispatch = true;
-
     // Determines whether AddTicks and GetTicksRemaining are called.
     // If false, execution will continue until soon after Jit::HaltExecution is called.
     // bool enable_ticks = true; // TODO
48	include/dynarmic/optimization_flags.h	Normal file
@@ -0,0 +1,48 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2020 MerryMage
+ * SPDX-License-Identifier: 0BSD
+ */
+
+#pragma once
+
+#include <cstdint>
+
+namespace Dynarmic {
+
+enum class OptimizationFlag : std::uint32_t {
+    BlockLinking = 0x01,
+    ReturnStackBuffer = 0x02,
+    FastDispatch = 0x04,
+    GetSetElimination = 0x08,
+    ConstProp = 0x10,
+    MiscIROpt = 0x20,
+};
+
+constexpr OptimizationFlag no_optimizations = static_cast<OptimizationFlag>(0);
+constexpr OptimizationFlag all_optimizations = static_cast<OptimizationFlag>(~std::uint32_t(0));
+
+constexpr OptimizationFlag operator~(OptimizationFlag f) {
+    return static_cast<OptimizationFlag>(~static_cast<std::uint32_t>(f));
+}
+
+constexpr OptimizationFlag operator|(OptimizationFlag f1, OptimizationFlag f2) {
+    return static_cast<OptimizationFlag>(static_cast<std::uint32_t>(f1) | static_cast<std::uint32_t>(f2));
+}
+
+constexpr OptimizationFlag operator&(OptimizationFlag f1, OptimizationFlag f2) {
+    return static_cast<OptimizationFlag>(static_cast<std::uint32_t>(f1) & static_cast<std::uint32_t>(f2));
+}
+
+constexpr OptimizationFlag operator|=(OptimizationFlag& result, OptimizationFlag f) {
+    return result = (result | f);
+}
+
+constexpr OptimizationFlag operator&=(OptimizationFlag& result, OptimizationFlag f) {
+    return result = (result & f);
+}
+
+constexpr bool operator!(OptimizationFlag f) {
+    return f == no_optimizations;
+}
+
+} // namespace Dynarmic
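A quick illustration of how these constexpr operators compose (not part of the commit; the grouping into an "IR" mask is mine for the example, all other names come from the header above):

    #include <dynarmic/optimization_flags.h>

    using Dynarmic::OptimizationFlag;

    // The IR-level passes grouped into one mask via operator|.
    constexpr OptimizationFlag ir_opts = OptimizationFlag::GetSetElimination
                                       | OptimizationFlag::ConstProp
                                       | OptimizationFlag::MiscIROpt;

    // operator& isolates a flag; operator! tests for the empty mask.
    static_assert(!!(ir_opts & OptimizationFlag::ConstProp), "ConstProp is in the IR group");
    static_assert(!(ir_opts & OptimizationFlag::FastDispatch), "FastDispatch is not an IR pass");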
@@ -7,6 +7,7 @@ add_library(dynarmic
     ../include/dynarmic/A64/a64.h
     ../include/dynarmic/A64/config.h
     ../include/dynarmic/exclusive_monitor.h
+    ../include/dynarmic/optimization_flags.h
     common/assert.cpp
     common/assert.h
     common/bit_util.h
@@ -185,7 +185,7 @@ void A32EmitX64::EmitCondPrelude(const A32EmitContext& ctx) {
 }
 
 void A32EmitX64::ClearFastDispatchTable() {
-    if (conf.enable_fast_dispatch) {
+    if (conf.HasOptimization(OptimizationFlag::FastDispatch)) {
         fast_dispatch_table.fill({});
     }
 }
@@ -272,7 +272,7 @@ void A32EmitX64::GenTerminalHandlers() {
     code.and_(eax, u32(A32JitState::RSBPtrMask));
     code.mov(dword[r15 + offsetof(A32JitState, rsb_ptr)], eax);
     code.cmp(rbx, qword[r15 + offsetof(A32JitState, rsb_location_descriptors) + rax * sizeof(u64)]);
-    if (conf.enable_fast_dispatch) {
+    if (conf.HasOptimization(OptimizationFlag::FastDispatch)) {
         code.jne(rsb_cache_miss);
     } else {
         code.jne(code.GetReturnFromRunCodeAddress());
@@ -281,7 +281,7 @@ void A32EmitX64::GenTerminalHandlers() {
     code.jmp(rax);
     PerfMapRegister(terminal_handler_pop_rsb_hint, code.getCurr(), "a32_terminal_handler_pop_rsb_hint");
 
-    if (conf.enable_fast_dispatch) {
+    if (conf.HasOptimization(OptimizationFlag::FastDispatch)) {
         code.align();
         terminal_handler_fast_dispatch_hint = code.getCurr<const void*>();
         calculate_location_descriptor();
@@ -1509,7 +1509,7 @@ void A32EmitX64::EmitSetUpperLocationDescriptor(IR::LocationDescriptor new_locat
 void A32EmitX64::EmitTerminalImpl(IR::Term::LinkBlock terminal, IR::LocationDescriptor initial_location, bool is_single_step) {
     EmitSetUpperLocationDescriptor(terminal.next, initial_location);
 
-    if (!conf.enable_optimizations || is_single_step) {
+    if (!conf.HasOptimization(OptimizationFlag::BlockLinking) || is_single_step) {
         code.mov(MJitStateReg(A32::Reg::PC), A32::LocationDescriptor{terminal.next}.PC());
         code.ReturnFromRunCode();
         return;
@@ -1538,7 +1538,7 @@ void A32EmitX64::EmitTerminalImpl(IR::Term::LinkBlock terminal, IR::LocationDesc
 void A32EmitX64::EmitTerminalImpl(IR::Term::LinkBlockFast terminal, IR::LocationDescriptor initial_location, bool is_single_step) {
     EmitSetUpperLocationDescriptor(terminal.next, initial_location);
 
-    if (!conf.enable_optimizations || is_single_step) {
+    if (!conf.HasOptimization(OptimizationFlag::BlockLinking) || is_single_step) {
         code.mov(MJitStateReg(A32::Reg::PC), A32::LocationDescriptor{terminal.next}.PC());
         code.ReturnFromRunCode();
         return;
@@ -1553,7 +1553,7 @@ void A32EmitX64::EmitTerminalImpl(IR::Term::LinkBlockFast terminal, IR::Location
 }
 
 void A32EmitX64::EmitTerminalImpl(IR::Term::PopRSBHint, IR::LocationDescriptor, bool is_single_step) {
-    if (!conf.enable_optimizations || is_single_step) {
+    if (!conf.HasOptimization(OptimizationFlag::ReturnStackBuffer) || is_single_step) {
         code.ReturnFromRunCode();
         return;
     }
@@ -1562,11 +1562,12 @@ void A32EmitX64::EmitTerminalImpl(IR::Term::PopRSBHint, IR::LocationDescriptor,
 }
 
 void A32EmitX64::EmitTerminalImpl(IR::Term::FastDispatchHint, IR::LocationDescriptor, bool is_single_step) {
-    if (conf.enable_fast_dispatch && !is_single_step) {
-        code.jmp(terminal_handler_fast_dispatch_hint);
-    } else {
+    if (!conf.HasOptimization(OptimizationFlag::FastDispatch) || is_single_step) {
         code.ReturnFromRunCode();
+        return;
     }
+
+    code.jmp(terminal_handler_fast_dispatch_hint);
 }
 
 void A32EmitX64::EmitTerminalImpl(IR::Term::If terminal, IR::LocationDescriptor initial_location, bool is_single_step) {
@@ -1624,7 +1625,7 @@ void A32EmitX64::EmitPatchMovRcx(CodePtr target_code_ptr) {
 
 void A32EmitX64::Unpatch(const IR::LocationDescriptor& location) {
     EmitX64::Unpatch(location);
-    if (conf.enable_fast_dispatch) {
+    if (conf.HasOptimization(OptimizationFlag::FastDispatch)) {
         (*fast_dispatch_table_lookup)(location.Value()) = {};
     }
 }
@@ -176,9 +176,11 @@
         }
 
         IR::Block ir_block = A32::Translate(A32::LocationDescriptor{descriptor}, [this](u32 vaddr) { return conf.callbacks->MemoryReadCode(vaddr); }, {conf.define_unpredictable_behaviour, conf.hook_hint_instructions});
-        if (conf.enable_optimizations) {
+        if (conf.HasOptimization(OptimizationFlag::GetSetElimination)) {
             Optimization::A32GetSetElimination(ir_block);
             Optimization::DeadCodeElimination(ir_block);
+        }
+        if (conf.HasOptimization(OptimizationFlag::ConstProp)) {
             Optimization::A32ConstantMemoryReads(ir_block, conf.callbacks);
             Optimization::ConstantPropagation(ir_block);
             Optimization::DeadCodeElimination(ir_block);
@@ -140,7 +140,7 @@ void A64EmitX64::InvalidateCacheRanges(const boost::icl::interval_set<u64>& rang
 }
 
 void A64EmitX64::ClearFastDispatchTable() {
-    if (conf.enable_fast_dispatch) {
+    if (conf.HasOptimization(OptimizationFlag::FastDispatch)) {
         fast_dispatch_table.fill({});
     }
 }
@@ -319,7 +319,7 @@ void A64EmitX64::GenTerminalHandlers() {
     code.and_(eax, u32(A64JitState::RSBPtrMask));
     code.mov(dword[r15 + offsetof(A64JitState, rsb_ptr)], eax);
     code.cmp(rbx, qword[r15 + offsetof(A64JitState, rsb_location_descriptors) + rax * sizeof(u64)]);
-    if (conf.enable_fast_dispatch) {
+    if (conf.HasOptimization(OptimizationFlag::FastDispatch)) {
         code.jne(rsb_cache_miss);
     } else {
         code.jne(code.GetReturnFromRunCodeAddress());
@@ -328,7 +328,7 @@ void A64EmitX64::GenTerminalHandlers() {
     code.jmp(rax);
     PerfMapRegister(terminal_handler_pop_rsb_hint, code.getCurr(), "a64_terminal_handler_pop_rsb_hint");
 
-    if (conf.enable_fast_dispatch) {
+    if (conf.HasOptimization(OptimizationFlag::FastDispatch)) {
         code.align();
         terminal_handler_fast_dispatch_hint = code.getCurr<const void*>();
         calculate_location_descriptor();
@@ -362,7 +362,7 @@ void A64EmitX64::GenTerminalHandlers() {
 }
 
 void A64EmitX64::EmitPushRSB(EmitContext& ctx, IR::Inst* inst) {
-    if (!conf.enable_optimizations) {
+    if (!conf.HasOptimization(OptimizationFlag::ReturnStackBuffer)) {
         return;
     }
 
@@ -1212,7 +1212,7 @@ void A64EmitX64::EmitTerminalImpl(IR::Term::ReturnToDispatch, IR::LocationDescri
 }
 
 void A64EmitX64::EmitTerminalImpl(IR::Term::LinkBlock terminal, IR::LocationDescriptor, bool is_single_step) {
-    if (!conf.enable_optimizations || is_single_step) {
+    if (!conf.HasOptimization(OptimizationFlag::BlockLinking) || is_single_step) {
         code.mov(rax, A64::LocationDescriptor{terminal.next}.PC());
         code.mov(qword[r15 + offsetof(A64JitState, pc)], rax);
         code.ReturnFromRunCode();
@@ -1233,7 +1233,7 @@ void A64EmitX64::EmitTerminalImpl(IR::Term::LinkBlock terminal, IR::LocationDesc
 }
 
 void A64EmitX64::EmitTerminalImpl(IR::Term::LinkBlockFast terminal, IR::LocationDescriptor, bool is_single_step) {
-    if (!conf.enable_optimizations || is_single_step) {
+    if (!conf.HasOptimization(OptimizationFlag::BlockLinking) || is_single_step) {
         code.mov(rax, A64::LocationDescriptor{terminal.next}.PC());
         code.mov(qword[r15 + offsetof(A64JitState, pc)], rax);
         code.ReturnFromRunCode();
@@ -1249,7 +1249,7 @@ void A64EmitX64::EmitTerminalImpl(IR::Term::LinkBlockFast terminal, IR::Location
 }
 
 void A64EmitX64::EmitTerminalImpl(IR::Term::PopRSBHint, IR::LocationDescriptor, bool is_single_step) {
-    if (!conf.enable_optimizations || is_single_step) {
+    if (!conf.HasOptimization(OptimizationFlag::ReturnStackBuffer) || is_single_step) {
         code.ReturnFromRunCode();
         return;
     }
@@ -1258,11 +1258,12 @@ void A64EmitX64::EmitTerminalImpl(IR::Term::PopRSBHint, IR::LocationDescriptor,
 }
 
 void A64EmitX64::EmitTerminalImpl(IR::Term::FastDispatchHint, IR::LocationDescriptor, bool is_single_step) {
-    if (conf.enable_fast_dispatch && !is_single_step) {
-        code.jmp(terminal_handler_fast_dispatch_hint);
-    } else {
+    if (!conf.HasOptimization(OptimizationFlag::FastDispatch) || is_single_step) {
         code.ReturnFromRunCode();
+        return;
     }
+
+    code.jmp(terminal_handler_fast_dispatch_hint);
 }
 
 void A64EmitX64::EmitTerminalImpl(IR::Term::If terminal, IR::LocationDescriptor initial_location, bool is_single_step) {
@@ -1330,7 +1331,7 @@ void A64EmitX64::EmitPatchMovRcx(CodePtr target_code_ptr) {
 
 void A64EmitX64::Unpatch(const IR::LocationDescriptor& location) {
     EmitX64::Unpatch(location);
-    if (conf.enable_fast_dispatch) {
+    if (conf.HasOptimization(OptimizationFlag::FastDispatch)) {
         (*fast_dispatch_table_lookup)(location.Value()) = {};
     }
 }
@@ -249,14 +249,17 @@
         IR::Block ir_block = A64::Translate(A64::LocationDescriptor{current_location}, get_code,
                                             {conf.define_unpredictable_behaviour, conf.wall_clock_cntpct});
         Optimization::A64CallbackConfigPass(ir_block, conf);
-        if (conf.enable_optimizations) {
+        if (conf.HasOptimization(OptimizationFlag::GetSetElimination)) {
             Optimization::A64GetSetElimination(ir_block);
             Optimization::DeadCodeElimination(ir_block);
+        }
+        if (conf.HasOptimization(OptimizationFlag::ConstProp)) {
             Optimization::ConstantPropagation(ir_block);
             Optimization::DeadCodeElimination(ir_block);
+        }
+        if (conf.HasOptimization(OptimizationFlag::MiscIROpt)) {
             Optimization::A64MergeInterpretBlocksPass(ir_block, conf.callbacks);
         }
-        // printf("%s\n", IR::DumpBlock(ir_block).c_str());
         Optimization::VerificationPass(ir_block);
         return emitter.Emit(ir_block).entrypoint;
     }
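With the passes gated individually as above, an embedder chasing a miscompilation can switch off just the IR-level passes while leaving block linking, the RSB and fast dispatch enabled. A sketch (not from the commit), assuming env supplies the callbacks as in the tests below:

    using Dynarmic::OptimizationFlag;

    Dynarmic::A64::UserConfig conf{&env};
    // Clear only the IR passes gated in the hunk above.
    conf.optimizations &= ~(OptimizationFlag::GetSetElimination
                            | OptimizationFlag::ConstProp
                            | OptimizationFlag::MiscIROpt);
    Dynarmic::A64::Jit jit{conf};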
@@ -147,7 +147,7 @@ u32 GenRandomInst(u32 pc, bool is_last_inst) {
 
 Dynarmic::A32::UserConfig GetUserConfig(ArmTestEnv& testenv) {
     Dynarmic::A32::UserConfig user_config;
-    user_config.enable_fast_dispatch = false;
+    user_config.optimizations &= ~OptimizationFlag::FastDispatch;
     user_config.callbacks = &testenv;
     user_config.always_little_endian = true;
     return user_config;
@@ -28,9 +28,11 @@
 #include "testenv.h"
 #include "unicorn_emu/a32_unicorn.h"
 
-static Dynarmic::A32::UserConfig GetUserConfig(ThumbTestEnv* testenv) {
-    Dynarmic::A32::UserConfig user_config;
-    user_config.enable_fast_dispatch = false;
+using namespace Dynarmic;
+
+static A32::UserConfig GetUserConfig(ThumbTestEnv* testenv) {
+    A32::UserConfig user_config;
+    user_config.optimizations &= ~OptimizationFlag::FastDispatch;
     user_config.callbacks = testenv;
     return user_config;
 }
@@ -76,7 +78,7 @@ private:
     std::function<bool(u16)> is_valid;
 };
 
-static bool DoesBehaviorMatch(const A32Unicorn<ThumbTestEnv>& uni, const Dynarmic::A32::Jit& jit,
+static bool DoesBehaviorMatch(const A32Unicorn<ThumbTestEnv>& uni, const A32::Jit& jit,
                               const WriteRecords& interp_write_records, const WriteRecords& jit_write_records) {
     const auto interp_regs = uni.GetRegisters();
     const auto jit_regs = jit.Regs();
@@ -86,7 +88,7 @@ static bool DoesBehaviorMatch(const A32Unicorn<ThumbTestEnv>& uni, const Dynarmi
            interp_write_records == jit_write_records;
 }
 
-static void RunInstance(size_t run_number, ThumbTestEnv& test_env, A32Unicorn<ThumbTestEnv>& uni, Dynarmic::A32::Jit& jit, const ThumbTestEnv::RegisterArray& initial_regs,
+static void RunInstance(size_t run_number, ThumbTestEnv& test_env, A32Unicorn<ThumbTestEnv>& uni, A32::Jit& jit, const ThumbTestEnv::RegisterArray& initial_regs,
                         size_t instruction_count, size_t instructions_to_execute_count) {
     uni.ClearPageCache();
     jit.ClearCache();
@@ -126,7 +128,7 @@ static void RunInstance(size_t run_number, ThumbTestEnv& test_env, A32Unicorn<Th
 
         printf("\nInstruction Listing: \n");
         for (size_t i = 0; i < instruction_count; i++) {
-            printf("%04x %s\n", test_env.code_mem[i], Dynarmic::A32::DisassembleThumb16(test_env.code_mem[i]).c_str());
+            printf("%04x %s\n", test_env.code_mem[i], A32::DisassembleThumb16(test_env.code_mem[i]).c_str());
         }
 
         printf("\nInitial Register Listing: \n");
@@ -152,20 +154,20 @@ static void RunInstance(size_t run_number, ThumbTestEnv& test_env, A32Unicorn<Th
             printf("[%08x] = %02x\n", record.first, record.second);
         }
 
-        Dynarmic::A32::PSR cpsr;
+        A32::PSR cpsr;
         cpsr.T(true);
 
         size_t num_insts = 0;
        while (num_insts < instructions_to_execute_count) {
-            Dynarmic::A32::LocationDescriptor descriptor = {u32(num_insts * 4), cpsr, Dynarmic::A32::FPSCR{}};
-            Dynarmic::IR::Block ir_block = Dynarmic::A32::Translate(descriptor, [&test_env](u32 vaddr) { return test_env.MemoryReadCode(vaddr); }, {});
-            Dynarmic::Optimization::A32GetSetElimination(ir_block);
-            Dynarmic::Optimization::DeadCodeElimination(ir_block);
-            Dynarmic::Optimization::A32ConstantMemoryReads(ir_block, &test_env);
-            Dynarmic::Optimization::ConstantPropagation(ir_block);
-            Dynarmic::Optimization::DeadCodeElimination(ir_block);
-            Dynarmic::Optimization::VerificationPass(ir_block);
-            printf("\n\nIR:\n%s", Dynarmic::IR::DumpBlock(ir_block).c_str());
+            A32::LocationDescriptor descriptor = {u32(num_insts * 4), cpsr, A32::FPSCR{}};
+            IR::Block ir_block = A32::Translate(descriptor, [&test_env](u32 vaddr) { return test_env.MemoryReadCode(vaddr); }, {});
+            Optimization::A32GetSetElimination(ir_block);
+            Optimization::DeadCodeElimination(ir_block);
+            Optimization::A32ConstantMemoryReads(ir_block, &test_env);
+            Optimization::ConstantPropagation(ir_block);
+            Optimization::DeadCodeElimination(ir_block);
+            Optimization::VerificationPass(ir_block);
+            printf("\n\nIR:\n%s", IR::DumpBlock(ir_block).c_str());
             printf("\n\nx86_64:\n%s", jit.Disassemble().c_str());
             num_insts += ir_block.CycleCount();
         }
@@ -186,7 +188,7 @@ void FuzzJitThumb(const size_t instruction_count, const size_t instructions_to_e
 
     // Prepare test subjects
     A32Unicorn uni{test_env};
-    Dynarmic::A32::Jit jit{GetUserConfig(&test_env)};
+    A32::Jit jit{GetUserConfig(&test_env)};
 
     for (size_t run_number = 0; run_number < run_count; run_number++) {
         ThumbTestEnv::RegisterArray initial_regs;
@@ -210,9 +212,9 @@ TEST_CASE("Fuzz Thumb instructions set 1", "[JitX64][Thumb]") {
         ThumbInstGen("010000ooooxxxxxx"), // Data Processing
         ThumbInstGen("010001000hxxxxxx"), // ADD (high registers)
         ThumbInstGen("0100010101xxxxxx", // CMP (high registers)
-                     [](u16 inst){ return Dynarmic::Common::Bits<3, 5>(inst) != 0b111; }), // R15 is UNPREDICTABLE
+                     [](u16 inst){ return Common::Bits<3, 5>(inst) != 0b111; }), // R15 is UNPREDICTABLE
         ThumbInstGen("0100010110xxxxxx", // CMP (high registers)
-                     [](u16 inst){ return Dynarmic::Common::Bits<0, 2>(inst) != 0b111; }), // R15 is UNPREDICTABLE
+                     [](u16 inst){ return Common::Bits<0, 2>(inst) != 0b111; }), // R15 is UNPREDICTABLE
         ThumbInstGen("010001100hxxxxxx"), // MOV (high registers)
         ThumbInstGen("10110000oxxxxxxx"), // Adjust stack pointer
         ThumbInstGen("10110010ooxxxxxx"), // SXT/UXT
@@ -225,15 +227,15 @@ TEST_CASE("Fuzz Thumb instructions set 1", "[JitX64][Thumb]") {
         ThumbInstGen("1000xxxxxxxxxxxx"), // LDRH/STRH Rd, [Rn, #offset]
         ThumbInstGen("1001xxxxxxxxxxxx"), // LDR/STR Rd, [SP, #]
         ThumbInstGen("1011010xxxxxxxxx", // PUSH
-                     [](u16 inst){ return Dynarmic::Common::Bits<0, 7>(inst) != 0; }), // Empty reg_list is UNPREDICTABLE
+                     [](u16 inst){ return Common::Bits<0, 7>(inst) != 0; }), // Empty reg_list is UNPREDICTABLE
         ThumbInstGen("10111100xxxxxxxx", // POP (P = 0)
-                     [](u16 inst){ return Dynarmic::Common::Bits<0, 7>(inst) != 0; }), // Empty reg_list is UNPREDICTABLE
+                     [](u16 inst){ return Common::Bits<0, 7>(inst) != 0; }), // Empty reg_list is UNPREDICTABLE
         ThumbInstGen("1100xxxxxxxxxxxx", // STMIA/LDMIA
                      [](u16 inst) {
                          // Ensure that the architecturally undefined case of
                          // the base register being within the list isn't hit.
-                         const u32 rn = Dynarmic::Common::Bits<8, 10>(inst);
-                         return (inst & (1U << rn)) == 0 && Dynarmic::Common::Bits<0, 7>(inst) != 0;
+                         const u32 rn = Common::Bits<8, 10>(inst);
+                         return (inst & (1U << rn)) == 0 && Common::Bits<0, 7>(inst) != 0;
                      }),
         // TODO: We should properly test against swapped
         // endianness cases, however Unicorn doesn't
@@ -277,7 +279,7 @@ TEST_CASE("Fuzz Thumb instructions set 2 (affects PC)", "[JitX64][Thumb]") {
 #if 0
         ThumbInstGen("01000111xmmmm000", // BLX/BX
                      [](u16 inst){
-                         const u32 Rm = Dynarmic::Common::Bits<3, 6>(inst);
+                         const u32 Rm = Common::Bits<3, 6>(inst);
                          return Rm != 15;
                      }),
 #endif
@@ -287,7 +289,7 @@ TEST_CASE("Fuzz Thumb instructions set 2 (affects PC)", "[JitX64][Thumb]") {
         ThumbInstGen("01000110h0xxxxxx"), // MOV (high registers)
         ThumbInstGen("1101ccccxxxxxxxx", // B<cond>
                      [](u16 inst){
-                         const u32 c = Dynarmic::Common::Bits<9, 12>(inst);
+                         const u32 c = Common::Bits<9, 12>(inst);
                          return c < 0b1110; // Don't want SWI or undefined instructions.
                      }),
         ThumbInstGen("1011o0i1iiiiinnn"), // CBZ/CBNZ
@@ -315,7 +317,7 @@ TEST_CASE("Verify fix for off by one error in MemoryRead32 worked", "[Thumb]") {
 
     // Prepare test subjects
     A32Unicorn<ThumbTestEnv> uni{test_env};
-    Dynarmic::A32::Jit jit{GetUserConfig(&test_env)};
+    A32::Jit jit{GetUserConfig(&test_env)};
 
     constexpr ThumbTestEnv::RegisterArray initial_regs {
         0xe90ecd70,
@@ -13,7 +13,7 @@ using namespace Dynarmic;
 
 static A32::UserConfig GetUserConfig(ArmTestEnv* testenv) {
     A32::UserConfig user_config;
-    user_config.enable_fast_dispatch = false;
+    user_config.optimizations &= ~OptimizationFlag::FastDispatch;
     user_config.callbacks = testenv;
     return user_config;
 }
@@ -243,7 +243,7 @@ TEST_CASE("arm: Test InvalidateCacheRange", "[arm][A32]") {
 TEST_CASE("arm: Step blx", "[arm]") {
     ArmTestEnv test_env;
     A32::UserConfig config = GetUserConfig(&test_env);
-    config.enable_fast_dispatch = true;
+    config.optimizations |= OptimizationFlag::FastDispatch;
     Dynarmic::A32::Jit jit{config};
     test_env.code_mem = {
         0xe12fff30, // blx r0
@@ -271,7 +271,7 @@ TEST_CASE("arm: Step blx", "[arm]") {
 TEST_CASE("arm: Step bx", "[arm]") {
     ArmTestEnv test_env;
     A32::UserConfig config = GetUserConfig(&test_env);
-    config.enable_fast_dispatch = true;
+    config.optimizations |= OptimizationFlag::FastDispatch;
     Dynarmic::A32::Jit jit{config};
     test_env.code_mem = {
         0xe12fff10, // bx r0
@@ -10,11 +10,11 @@
 #include "common/fp/fpsr.h"
 #include "testenv.h"
 
-namespace FP = Dynarmic::FP;
+using namespace Dynarmic;
 
 TEST_CASE("A64: ADD", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x8b020020); // ADD X0, X1, X2
     env.code_mem.emplace_back(0x14000000); // B .
@@ -35,7 +35,7 @@ TEST_CASE("A64: ADD", "[a64]") {
 
 TEST_CASE("A64: REV", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0xdac00c00); // REV X0, X0
     env.code_mem.emplace_back(0x5ac00821); // REV W1, W1
@@ -55,7 +55,7 @@ TEST_CASE("A64: REV", "[a64]") {
 
 TEST_CASE("A64: REV32", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0xdac00800); // REV32 X0, X0
     env.code_mem.emplace_back(0x14000000); // B .
@@ -71,7 +71,7 @@ TEST_CASE("A64: REV32", "[a64]") {
 
 TEST_CASE("A64: REV16", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0xdac00400); // REV16 X0, X0
     env.code_mem.emplace_back(0x5ac00421); // REV16 W1, W1
@@ -91,7 +91,7 @@ TEST_CASE("A64: REV16", "[a64]") {
 
 TEST_CASE("A64: AND", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x8a020020); // AND X0, X1, X2
     env.code_mem.emplace_back(0x14000000); // B .
@@ -112,7 +112,7 @@ TEST_CASE("A64: AND", "[a64]") {
 
 TEST_CASE("A64: Bitmasks", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x3200c3e0); // ORR W0, WZR, #0x01010101
     env.code_mem.emplace_back(0x320c8fe1); // ORR W1, WZR, #0x00F000F0
@@ -132,7 +132,7 @@ TEST_CASE("A64: Bitmasks", "[a64]") {
 
 TEST_CASE("A64: ANDS NZCV", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x6a020020); // ANDS W0, W1, W2
     env.code_mem.emplace_back(0x14000000); // B .
@@ -187,7 +187,7 @@ TEST_CASE("A64: ANDS NZCV", "[a64]") {
 
 TEST_CASE("A64: CBZ", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x34000060); // 0x00 : CBZ X0, label
     env.code_mem.emplace_back(0x320003e2); // 0x04 : MOV X2, 1
@@ -220,7 +220,7 @@ TEST_CASE("A64: CBZ", "[a64]") {
 
 TEST_CASE("A64: TBZ", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x36180060); // 0x00 : TBZ X0, 3, label
     env.code_mem.emplace_back(0x320003e2); // 0x04 : MOV X2, 1
@@ -264,7 +264,7 @@ TEST_CASE("A64: TBZ", "[a64]") {
 
 TEST_CASE("A64: FABD", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x6eb5d556); // FABD.4S V22, V10, V21
     env.code_mem.emplace_back(0x14000000); // B .
@@ -281,9 +281,9 @@ TEST_CASE("A64: FABD", "[a64]") {
 
 TEST_CASE("A64: 128-bit exclusive read/write", "[a64]") {
     A64TestEnv env;
-    Dynarmic::ExclusiveMonitor monitor{1};
+    ExclusiveMonitor monitor{1};
 
-    Dynarmic::A64::UserConfig conf;
+    A64::UserConfig conf;
     conf.callbacks = &env;
     conf.processor_id = 0;
 
@@ -291,7 +291,7 @@ TEST_CASE("A64: 128-bit exclusive read/write", "[a64]") {
         conf.global_monitor = &monitor;
     }
 
-    Dynarmic::A64::Jit jit{conf};
+    A64::Jit jit{conf};
 
     env.code_mem.emplace_back(0xc87f0861); // LDXP X1, X2, [X3]
     env.code_mem.emplace_back(0xc8241865); // STXP W4, X5, X6, [X3]
@@ -315,7 +315,7 @@ TEST_CASE("A64: 128-bit exclusive read/write", "[a64]") {
 
 TEST_CASE("A64: CNTPCT_EL0", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0xd53be021); // MRS X1, CNTPCT_EL0
     env.code_mem.emplace_back(0xd503201f); // NOP
@@ -336,7 +336,7 @@ TEST_CASE("A64: CNTPCT_EL0", "[a64]") {
 
 TEST_CASE("A64: FNMSUB 1", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x1f618a9c); // FNMSUB D28, D20, D1, D2
     env.code_mem.emplace_back(0x14000000); // B .
@@ -354,7 +354,7 @@ TEST_CASE("A64: FNMSUB 1", "[a64]") {
 
 TEST_CASE("A64: FNMSUB 2", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x1f2ab88e); // FNMSUB S14, S4, S10, S14
     env.code_mem.emplace_back(0x14000000); // B .
@@ -373,7 +373,7 @@ TEST_CASE("A64: FNMSUB 2", "[a64]") {
 
 TEST_CASE("A64: FMADD", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x1f5e0e4a); // FMADD D10, D18, D30, D3
     env.code_mem.emplace_back(0x14000000); // B .
@@ -392,7 +392,7 @@ TEST_CASE("A64: FMADD", "[a64]") {
 
 TEST_CASE("A64: FMLA.4S (denormal)", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x4e2fcccc); // FMLA.4S V12, V6, V15
     env.code_mem.emplace_back(0x14000000); // B .
@@ -411,7 +411,7 @@ TEST_CASE("A64: FMLA.4S (denormal)", "[a64]") {
 
 TEST_CASE("A64: FMLA.4S (0x80800000)", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x4e38cc2b); // FMLA.4S V11, V1, V24
     env.code_mem.emplace_back(0x14000000); // B .
@@ -433,7 +433,7 @@ TEST_CASE("A64: FMLA.4S (0x80800000)", "[a64]") {
 // x64 performs rounding before flushing-to-zero.
 TEST_CASE("A64: FMADD (0x80800000)", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x1f0f7319); // FMADD S25, S24, S15, S28
     env.code_mem.emplace_back(0x14000000); // B .
@@ -452,7 +452,7 @@ TEST_CASE("A64: FMADD (0x80800000)", "[a64]") {
 
 TEST_CASE("A64: FNEG failed to zero upper", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x2ea0fb50); // FNEG.2S V16, V26
     env.code_mem.emplace_back(0x2e207a1c); // SQNEG.8B V28, V16
@@ -471,7 +471,7 @@ TEST_CASE("A64: FNEG failed to zero upper", "[a64]") {
 
 TEST_CASE("A64: FRSQRTS", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x5eb8fcad); // FRSQRTS S13, S5, S24
     env.code_mem.emplace_back(0x14000000); // B .
@@ -493,7 +493,7 @@ TEST_CASE("A64: FRSQRTS", "[a64]") {
 
 TEST_CASE("A64: SQDMULH.8H (saturate)", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x4e62b420); // SQDMULH.8H V0, V1, V2
     env.code_mem.emplace_back(0x14000000); // B .
@@ -514,7 +514,7 @@ TEST_CASE("A64: SQDMULH.8H (saturate)", "[a64]") {
 
 TEST_CASE("A64: SQDMULH.4S (saturate)", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0x4ea2b420); // SQDMULH.4S V0, V1, V2
     env.code_mem.emplace_back(0x14000000); // B .
@@ -535,9 +535,9 @@ TEST_CASE("A64: SQDMULH.4S (saturate)", "[a64]") {
 
 TEST_CASE("A64: This is an infinite loop if fast dispatch is enabled", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::UserConfig conf{&env};
-    conf.enable_fast_dispatch = false;
-    Dynarmic::A64::Jit jit{conf};
+    A64::UserConfig conf{&env};
+    conf.optimizations &= ~OptimizationFlag::FastDispatch;
+    A64::Jit jit{conf};
 
     env.code_mem.emplace_back(0x2ef998fa);
     env.code_mem.emplace_back(0x2ef41c11);
@@ -552,7 +552,7 @@ TEST_CASE("A64: This is an infinite loop if fast dispatch is enabled", "[a64]")
 
 TEST_CASE("A64: Optimization failure when folding ADD", "[a64]") {
     A64TestEnv env;
-    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+    A64::Jit jit{A64::UserConfig{&env}};
 
     env.code_mem.emplace_back(0xbc4f84be); // LDR S30, [X5], #248
     env.code_mem.emplace_back(0x9a0c00ea); // ADC X10, X7, X12
@@ -147,7 +147,7 @@ static u32 GenFloatInst(u64 pc, bool is_last_inst) {
 
 static Dynarmic::A64::UserConfig GetUserConfig(A64TestEnv& jit_env) {
     Dynarmic::A64::UserConfig jit_user_config{&jit_env};
-    jit_user_config.enable_fast_dispatch = false;
+    jit_user_config.optimizations &= ~OptimizationFlag::FastDispatch;
     // The below corresponds to the settings for qemu's aarch64_max_initfn
     jit_user_config.dczid_el0 = 7;
    jit_user_config.ctr_el0 = 0x80038003;
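The test updates above all follow one mechanical migration from the removed booleans to the new mask. A hypothetical helper capturing that mapping (a sketch, not in the commit):

    #include <dynarmic/A64/config.h>

    void ApplyLegacyFlags(Dynarmic::A64::UserConfig& conf, bool enable_optimizations, bool enable_fast_dispatch) {
        using Dynarmic::OptimizationFlag;
        // was: conf.enable_optimizations = enable_optimizations;
        conf.optimizations = enable_optimizations ? Dynarmic::all_optimizations : Dynarmic::no_optimizations;
        // was: conf.enable_fast_dispatch = enable_fast_dispatch; (previously an independent switch)
        if (enable_fast_dispatch) {
            conf.optimizations |= OptimizationFlag::FastDispatch;
        } else {
            conf.optimizations &= ~OptimizationFlag::FastDispatch;
        }
    }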