suyu/src/core/arm/dynarmic/arm_dynarmic.cpp

// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <cinttypes>
#include <memory>
#include <dynarmic/A64/a64.h>
#include <dynarmic/A64/config.h>
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "core/arm/dynarmic/arm_dynarmic.h"
#include "core/core.h"
#include "core/core_cpu.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/gdbstub/gdbstub.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
namespace Core {
using Vector = Dynarmic::A64::Vector;
class ARM_Dynarmic_Callbacks : public Dynarmic::A64::UserCallbacks {
public:
explicit ARM_Dynarmic_Callbacks(ARM_Dynarmic& parent) : parent(parent) {}
u8 MemoryRead8(u64 vaddr) override {
return Memory::Read8(vaddr);
}
u16 MemoryRead16(u64 vaddr) override {
return Memory::Read16(vaddr);
}
u32 MemoryRead32(u64 vaddr) override {
return Memory::Read32(vaddr);
}
u64 MemoryRead64(u64 vaddr) override {
return Memory::Read64(vaddr);
}
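// Dynarmic models 128-bit accesses as a pair of 64-bit halves, so they are emulated here
// with two consecutive 64-bit memory operations.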
Vector MemoryRead128(u64 vaddr) override {
return {Memory::Read64(vaddr), Memory::Read64(vaddr + 8)};
}
void MemoryWrite8(u64 vaddr, u8 value) override {
Memory::Write8(vaddr, value);
}
void MemoryWrite16(u64 vaddr, u16 value) override {
Memory::Write16(vaddr, value);
}
void MemoryWrite32(u64 vaddr, u32 value) override {
Memory::Write32(vaddr, value);
}
void MemoryWrite64(u64 vaddr, u64 value) override {
Memory::Write64(vaddr, value);
}
void MemoryWrite128(u64 vaddr, Vector value) override {
Memory::Write64(vaddr, value[0]);
Memory::Write64(vaddr + 8, value[1]);
}
void InterpreterFallback(u64 pc, std::size_t num_instructions) override {
LOG_INFO(Core_ARM, "Unicorn fallback @ 0x{:X} for {} instructions (instr = {:08X})", pc,
num_instructions, MemoryReadCode(pc));
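// Round-trip the CPU state through the interpreter: save the JIT context, let Unicorn
// execute the requested instructions, then load the resulting state back into the JIT.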
ARM_Interface::ThreadContext ctx;
parent.SaveContext(ctx);
parent.inner_unicorn.LoadContext(ctx);
parent.inner_unicorn.ExecuteInstructions(static_cast<int>(num_instructions));
parent.inner_unicorn.SaveContext(ctx);
parent.LoadContext(ctx);
num_interpreted_instructions += num_instructions;
}
void ExceptionRaised(u64 pc, Dynarmic::A64::Exception exception) override {
switch (exception) {
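// Hint-style exceptions (WFI/WFE/SEV/SEVL/YIELD) carry no state that needs to be emulated
// here, so they are ignored.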
case Dynarmic::A64::Exception::WaitForInterrupt:
case Dynarmic::A64::Exception::WaitForEvent:
case Dynarmic::A64::Exception::SendEvent:
case Dynarmic::A64::Exception::SendEventLocal:
case Dynarmic::A64::Exception::Yield:
return;
case Dynarmic::A64::Exception::Breakpoint:
if (GDBStub::IsServerEnabled()) {
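// Halt the JIT, mirror the current state into the active thread's context, and
// report SIGTRAP (signal 5) to the attached GDB client.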
parent.jit->HaltExecution();
parent.SetPC(pc);
Kernel::Thread* thread = Kernel::GetCurrentThread();
parent.SaveContext(thread->GetContext());
GDBStub::Break();
GDBStub::SendTrap(thread, 5);
return;
}
[[fallthrough]];
default:
ASSERT_MSG(false, "ExceptionRaised(exception = {}, pc = {:X})",
static_cast<std::size_t>(exception), pc);
}
}
void CallSVC(u32 swi) override {
Kernel::CallSVC(parent.system, swi);
}
void AddTicks(u64 ticks) override {
// Divide the number of ticks by the number of CPU cores. TODO(Subv): This yields only a
// rough approximation of the number of ticks actually executed in the system, and it may be
// thrown off if the cores are not doing a similar amount of work. Instead, we should devise
// a way to keep timing consistent across all cores without multiplying the ticks by the
// core count.
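// Interpreted instructions are subtracted below, presumably because the Unicorn fallback
// has already accounted for their ticks and counting them again would double-bill them.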
u64 amortized_ticks = (ticks - num_interpreted_instructions) / Core::NUM_CPU_CORES;
// Always execute at least one tick.
amortized_ticks = std::max<u64>(amortized_ticks, 1);
parent.system.CoreTiming().AddTicks(amortized_ticks);
num_interpreted_instructions = 0;
}
u64 GetTicksRemaining() override {
return std::max(parent.system.CoreTiming().GetDowncount(), 0);
}
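// CNTPCT_EL0: convert elapsed CPU cycles into counter-timer ticks for the guest.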
u64 GetCNTPCT() override {
return Timing::CpuCyclesToClockCycles(parent.system.CoreTiming().GetTicks());
}
ARM_Dynarmic& parent;
std::size_t num_interpreted_instructions = 0;
u64 tpidrro_el0 = 0;
u64 tpidr_el0 = 0;
};
std::unique_ptr<Dynarmic::A64::Jit> ARM_Dynarmic::MakeJit(Common::PageTable& page_table,
std::size_t address_space_bits) const {
Dynarmic::A64::UserConfig config;
// Callbacks
config.callbacks = cb.get();
// Memory
config.page_table = reinterpret_cast<void**>(page_table.pointers.data());
config.page_table_address_space_bits = address_space_bits;
config.silently_mirror_page_table = false;
// Multi-process state
config.processor_id = core_index;
config.global_monitor = &exclusive_monitor.monitor;
// System registers
config.tpidrro_el0 = &cb->tpidrro_el0;
config.tpidr_el0 = &cb->tpidr_el0;
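// DCZID_EL0 block size field = 4: DC ZVA zeroes blocks of 2^4 words (64 bytes), which is
// presumed to match the behaviour of the emulated Cortex-A57 cores.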
config.dczid_el0 = 4;
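// CTR_EL0 is hard-wired to a cache-type value advertising 64-byte cache lines, presumably
// mirroring what the real hardware reports.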
config.ctr_el0 = 0x8444c004;
config.cntfrq_el0 = Timing::CNTFREQ;
// Unpredictable instructions
config.define_unpredictable_behaviour = true;
return std::make_unique<Dynarmic::A64::Jit>(config);
}
MICROPROFILE_DEFINE(ARM_Jit_Dynarmic, "ARM JIT", "Dynarmic", MP_RGB(255, 64, 64));
void ARM_Dynarmic::Run() {
MICROPROFILE_SCOPE(ARM_Jit_Dynarmic);
jit->Run();
}
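// Single-step by routing exactly one instruction through the interpreter fallback.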
void ARM_Dynarmic::Step() {
cb->InterpreterFallback(jit->GetPC(), 1);
}
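// The shared ExclusiveMonitor must be the Dynarmic-backed implementation; the dynamic_cast
// in the initializer list enforces this at construction time.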
ARM_Dynarmic::ARM_Dynarmic(System& system, ExclusiveMonitor& exclusive_monitor,
std::size_t core_index)
: cb(std::make_unique<ARM_Dynarmic_Callbacks>(*this)), inner_unicorn{system},
core_index{core_index}, system{system},
exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
ARM_Dynarmic::~ARM_Dynarmic() = default;
void ARM_Dynarmic::MapBackingMemory(u64 address, std::size_t size, u8* memory,
Kernel::VMAPermission perms) {
inner_unicorn.MapBackingMemory(address, size, memory, perms);
}
void ARM_Dynarmic::UnmapMemory(u64 address, std::size_t size) {
inner_unicorn.UnmapMemory(address, size);
}
void ARM_Dynarmic::SetPC(u64 pc) {
jit->SetPC(pc);
}
u64 ARM_Dynarmic::GetPC() const {
return jit->GetPC();
}
u64 ARM_Dynarmic::GetReg(int index) const {
return jit->GetRegister(index);
}
void ARM_Dynarmic::SetReg(int index, u64 value) {
jit->SetRegister(index, value);
}
u128 ARM_Dynarmic::GetVectorReg(int index) const {
return jit->GetVector(index);
}
void ARM_Dynarmic::SetVectorReg(int index, u128 value) {
jit->SetVector(index, value);
}
u32 ARM_Dynarmic::GetPSTATE() const {
return jit->GetPstate();
}
void ARM_Dynarmic::SetPSTATE(u32 pstate) {
jit->SetPstate(pstate);
}
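// The guest reads its TLS block address through the read-only thread-pointer register
// TPIDRRO_EL0, while TPIDR_EL0 is the thread-writable counterpart.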
u64 ARM_Dynarmic::GetTlsAddress() const {
return cb->tpidrro_el0;
}
void ARM_Dynarmic::SetTlsAddress(VAddr address) {
cb->tpidrro_el0 = address;
}
u64 ARM_Dynarmic::GetTPIDR_EL0() const {
return cb->tpidr_el0;
}
void ARM_Dynarmic::SetTPIDR_EL0(u64 value) {
cb->tpidr_el0 = value;
}
void ARM_Dynarmic::SaveContext(ThreadContext& ctx) {
ctx.cpu_registers = jit->GetRegisters();
ctx.sp = jit->GetSP();
ctx.pc = jit->GetPC();
ctx.pstate = jit->GetPstate();
ctx.vector_registers = jit->GetVectors();
ctx.fpcr = jit->GetFpcr();
ctx.fpsr = jit->GetFpsr();
ctx.tpidr = cb->tpidr_el0;
}
void ARM_Dynarmic::LoadContext(const ThreadContext& ctx) {
jit->SetRegisters(ctx.cpu_registers);
jit->SetSP(ctx.sp);
jit->SetPC(ctx.pc);
jit->SetPstate(ctx.pstate);
jit->SetVectors(ctx.vector_registers);
jit->SetFpcr(ctx.fpcr);
jit->SetFpsr(ctx.fpsr);
SetTPIDR_EL0(ctx.tpidr);
}
void ARM_Dynarmic::PrepareReschedule() {
jit->HaltExecution();
}
void ARM_Dynarmic::ClearInstructionCache() {
jit->ClearCache();
}
void ARM_Dynarmic::ClearExclusiveState() {
jit->ClearExclusiveState();
}
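// The page-table pointer and address-space size are baked into the JIT's UserConfig, so a
// page-table change requires constructing a fresh JIT instance.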
void ARM_Dynarmic::PageTableChanged(Common::PageTable& page_table,
std::size_t new_address_space_size_in_bits) {
jit = MakeJit(page_table, new_address_space_size_in_bits);
}
DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(std::size_t core_count) : monitor(core_count) {}
DynarmicExclusiveMonitor::~DynarmicExclusiveMonitor() = default;
void DynarmicExclusiveMonitor::SetExclusive(std::size_t core_index, VAddr addr) {
// Size doesn't actually matter.
monitor.Mark(core_index, addr, 16);
}
void DynarmicExclusiveMonitor::ClearExclusive() {
monitor.Clear();
}
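// Each ExclusiveWrite* performs the store only if this core still holds a valid reservation
// for the address; the return value reports whether the store actually happened.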
bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) {
return monitor.DoExclusiveOperation(core_index, vaddr, 1,
[&] { Memory::Write8(vaddr, value); });
}
bool DynarmicExclusiveMonitor::ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) {
return monitor.DoExclusiveOperation(core_index, vaddr, 2,
[&] { Memory::Write16(vaddr, value); });
}
bool DynarmicExclusiveMonitor::ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) {
return monitor.DoExclusiveOperation(core_index, vaddr, 4,
[&] { Memory::Write32(vaddr, value); });
}
bool DynarmicExclusiveMonitor::ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) {
return monitor.DoExclusiveOperation(core_index, vaddr, 8,
[&] { Memory::Write64(vaddr, value); });
}
bool DynarmicExclusiveMonitor::ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) {
return monitor.DoExclusiveOperation(core_index, vaddr, 16, [&] {
Memory::Write64(vaddr + 0, value[0]);
Memory::Write64(vaddr + 8, value[1]);
});
}
} // namespace Core