core_timing: Make use of uintptr_t to represent user_data
Makes the interface future-proof in the event we ever support platforms with differing pointer sizes. This way, we have a type in place that is always guaranteed to be able to represent a pointer exactly.
parent 6b35317ff3
commit a7af349dae

15 changed files with 52 additions and 43 deletions
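For context, here is a minimal sketch (not code from this repository) of why std::uintptr_t is the right carrier for user_data: a raw pointer round-trips through it losslessly on any platform, a guarantee u64 only gives where pointers are at most 64 bits wide. The PadState type and the simplified callback alias below are illustrative assumptions.

```cpp
#include <cassert>
#include <chrono>
#include <cstdint>
#include <functional>

struct PadState {
    int buttons = 0;
};

// Mirrors the shape of Core::Timing::TimedCallback after this change.
using TimedCallback =
    std::function<void(std::uintptr_t user_data, std::chrono::nanoseconds ns_late)>;

int main() {
    PadState pad{};

    const TimedCallback callback = [](std::uintptr_t user_data,
                                      std::chrono::nanoseconds /*ns_late*/) {
        // Recover the pointer that was carried through the scheduler as an integer.
        auto* state = reinterpret_cast<PadState*>(user_data);
        state->buttons = 1;
    };

    // The caller packs the pointer when scheduling the event...
    const auto user_data = reinterpret_cast<std::uintptr_t>(&pad);
    // ...and the timing core hands it back verbatim when the event fires.
    callback(user_data, std::chrono::nanoseconds{0});

    assert(pad.buttons == 1);
    return 0;
}
```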
@@ -36,9 +36,10 @@ Stream::Stream(Core::Timing::CoreTiming& core_timing, u32 sample_rate, Format fo
                ReleaseCallback&& release_callback, SinkStream& sink_stream, std::string&& name_)
     : sample_rate{sample_rate}, format{format}, release_callback{std::move(release_callback)},
       sink_stream{sink_stream}, core_timing{core_timing}, name{std::move(name_)} {
-
-    release_event = Core::Timing::CreateEvent(
-        name, [this](u64, std::chrono::nanoseconds ns_late) { ReleaseActiveBuffer(ns_late); });
+    release_event =
+        Core::Timing::CreateEvent(name, [this](std::uintptr_t, std::chrono::nanoseconds ns_late) {
+            ReleaseActiveBuffer(ns_late);
+        });
 }
 
 void Stream::Play() {
@@ -23,7 +23,7 @@ std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callbac
 struct CoreTiming::Event {
     u64 time;
     u64 fifo_order;
-    u64 userdata;
+    std::uintptr_t user_data;
     std::weak_ptr<EventType> type;
 
     // Sort by time, unless the times are the same, in which case sort by
@@ -58,7 +58,7 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
     event_fifo_id = 0;
     shutting_down = false;
     ticks = 0;
-    const auto empty_timed_callback = [](u64, std::chrono::nanoseconds) {};
+    const auto empty_timed_callback = [](std::uintptr_t, std::chrono::nanoseconds) {};
     ev_lost = CreateEvent("_lost_event", empty_timed_callback);
     if (is_multicore) {
         timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this));
@@ -107,22 +107,24 @@ bool CoreTiming::HasPendingEvents() const {
 }
 
 void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
-                               const std::shared_ptr<EventType>& event_type, u64 userdata) {
+                               const std::shared_ptr<EventType>& event_type,
+                               std::uintptr_t user_data) {
     {
         std::scoped_lock scope{basic_lock};
         const u64 timeout = static_cast<u64>((GetGlobalTimeNs() + ns_into_future).count());
 
-        event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
+        event_queue.emplace_back(Event{timeout, event_fifo_id++, user_data, event_type});
 
         std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
     }
     event.Set();
 }
 
-void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) {
+void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
+                                 std::uintptr_t user_data) {
     std::scoped_lock scope{basic_lock};
     const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
-        return e.type.lock().get() == event_type.get() && e.userdata == userdata;
+        return e.type.lock().get() == event_type.get() && e.user_data == user_data;
     });
 
     // Removing random items breaks the invariant so we have to re-establish it.
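As a side note, the std::push_heap call in the hunk above is what keeps the soonest deadline at the front of event_queue. The toy sketch below (simplified Event with no weak_ptr member, no locking, purely illustrative) shows the same min-heap pattern in isolation.

```cpp
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iostream>
#include <tuple>
#include <vector>

struct Event {
    std::uint64_t time;
    std::uint64_t fifo_order;
    std::uintptr_t user_data;

    // Sort by time, then by insertion order for events that share a timestamp.
    friend bool operator>(const Event& lhs, const Event& rhs) {
        return std::tie(lhs.time, lhs.fifo_order) > std::tie(rhs.time, rhs.fifo_order);
    }
};

int main() {
    std::vector<Event> event_queue;
    std::uint64_t fifo_id = 0;

    for (const std::uint64_t timeout : {300, 100, 200}) {
        event_queue.emplace_back(Event{timeout, fifo_id++, 0});
        // std::greater<> + push_heap builds a min-heap: smallest timeout stays at front().
        std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
    }

    // Events come back out in deadline order: 100, 200, 300.
    while (!event_queue.empty()) {
        std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
        std::cout << event_queue.back().time << '\n';
        event_queue.pop_back();
    }
    return 0;
}
```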
@@ -197,7 +199,7 @@ std::optional<s64> CoreTiming::Advance() {
 
         if (const auto event_type{evt.type.lock()}) {
             event_type->callback(
-                evt.userdata, std::chrono::nanoseconds{static_cast<s64>(global_timer - evt.time)});
+                evt.user_data, std::chrono::nanoseconds{static_cast<s64>(global_timer - evt.time)});
         }
 
         basic_lock.lock();
@@ -22,7 +22,8 @@
 namespace Core::Timing {
 
 /// A callback that may be scheduled for a particular core timing event.
-using TimedCallback = std::function<void(u64 userdata, std::chrono::nanoseconds ns_late)>;
+using TimedCallback =
+    std::function<void(std::uintptr_t user_data, std::chrono::nanoseconds ns_late)>;
 
 /// Contains the characteristics of a particular event.
 struct EventType {
@@ -94,9 +95,9 @@ public:
 
     /// Schedules an event in core timing
     void ScheduleEvent(std::chrono::nanoseconds ns_into_future,
-                       const std::shared_ptr<EventType>& event_type, u64 userdata = 0);
+                       const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data = 0);
 
-    void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata);
+    void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data);
 
     /// We only permit one event of each type in the queue at a time.
     void RemoveEvent(const std::shared_ptr<EventType>& event_type);
@@ -11,8 +11,8 @@
 namespace Core::Hardware {
 
 InterruptManager::InterruptManager(Core::System& system_in) : system(system_in) {
-    gpu_interrupt_event =
-        Core::Timing::CreateEvent("GPUInterrupt", [this](u64 message, std::chrono::nanoseconds) {
+    gpu_interrupt_event = Core::Timing::CreateEvent(
+        "GPUInterrupt", [this](std::uintptr_t message, std::chrono::nanoseconds) {
             auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv");
             const u32 syncpt = static_cast<u32>(message >> 32);
             const u32 value = static_cast<u32>(message);
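The "GPUInterrupt" callback above unpacks a syncpoint id and a value from the upper and lower halves of the message word. Below is a hedged sketch of the matching packing side; PackGpuInterrupt is a hypothetical helper rather than a yuzu function, and the layout assumes the user_data word is at least 64 bits wide (it would not fit in a 32-bit std::uintptr_t).

```cpp
#include <cassert>
#include <cstdint>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

// Hypothetical helper: combine a syncpoint id and a value into one message word,
// with the syncpoint occupying the upper 32 bits.
constexpr u64 PackGpuInterrupt(u32 syncpt, u32 value) {
    return (static_cast<u64>(syncpt) << 32) | value;
}

int main() {
    const u64 message = PackGpuInterrupt(7, 0xCAFE);

    // Mirrors the unpacking performed inside the "GPUInterrupt" callback.
    const u32 syncpt = static_cast<u32>(message >> 32);
    const u32 value = static_cast<u32>(message);

    assert(syncpt == 7);
    assert(value == 0xCAFE);
    return 0;
}
```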
@@ -145,7 +145,7 @@ struct KernelCore::Impl {
 
     void InitializePreemption(KernelCore& kernel) {
         preemption_event = Core::Timing::CreateEvent(
-            "PreemptionCallback", [this, &kernel](u64, std::chrono::nanoseconds) {
+            "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
                 {
                     SchedulerLock lock(kernel);
                     global_scheduler.PreemptThreads();
@@ -33,8 +33,10 @@ ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kern
                                                                 std::string name) {
     std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)};
 
-    session->request_event = Core::Timing::CreateEvent(
-        name, [session](u64, std::chrono::nanoseconds) { session->CompleteSyncRequest(); });
+    session->request_event =
+        Core::Timing::CreateEvent(name, [session](std::uintptr_t, std::chrono::nanoseconds) {
+            session->CompleteSyncRequest();
+        });
     session->name = std::move(name);
     session->parent = std::move(parent);
 
@@ -16,14 +16,14 @@ namespace Kernel {
 
 TimeManager::TimeManager(Core::System& system_) : system{system_} {
     time_manager_event_type = Core::Timing::CreateEvent(
-        "Kernel::TimeManagerCallback", [this](u64 thread_handle, std::chrono::nanoseconds) {
-            SchedulerLock lock(system.Kernel());
-            Handle proper_handle = static_cast<Handle>(thread_handle);
+        "Kernel::TimeManagerCallback",
+        [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
+            const SchedulerLock lock(system.Kernel());
+            const auto proper_handle = static_cast<Handle>(thread_handle);
             if (cancelled_events[proper_handle]) {
                 return;
             }
-            std::shared_ptr<Thread> thread =
-                this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
+            auto thread = this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
             thread->OnWakeUp();
         });
 }
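In the hunk above the user_data slot carries a 32-bit kernel handle rather than a pointer, so the callback narrows it back with static_cast<Handle>. A small self-contained sketch of that pattern, using hypothetical stand-in types rather than the real kernel classes:

```cpp
#include <cassert>
#include <chrono>
#include <cstdint>
#include <functional>

using Handle = std::uint32_t; // stand-in for the kernel Handle type

// Same shape as the updated TimedCallback alias.
using TimedCallback =
    std::function<void(std::uintptr_t user_data, std::chrono::nanoseconds ns_late)>;

int main() {
    Handle woken = 0;

    const TimedCallback wakeup = [&woken](std::uintptr_t thread_handle,
                                          std::chrono::nanoseconds /*ns_late*/) {
        // user_data may be wider than Handle, so the narrowing cast is explicit.
        const auto proper_handle = static_cast<Handle>(thread_handle);
        woken = proper_handle;
    };

    const Handle thread_handle = 0x8001;
    wakeup(static_cast<std::uintptr_t>(thread_handle), std::chrono::nanoseconds{0});

    assert(woken == 0x8001);
    return 0;
}
```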
@@ -77,8 +77,9 @@ IAppletResource::IAppletResource(Core::System& system)
 
     // Register update callbacks
     pad_update_event = Core::Timing::CreateEvent(
-        "HID::UpdatePadCallback", [this](u64 userdata, std::chrono::nanoseconds ns_late) {
-            UpdateControllers(userdata, ns_late);
+        "HID::UpdatePadCallback",
+        [this](std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
+            UpdateControllers(user_data, ns_late);
         });
 
     // TODO(shinyquagsire23): Other update callbacks? (accel, gyro?)
@@ -108,7 +109,8 @@ void IAppletResource::GetSharedMemoryHandle(Kernel::HLERequestContext& ctx) {
     rb.PushCopyObjects(shared_mem);
 }
 
-void IAppletResource::UpdateControllers(u64 userdata, std::chrono::nanoseconds ns_late) {
+void IAppletResource::UpdateControllers(std::uintptr_t user_data,
+                                        std::chrono::nanoseconds ns_late) {
     auto& core_timing = system.CoreTiming();
 
     const bool should_reload = Settings::values.is_device_reload_pending.exchange(false);
@@ -64,7 +64,7 @@ private:
     }
 
     void GetSharedMemoryHandle(Kernel::HLERequestContext& ctx);
-    void UpdateControllers(u64 userdata, std::chrono::nanoseconds ns_late);
+    void UpdateControllers(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
 
     std::shared_ptr<Kernel::SharedMemory> shared_mem;
 
@@ -67,7 +67,7 @@ NVFlinger::NVFlinger(Core::System& system) : system(system) {
 
     // Schedule the screen composition events
     composition_event = Core::Timing::CreateEvent(
-        "ScreenComposition", [this](u64, std::chrono::nanoseconds ns_late) {
+        "ScreenComposition", [this](std::uintptr_t, std::chrono::nanoseconds ns_late) {
             const auto guard = Lock();
             Compose();
 
@@ -188,11 +188,11 @@ CheatEngine::~CheatEngine() {
 }
 
 void CheatEngine::Initialize() {
-    event = Core::Timing::CreateEvent("CheatEngine::FrameCallback::" +
-                                          Common::HexToString(metadata.main_nso_build_id),
-                                      [this](u64 userdata, std::chrono::nanoseconds ns_late) {
-                                          FrameCallback(userdata, ns_late);
-                                      });
+    event = Core::Timing::CreateEvent(
+        "CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id),
+        [this](std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
+            FrameCallback(user_data, ns_late);
+        });
     core_timing.ScheduleEvent(CHEAT_ENGINE_NS, event);
 
     metadata.process_id = system.CurrentProcess()->GetProcessID();
@@ -219,7 +219,7 @@ void CheatEngine::Reload(std::vector<CheatEntry> cheats) {
 
 MICROPROFILE_DEFINE(Cheat_Engine, "Add-Ons", "Cheat Engine", MP_RGB(70, 200, 70));
 
-void CheatEngine::FrameCallback(u64, std::chrono::nanoseconds ns_late) {
+void CheatEngine::FrameCallback(std::uintptr_t, std::chrono::nanoseconds ns_late) {
     if (is_pending_reload.exchange(false)) {
         vm.LoadProgram(cheats);
     }
@@ -72,7 +72,7 @@ public:
     void Reload(std::vector<CheatEntry> cheats);
 
 private:
-    void FrameCallback(u64 userdata, std::chrono::nanoseconds ns_late);
+    void FrameCallback(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
 
     DmntCheatVm vm;
     CheatProcessMetadata metadata;
@@ -55,10 +55,11 @@ void MemoryWriteWidth(Core::Memory::Memory& memory, u32 width, VAddr addr, u64 v
 
 Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_)
     : core_timing{core_timing_}, memory{memory_} {
-    event = Core::Timing::CreateEvent("MemoryFreezer::FrameCallback",
-                                      [this](u64 userdata, std::chrono::nanoseconds ns_late) {
-                                          FrameCallback(userdata, ns_late);
-                                      });
+    event = Core::Timing::CreateEvent(
+        "MemoryFreezer::FrameCallback",
+        [this](std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
+            FrameCallback(user_data, ns_late);
+        });
     core_timing.ScheduleEvent(memory_freezer_ns, event);
 }
 
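The constructor above arms the first frame event; with this kind of periodic callback the handler typically re-schedules itself and subtracts ns_late so that scheduling jitter does not accumulate. A hedged sketch of that idiom follows, using a stand-in Scheduler type rather than the real CoreTiming class.

```cpp
#include <chrono>
#include <cstdint>
#include <functional>
#include <iostream>

using TimedCallback = std::function<void(std::uintptr_t, std::chrono::nanoseconds)>;

// Stand-in for Core::Timing::CoreTiming; it only reports the requested delay.
struct Scheduler {
    void ScheduleEvent(std::chrono::nanoseconds ns_into_future, std::uintptr_t /*user_data*/ = 0) {
        std::cout << "next event in " << ns_into_future.count() << " ns\n";
    }
};

int main() {
    constexpr std::chrono::nanoseconds frame_ns{16666666}; // roughly one 60 Hz frame
    Scheduler core_timing;

    const TimedCallback frame_callback = [&](std::uintptr_t, std::chrono::nanoseconds ns_late) {
        // ... per-frame work would go here ...
        // Re-arm the event, compensating for how late this invocation fired.
        core_timing.ScheduleEvent(frame_ns - ns_late);
    };

    // Simulate the timer firing 1.2 ms late: the next period is shortened accordingly.
    frame_callback(0, std::chrono::nanoseconds{1200000});
    return 0;
}
```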
@@ -159,7 +160,7 @@ std::vector<Freezer::Entry> Freezer::GetEntries() const {
     return entries;
 }
 
-void Freezer::FrameCallback(u64, std::chrono::nanoseconds ns_late) {
+void Freezer::FrameCallback(std::uintptr_t, std::chrono::nanoseconds ns_late) {
     if (!IsActive()) {
         LOG_DEBUG(Common_Memory, "Memory freezer has been deactivated, ending callback events.");
         return;
@@ -73,7 +73,7 @@ public:
    std::vector<Entry> GetEntries() const;
 
 private:
-    void FrameCallback(u64 userdata, std::chrono::nanoseconds ns_late);
+    void FrameCallback(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
     void FillEntryReads();
 
     std::atomic_bool active{false};
@@ -25,10 +25,10 @@ std::bitset<CB_IDS.size()> callbacks_ran_flags;
 u64 expected_callback = 0;
 
 template <unsigned int IDX>
-void HostCallbackTemplate(u64 userdata, std::chrono::nanoseconds ns_late) {
+void HostCallbackTemplate(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
     static_assert(IDX < CB_IDS.size(), "IDX out of range");
     callbacks_ran_flags.set(IDX);
-    REQUIRE(CB_IDS[IDX] == userdata);
+    REQUIRE(CB_IDS[IDX] == user_data);
     REQUIRE(CB_IDS[IDX] == CB_IDS[calls_order[expected_callback]]);
     delays[IDX] = ns_late.count();
     ++expected_callback;