2018-02-18 20:58:40 +01:00
|
|
|
// Copyright 2018 yuzu emulator team
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
|
|
|
#pragma once
|
|
|
|
|
2019-12-08 04:09:20 +01:00
|
|
|
#include <atomic>
|
|
|
|
#include <memory>
|
2020-02-14 16:44:31 +01:00
|
|
|
#include <mutex>
|
2018-02-18 20:58:40 +01:00
|
|
|
#include <vector>
|
2019-12-08 04:09:20 +01:00
|
|
|
|
2018-02-18 20:58:40 +01:00
|
|
|
#include "common/common_types.h"
|
2019-03-16 05:30:15 +01:00
|
|
|
#include "common/multi_level_queue.h"
|
2020-02-12 00:56:24 +01:00
|
|
|
#include "core/hardware_properties.h"
|
2018-02-18 20:58:40 +01:00
|
|
|
#include "core/hle/kernel/thread.h"
|
|
|
|
|
2018-08-25 03:43:32 +02:00
|
|
|
namespace Core {
|
2018-07-31 14:06:09 +02:00
|
|
|
class ARM_Interface;
|
2019-03-04 22:02:59 +01:00
|
|
|
class System;
|
|
|
|
} // namespace Core
|
2018-07-31 14:06:09 +02:00
|
|
|
|
2018-02-18 20:58:40 +01:00
|
|
|
namespace Kernel {
|
|
|
|
|
2020-02-14 03:04:10 +01:00
|
|
|
class KernelCore;
|
2018-10-26 00:42:50 +02:00
|
|
|
class Process;
|
2020-02-14 16:44:31 +01:00
|
|
|
class SchedulerLock;
|
2018-10-26 00:42:50 +02:00
|
|
|
|
2019-03-29 22:01:17 +01:00
|
|
|
/**
 * System-wide scheduler shared by every emulated CPU core. It owns the per-core
 * scheduled/suggested priority queues and implements the global scheduling policy
 * (thread selection, yielding, preemption and core load balancing).
 */
class GlobalScheduler final {
public:
    explicit GlobalScheduler(KernelCore& kernel);
    ~GlobalScheduler();

    /// Adds a new thread to the scheduler
    void AddThread(std::shared_ptr<Thread> thread);

    /// Removes a thread from the scheduler
    void RemoveThread(std::shared_ptr<Thread> thread);

    /// Returns a list of all threads managed by the scheduler
    const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
        return thread_list;
    }

    /**
     * Add a thread to the suggested queue of a cpu core. Suggested threads may be
     * picked if no thread is scheduled to run on the core.
     */
    void Suggest(u32 priority, std::size_t core, Thread* thread);

    /**
     * Remove a thread from the suggested queue of a cpu core. Suggested threads may be
     * picked if no thread is scheduled to run on the core.
     */
    void Unsuggest(u32 priority, std::size_t core, Thread* thread);

    /**
     * Add a thread to the scheduling queue of a cpu core. The thread is added at the
     * back of the queue in its priority level.
     */
    void Schedule(u32 priority, std::size_t core, Thread* thread);

    /**
     * Add a thread to the scheduling queue of a cpu core. The thread is added at the
     * front of the queue in its priority level.
     */
    void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);

    /// Reschedule an already scheduled thread based on a new priority
    void Reschedule(u32 priority, std::size_t core, Thread* thread);

    /// Unschedules a thread.
    void Unschedule(u32 priority, std::size_t core, Thread* thread);

    /// Selects a core and forces it to unload its current thread's context
    void UnloadThread(std::size_t core);

    /**
     * Takes care of selecting the new scheduled thread in three steps:
     *
     * 1. First a thread is selected from the top of the priority queue. If no thread
     * is obtained then we move to step two, else we are done.
     *
     * 2. Second we try to get a suggested thread that's not assigned to any core or
     * that is not the top thread in that core.
     *
     * 3. Third, if no suggested thread is found, we do a second pass and pick a running
     * thread in another core and swap it with its current thread.
     */
    void SelectThread(std::size_t core);

    /// Returns true when the given core has at least one thread ready to run.
    bool HaveReadyThreads(std::size_t core_id) const {
        return !scheduled_queue[core_id].empty();
    }

    /**
     * Takes a thread and moves it to the back of its priority list.
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
    bool YieldThread(Thread* thread);

    /**
     * Takes a thread and moves it to the back of its priority list.
     * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
     * a better priority than the next thread in the core.
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
    bool YieldThreadAndBalanceLoad(Thread* thread);

    /**
     * Takes a thread and moves it out of the scheduling queue
     * and into the suggested queue. If no thread can be scheduled afterwards in that core,
     * a suggested thread is obtained instead.
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
    bool YieldThreadAndWaitForLoadBalancing(Thread* thread);

    /**
     * Rotates the scheduling queues of threads at a preemption priority and then does
     * some core rebalancing. Preemption priorities can be found in the array
     * 'preemption_priorities'.
     *
     * @note This operation happens every 10ms.
     */
    void PreemptThreads();

    /// Returns the number of emulated CPU cores.
    u32 CpuCoresCount() const {
        return Core::Hardware::NUM_CPU_CORES;
    }

    /// Requests that thread reselection be performed at the next opportunity.
    void SetReselectionPending() {
        // Release pairs with the acquire in IsReselectionPending so the flag
        // publishes correctly across cores.
        is_reselection_pending.store(true, std::memory_order_release);
    }

    /// Returns true when a thread reselection has been requested.
    bool IsReselectionPending() const {
        return is_reselection_pending.load(std::memory_order_acquire);
    }

    /// Releases all scheduler resources. Called during emulation shutdown.
    void Shutdown();

private:
    friend class SchedulerLock;

    /// Lock the scheduler to the current thread.
    void Lock();

    /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
    /// and reschedules current core if needed.
    void Unlock();

    /**
     * Transfers a thread into a specific core. If the destination_core is -1
     * it will be unscheduled from its source core and added into its suggested
     * queue.
     */
    void TransferToCore(u32 priority, s32 destination_core, Thread* thread);

    /// Flags a reselection (or marks the yield redundant) depending on whether the
    /// yielding thread lost its slot to a winner thread.
    bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner);

    /// Lowest priority value considered a "regular" (non-highest) priority.
    static constexpr u32 min_regular_priority = 2;
    /// Per-core queues of threads scheduled to run on that core, ordered by priority.
    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
        scheduled_queue;
    /// Per-core queues of threads suggested for that core (fallback candidates).
    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
        suggested_queue;
    std::atomic<bool> is_reselection_pending{false};

    // The priority levels at which the global scheduler preempts threads every 10 ms. They are
    // ordered from Core 0 to Core 3.
    std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};

    /// Scheduler lock mechanisms.
    std::mutex inner_lock{}; // TODO(Blinkhawk): Replace for a SpinLock
    /// Recursion depth of the scheduler lock held by the current owner.
    std::atomic<s64> scope_lock{};
    /// Handle of the emulated thread currently holding the scheduler lock.
    Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};

    /// Lists all thread ids that aren't deleted/etc.
    std::vector<std::shared_ptr<Thread>> thread_list;
    KernelCore& kernel;
};
|
|
|
|
|
|
|
|
/**
 * Per-core scheduler. Each emulated CPU core owns one Scheduler, which performs
 * context switches between the threads selected for it by the GlobalScheduler.
 */
class Scheduler final {
public:
    explicit Scheduler(Core::System& system, std::size_t core_id);
    ~Scheduler();

    /// Returns whether there are any threads that are ready to run.
    bool HaveReadyThreads() const;

    /// Reschedules to the next available thread (call after current thread is suspended)
    void TryDoContextSwitch();

    /// Unloads currently running thread
    void UnloadThread();

    /// Select the threads in top of the scheduling multilist.
    void SelectThreads();

    /// Gets the current running thread
    Thread* GetCurrentThread() const;

    /// Gets the currently selected thread from the top of the multilevel queue
    Thread* GetSelectedThread() const;

    /// Gets the timestamp for the last context switch in ticks.
    u64 GetLastContextSwitchTicks() const;

    /// Returns true when a context switch has been requested but not yet performed.
    bool ContextSwitchPending() const {
        return is_context_switch_pending;
    }

    /// Shutdowns the scheduler.
    void Shutdown();

private:
    friend class GlobalScheduler;

    /// Switches the CPU's active thread context to that of the specified thread
    void SwitchContext();

    /**
     * Called on every context switch to update the internal timestamp
     * This also updates the running time ticks for the given thread and
     * process using the following difference:
     *
     * ticks += most_recent_ticks - last_context_switch_ticks
     *
     * The internal tick timestamp for the scheduler is simply the
     * most recent tick count retrieved. No special arithmetic is
     * applied to it.
     */
    void UpdateLastContextSwitchTime(Thread* thread, Process* process);

    /// Thread currently running on this core (null when idle).
    std::shared_ptr<Thread> current_thread = nullptr;
    /// Thread chosen by the last SelectThreads pass to run next on this core.
    std::shared_ptr<Thread> selected_thread = nullptr;

    Core::System& system;
    /// Tick timestamp taken at the most recent context switch.
    u64 last_context_switch_time = 0;
    /// Number of times this core selected the idle thread.
    u64 idle_selection_count = 0;
    /// Index of the emulated CPU core this scheduler belongs to.
    const std::size_t core_id;

    /// Set when a context switch has been requested for this core.
    bool is_context_switch_pending = false;
};
|
|
|
|
|
2020-02-14 16:44:31 +01:00
|
|
|
/**
 * RAII helper that holds the global scheduler lock for its lifetime.
 * Locks on construction and unlocks (triggering reselection/rescheduling as
 * needed) on destruction.
 */
class SchedulerLock {
public:
    explicit SchedulerLock(KernelCore& kernel);
    ~SchedulerLock();

protected:
    /// Kernel instance whose global scheduler is locked. Protected so derived
    /// lock types (e.g. SchedulerLockAndSleep) can use it.
    KernelCore& kernel;
};
|
|
|
|
|
|
|
|
/**
 * RAII scheduler lock that additionally arranges for a thread to sleep for a
 * given number of nanoseconds when the lock is released, unless the sleep is
 * cancelled beforehand via CancelSleep().
 */
class SchedulerLockAndSleep : public SchedulerLock {
public:
    explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task,
                                   s64 nanoseconds);
    ~SchedulerLockAndSleep();

    /// Prevents the pending sleep from being armed when this lock is destroyed.
    void CancelSleep() {
        sleep_cancelled = true;
    }

private:
    /// Out-parameter receiving the handle of the timeout event.
    Handle& event_handle;
    /// Thread that will be put to sleep on release.
    Thread* time_task;
    /// Requested sleep duration, in nanoseconds.
    s64 nanoseconds;
    /// True once CancelSleep() has been called; suppresses the sleep on unlock.
    bool sleep_cancelled{};
};
|
|
|
|
|
2018-02-18 20:58:40 +01:00
|
|
|
} // namespace Kernel
|