Forked from suyu/suyu

Kernel: Address Feedback.

This commit is contained in:
Fernando Sahmkow 2019-10-12 10:13:25 -04:00 committed by FernandoS27
parent 25f8606a6d
commit 3073615dbc
6 changed files with 98 additions and 67 deletions

View file

@ -21,11 +21,11 @@ namespace Kernel {
class AddressArbiter; class AddressArbiter;
class ClientPort; class ClientPort;
class GlobalScheduler;
class HandleTable; class HandleTable;
class Process; class Process;
class ResourceLimit; class ResourceLimit;
class Thread; class Thread;
class GlobalScheduler;
/// Represents a single instance of the kernel. /// Represents a single instance of the kernel.
class KernelCore { class KernelCore {

View file

@ -23,7 +23,7 @@
namespace Kernel { namespace Kernel {
GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} { GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} {
reselection_pending = false; is_reselection_pending = false;
} }
void GlobalScheduler::AddThread(SharedPtr<Thread> thread) { void GlobalScheduler::AddThread(SharedPtr<Thread> thread) {
@ -61,7 +61,7 @@ void GlobalScheduler::SelectThread(u32 core) {
} }
sched.selected_thread = thread; sched.selected_thread = thread;
} }
sched.context_switch_pending = sched.selected_thread != sched.current_thread; sched.is_context_switch_pending = sched.selected_thread != sched.current_thread;
std::atomic_thread_fence(std::memory_order_seq_cst); std::atomic_thread_fence(std::memory_order_seq_cst);
}; };
Scheduler& sched = system.Scheduler(core); Scheduler& sched = system.Scheduler(core);
@ -318,10 +318,18 @@ void GlobalScheduler::PreemptThreads() {
} }
} }
reselection_pending.store(true, std::memory_order_release); is_reselection_pending.store(true, std::memory_order_release);
} }
} }
void GlobalScheduler::Suggest(u32 priority, u32 core, Thread* thread) {
suggested_queue[core].add(thread, priority);
}
void GlobalScheduler::Unsuggest(u32 priority, u32 core, Thread* thread) {
suggested_queue[core].remove(thread, priority);
}
void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) { void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core."); ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
scheduled_queue[core].add(thread, priority); scheduled_queue[core].add(thread, priority);
@ -332,12 +340,40 @@ void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
scheduled_queue[core].add(thread, priority, false); scheduled_queue[core].add(thread, priority, false);
} }
void GlobalScheduler::Reschedule(u32 priority, u32 core, Thread* thread) {
scheduled_queue[core].remove(thread, priority);
scheduled_queue[core].add(thread, priority);
}
void GlobalScheduler::Unschedule(u32 priority, u32 core, Thread* thread) {
scheduled_queue[core].remove(thread, priority);
}
void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
const s32 source_core = thread->GetProcessorID();
if (source_core == destination_core || !schedulable) {
return;
}
thread->SetProcessorID(destination_core);
if (source_core >= 0) {
Unschedule(priority, source_core, thread);
}
if (destination_core >= 0) {
Unsuggest(priority, destination_core, thread);
Schedule(priority, destination_core, thread);
}
if (source_core >= 0) {
Suggest(priority, source_core, thread);
}
}
bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) { bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
if (current_thread == winner) { if (current_thread == winner) {
current_thread->IncrementYieldCount(); current_thread->IncrementYieldCount();
return true; return true;
} else { } else {
reselection_pending.store(true, std::memory_order_release); is_reselection_pending.store(true, std::memory_order_release);
return false; return false;
} }
} }
@ -378,7 +414,7 @@ u64 Scheduler::GetLastContextSwitchTicks() const {
} }
void Scheduler::TryDoContextSwitch() { void Scheduler::TryDoContextSwitch() {
if (context_switch_pending) { if (is_context_switch_pending ) {
SwitchContext(); SwitchContext();
} }
} }
@ -409,7 +445,7 @@ void Scheduler::SwitchContext() {
Thread* const previous_thread = GetCurrentThread(); Thread* const previous_thread = GetCurrentThread();
Thread* const new_thread = GetSelectedThread(); Thread* const new_thread = GetSelectedThread();
context_switch_pending = false; is_context_switch_pending = false;
if (new_thread == previous_thread) { if (new_thread == previous_thread) {
return; return;
} }
@ -477,4 +513,9 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
last_context_switch_time = most_recent_switch_ticks; last_context_switch_time = most_recent_switch_ticks;
} }
void Scheduler::Shutdown() {
current_thread = nullptr;
selected_thread = nullptr;
}
} // namespace Kernel } // namespace Kernel

View file

@ -39,15 +39,11 @@ public:
// Add a thread to the suggested queue of a cpu core. Suggested threads may be // Add a thread to the suggested queue of a cpu core. Suggested threads may be
// picked if no thread is scheduled to run on the core. // picked if no thread is scheduled to run on the core.
void Suggest(u32 priority, u32 core, Thread* thread) { void Suggest(u32 priority, u32 core, Thread* thread);
suggested_queue[core].add(thread, priority);
}
// Remove a thread to the suggested queue of a cpu core. Suggested threads may be // Remove a thread to the suggested queue of a cpu core. Suggested threads may be
// picked if no thread is scheduled to run on the core. // picked if no thread is scheduled to run on the core.
void Unsuggest(u32 priority, u32 core, Thread* thread) { void Unsuggest(u32 priority, u32 core, Thread* thread);
suggested_queue[core].remove(thread, priority);
}
// Add a thread to the scheduling queue of a cpu core. The thread is added at the // Add a thread to the scheduling queue of a cpu core. The thread is added at the
// back the queue in its priority level // back the queue in its priority level
@ -58,37 +54,15 @@ public:
void SchedulePrepend(u32 priority, u32 core, Thread* thread); void SchedulePrepend(u32 priority, u32 core, Thread* thread);
// Reschedule an already scheduled thread based on a new priority // Reschedule an already scheduled thread based on a new priority
void Reschedule(u32 priority, u32 core, Thread* thread) { void Reschedule(u32 priority, u32 core, Thread* thread);
scheduled_queue[core].remove(thread, priority);
scheduled_queue[core].add(thread, priority);
}
// Unschedule a thread. // Unschedule a thread.
void Unschedule(u32 priority, u32 core, Thread* thread) { void Unschedule(u32 priority, u32 core, Thread* thread);
scheduled_queue[core].remove(thread, priority);
}
// Transfers a thread into an specific core. If the destination_core is -1 // Transfers a thread into an specific core. If the destination_core is -1
// it will be unscheduled from its source code and added into its suggested // it will be unscheduled from its source code and added into its suggested
// queue. // queue.
void TransferToCore(u32 priority, s32 destination_core, Thread* thread) { void TransferToCore(u32 priority, s32 destination_core, Thread* thread);
const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
const s32 source_core = thread->GetProcessorID();
if (source_core == destination_core || !schedulable) {
return;
}
thread->SetProcessorID(destination_core);
if (source_core >= 0) {
Unschedule(priority, source_core, thread);
}
if (destination_core >= 0) {
Unsuggest(priority, destination_core, thread);
Schedule(priority, destination_core, thread);
}
if (source_core >= 0) {
Suggest(priority, source_core, thread);
}
}
/* /*
* UnloadThread selects a core and forces it to unload its current thread's context * UnloadThread selects a core and forces it to unload its current thread's context
@ -133,6 +107,12 @@ public:
*/ */
bool YieldThreadAndWaitForLoadBalancing(Thread* thread); bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
/*
* PreemptThreads this operation rotates the scheduling queues of threads at
* a preemption priority and then does some core rebalancing. Preemption priorities
* can be found in the array 'preemption_priorities'. This operation happens
* every 10ms.
*/
void PreemptThreads(); void PreemptThreads();
u32 CpuCoresCount() const { u32 CpuCoresCount() const {
@ -140,11 +120,11 @@ public:
} }
void SetReselectionPending() { void SetReselectionPending() {
reselection_pending.store(true, std::memory_order_release); is_reselection_pending.store(true, std::memory_order_release);
} }
bool IsReselectionPending() const { bool IsReselectionPending() const {
return reselection_pending.load(); return is_reselection_pending.load(std::memory_order_acquire);
} }
void Shutdown(); void Shutdown();
@ -155,8 +135,10 @@ private:
static constexpr u32 min_regular_priority = 2; static constexpr u32 min_regular_priority = 2;
std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue; std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue; std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
std::atomic<bool> reselection_pending; std::atomic<bool> is_reselection_pending;
// `preemption_priorities` are the priority levels at which the global scheduler
// preempts threads every 10 ms. They are ordered from Core 0 to Core 3
std::array<u32, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62}; std::array<u32, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
/// Lists all thread ids that aren't deleted/etc. /// Lists all thread ids that aren't deleted/etc.
@ -166,7 +148,7 @@ private:
class Scheduler final { class Scheduler final {
public: public:
explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, const u32 core_id); explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id);
~Scheduler(); ~Scheduler();
/// Returns whether there are any threads that are ready to run. /// Returns whether there are any threads that are ready to run.
@ -175,26 +157,27 @@ public:
/// Reschedules to the next available thread (call after current thread is suspended) /// Reschedules to the next available thread (call after current thread is suspended)
void TryDoContextSwitch(); void TryDoContextSwitch();
/// Unloads currently running thread
void UnloadThread(); void UnloadThread();
/// Select the threads in top of the scheduling multilist.
void SelectThreads(); void SelectThreads();
/// Gets the current running thread /// Gets the current running thread
Thread* GetCurrentThread() const; Thread* GetCurrentThread() const;
/// Gets the currently selected thread from the top of the multilevel queue
Thread* GetSelectedThread() const; Thread* GetSelectedThread() const;
/// Gets the timestamp for the last context switch in ticks. /// Gets the timestamp for the last context switch in ticks.
u64 GetLastContextSwitchTicks() const; u64 GetLastContextSwitchTicks() const;
bool ContextSwitchPending() const { bool ContextSwitchPending() const {
return context_switch_pending; return is_context_switch_pending;
} }
void Shutdown() { /// Shutdowns the scheduler.
current_thread = nullptr; void Shutdown();
selected_thread = nullptr;
}
private: private:
friend class GlobalScheduler; friend class GlobalScheduler;
@ -226,7 +209,7 @@ private:
u64 idle_selection_count = 0; u64 idle_selection_count = 0;
const u32 core_id; const u32 core_id;
bool context_switch_pending = false; bool is_context_switch_pending = false;
}; };
} // namespace Kernel } // namespace Kernel

View file

@ -1556,18 +1556,18 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
auto& scheduler = system.CurrentScheduler(); auto& scheduler = system.CurrentScheduler();
auto* const current_thread = scheduler.GetCurrentThread(); auto* const current_thread = scheduler.GetCurrentThread();
bool redundant = false; bool is_redundant = false;
if (nanoseconds <= 0) { if (nanoseconds <= 0) {
switch (static_cast<SleepType>(nanoseconds)) { switch (static_cast<SleepType>(nanoseconds)) {
case SleepType::YieldWithoutLoadBalancing: case SleepType::YieldWithoutLoadBalancing:
redundant = current_thread->YieldSimple(); is_redundant = current_thread->YieldSimple();
break; break;
case SleepType::YieldWithLoadBalancing: case SleepType::YieldWithLoadBalancing:
redundant = current_thread->YieldAndBalanceLoad(); is_redundant = current_thread->YieldAndBalanceLoad();
break; break;
case SleepType::YieldAndWaitForLoadBalancing: case SleepType::YieldAndWaitForLoadBalancing:
redundant = current_thread->YieldAndWaitForLoadBalancing(); is_redundant = current_thread->YieldAndWaitForLoadBalancing();
break; break;
default: default:
UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
@ -1576,9 +1576,9 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
current_thread->Sleep(nanoseconds); current_thread->Sleep(nanoseconds);
} }
if (redundant) { if (is_redundant) {
// If it's redundant, the core is pretty much idle. Some games keep idling // If it's redundant, the core is pretty much idle. Some games keep idling
// a core while it's doing nothing, we advance timing to avoid costly continuos // a core while it's doing nothing, we advance timing to avoid costly continuous
// calls. // calls.
system.CoreTiming().AddTicks(2000); system.CoreTiming().AddTicks(2000);
} }

View file

@ -389,13 +389,13 @@ bool Thread::YieldAndWaitForLoadBalancing() {
void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
const u32 old_flags = scheduling_state; const u32 old_flags = scheduling_state;
scheduling_state = scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
(scheduling_state & ThreadSchedMasks::HighMask) | static_cast<u32>(new_status); static_cast<u32>(new_status);
AdjustSchedulingOnStatus(old_flags); AdjustSchedulingOnStatus(old_flags);
} }
void Thread::SetCurrentPriority(u32 new_priority) { void Thread::SetCurrentPriority(u32 new_priority) {
u32 old_priority = std::exchange(current_priority, new_priority); const u32 old_priority = std::exchange(current_priority, new_priority);
AdjustSchedulingOnPriority(old_priority); AdjustSchedulingOnPriority(old_priority);
} }
@ -410,10 +410,9 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
}; };
const bool use_override = affinity_override_count != 0; const bool use_override = affinity_override_count != 0;
// The value -3 is "do not change the ideal core". if (new_core == static_cast<s32>(CoreFlags::DontChangeIdealCore)) {
if (new_core == -3) {
new_core = use_override ? ideal_core_override : ideal_core; new_core = use_override ? ideal_core_override : ideal_core;
if ((new_affinity_mask & (1 << new_core)) == 0) { if ((new_affinity_mask & (1ULL << new_core)) == 0) {
return ERR_INVALID_COMBINATION; return ERR_INVALID_COMBINATION;
} }
} }
@ -444,14 +443,14 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
} }
auto& scheduler = kernel.GlobalScheduler(); auto& scheduler = kernel.GlobalScheduler();
if (static_cast<ThreadSchedStatus>(old_flags & ThreadSchedMasks::LowMask) == if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
ThreadSchedStatus::Runnable) { ThreadSchedStatus::Runnable) {
// In this case the thread was running, now it's pausing/exitting // In this case the thread was running, now it's pausing/exitting
if (processor_id >= 0) { if (processor_id >= 0) {
scheduler.Unschedule(current_priority, processor_id, this); scheduler.Unschedule(current_priority, processor_id, this);
} }
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
scheduler.Unsuggest(current_priority, core, this); scheduler.Unsuggest(current_priority, core, this);
} }
@ -462,7 +461,7 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
scheduler.Schedule(current_priority, processor_id, this); scheduler.Schedule(current_priority, processor_id, this);
} }
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
scheduler.Suggest(current_priority, core, this); scheduler.Suggest(current_priority, core, this);
} }

View file

@ -82,19 +82,25 @@ enum class ThreadSchedStatus : u32 {
Exited = 3, Exited = 3,
}; };
enum ThreadSchedFlags : u32 { enum class ThreadSchedFlags : u32 {
ProcessPauseFlag = 1 << 4, ProcessPauseFlag = 1 << 4,
ThreadPauseFlag = 1 << 5, ThreadPauseFlag = 1 << 5,
ProcessDebugPauseFlag = 1 << 6, ProcessDebugPauseFlag = 1 << 6,
KernelInitPauseFlag = 1 << 8, KernelInitPauseFlag = 1 << 8,
}; };
enum ThreadSchedMasks : u32 { enum class ThreadSchedMasks : u32 {
LowMask = 0x000f, LowMask = 0x000f,
HighMask = 0xfff0, HighMask = 0xfff0,
ForcePauseMask = 0x0070, ForcePauseMask = 0x0070,
}; };
enum class CoreFlags : s32 {
IgnoreIdealCore = -1,
ProcessIdealCore = -2,
DontChangeIdealCore = -3,
};
class Thread final : public WaitObject { class Thread final : public WaitObject {
public: public:
using MutexWaitingThreads = std::vector<SharedPtr<Thread>>; using MutexWaitingThreads = std::vector<SharedPtr<Thread>>;
@ -428,7 +434,8 @@ public:
} }
ThreadSchedStatus GetSchedulingStatus() const { ThreadSchedStatus GetSchedulingStatus() const {
return static_cast<ThreadSchedStatus>(scheduling_state & ThreadSchedMasks::LowMask); return static_cast<ThreadSchedStatus>(scheduling_state &
static_cast<u32>(ThreadSchedMasks::LowMask));
} }
bool IsRunning() const { bool IsRunning() const {
@ -471,7 +478,8 @@ private:
u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
u64 last_running_ticks = 0; ///< CPU tick when thread was last running u64 last_running_ticks = 0; ///< CPU tick when thread was last running
u64 yield_count = 0; ///< Number of innecessaries yields occured. u64 yield_count = 0; ///< Number of redundant yields carried by this thread.
///< a redundant yield is one where no scheduling is changed
s32 processor_id = 0; s32 processor_id = 0;