Scheduler: Add protections for Yield bombing

In case of redundant yields, the scheduler now idles the core for its
timeslice, to avoid continuously re-yielding the same thread over and
over.
Fernando Sahmkow 2019-09-10 10:23:43 -04:00 committed by FernandoS27
parent 82218c925a
commit 103f3a2fe5
5 changed files with 31 additions and 24 deletions
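
At its core this is a contract change: every yield path now returns whether the yield was redundant (the yielding thread immediately wins its slot back), and the caller idles the core for the timeslice instead of requesting another reschedule. A minimal runnable sketch of that pattern, using simplified stand-in types rather than the emulator's real classes:

#include <cstdio>
#include <deque>

// Hypothetical stand-ins for illustration; not suyu's actual types.
struct Thread {
    int id;
};

struct MiniScheduler {
    std::deque<Thread*> run_queue; // one core, one priority level

    // Rotate the yielding thread to the back of its queue and report
    // whether the yield was redundant, i.e. it wins the front again.
    bool YieldThread(Thread* yielding) {
        run_queue.pop_front();
        run_queue.push_back(yielding);
        return run_queue.front() == yielding;
    }
};

int main() {
    Thread only{1};
    MiniScheduler sched;
    sched.run_queue.push_back(&only);

    for (int i = 0; i < 3; ++i) {
        if (sched.YieldThread(&only)) {
            // Redundant yield: idle the core for its timeslice so a
            // yield bomb cannot spin the scheduler.
            std::printf("yield %d: redundant, idling core\n", i);
        } else {
            std::printf("yield %d: rescheduling\n", i);
        }
    }
    return 0;
}

With a single runnable thread every yield is redundant, so the loop idles three times rather than forcing three back-to-back reschedules; that is the bombing case this commit defuses.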

src/core/hle/kernel/scheduler.cpp

@@ -118,7 +118,7 @@ void GlobalScheduler::SelectThread(u32 core) {
  * YieldThread takes a thread and moves it to the back of its priority list.
  * This operation can be redundant, in which case no scheduling is changed.
  */
-void GlobalScheduler::YieldThread(Thread* yielding_thread) {
+bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
     // Note: caller should use critical section, etc.
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
     const u32 priority = yielding_thread->GetPriority();
@@ -129,7 +129,7 @@ void GlobalScheduler::YieldThread(Thread* yielding_thread) {
     scheduled_queue[core_id].yield(priority);
     Thread* winner = scheduled_queue[core_id].front(priority);
-    AskForReselectionOrMarkRedundant(yielding_thread, winner);
+    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
 /*
@@ -138,7 +138,7 @@ void GlobalScheduler::YieldThread(Thread* yielding_thread) {
  * a better priority than the next thread in the core.
  * This operation can be redundant, in which case no scheduling is changed.
  */
-void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
+bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
@@ -186,7 +186,7 @@ void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
         winner = next_thread;
     }
-    AskForReselectionOrMarkRedundant(yielding_thread, winner);
+    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
 /*
@@ -195,7 +195,7 @@ void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
  * a suggested thread is obtained instead.
  * This operation can be redundant, in which case no scheduling is changed.
  */
-void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
+bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
     Thread* winner = nullptr;
@@ -235,7 +235,7 @@ void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
         }
     }
-    AskForReselectionOrMarkRedundant(yielding_thread, winner);
+    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
 void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
@@ -248,13 +248,15 @@ void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
     scheduled_queue[core].add(thread, priority, false);
 }
 
-void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
+bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
     if (current_thread == winner) {
         // TODO(blinkhawk): manage redundant operations, this is not implemented.
         // as its mostly an optimization.
         // current_thread->SetRedundantSchedulerOperation();
+        return true;
     } else {
         reselection_pending.store(true, std::memory_order_release);
+        return false;
     }
 }
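
For context on the container calls above: yield(priority) on Common::MultiLevelQueue moves the thread at the front of that priority level to the level's back, and front(priority) then reads the new head, the candidate "winner". A rough model of that rotation, with a plain std::deque standing in for the real multi-level structure:

#include <cassert>
#include <deque>

// Rotate the front element of one priority level to the back and return
// the new front ("winner"). Stand-in for MultiLevelQueue::yield + front.
template <typename T>
T YieldAndPickWinner(std::deque<T>& level) {
    T yielding = level.front();
    level.pop_front();
    level.push_back(yielding);
    return level.front();
}

int main() {
    std::deque<int> level{7}; // only one thread at this priority
    assert(YieldAndPickWinner(level) == 7); // winner == yielder: redundant

    level = {7, 9};
    assert(YieldAndPickWinner(level) == 9); // another thread takes over
    return 0;
}

When the winner equals the yielder, AskForReselectionOrMarkRedundant now returns true and no reselection is flagged, which is exactly the case the caller turns into an idle.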

src/core/hle/kernel/scheduler.h

@@ -115,7 +115,7 @@ public:
      * YieldThread takes a thread and moves it to the back of its priority list.
      * This operation can be redundant, in which case no scheduling is changed.
      */
-    void YieldThread(Thread* thread);
+    bool YieldThread(Thread* thread);
 
     /*
      * YieldThreadAndBalanceLoad takes a thread and moves it to the back of its priority list.
@@ -123,7 +123,7 @@ public:
      * a better priority than the next thread in the core.
      * This operation can be redundant, in which case no scheduling is changed.
      */
-    void YieldThreadAndBalanceLoad(Thread* thread);
+    bool YieldThreadAndBalanceLoad(Thread* thread);
 
     /*
      * YieldThreadAndWaitForLoadBalancing takes a thread and moves it out of the scheduling queue
@@ -131,7 +131,7 @@ public:
      * a suggested thread is obtained instead.
      * This operation can be redundant, in which case no scheduling is changed.
      */
-    void YieldThreadAndWaitForLoadBalancing(Thread* thread);
+    bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
 
     u32 CpuCoresCount() const {
         return NUM_CPU_CORES;
@@ -146,7 +146,7 @@ public:
     }
 
 private:
-    void AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner);
+    bool AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner);
 
     static constexpr u32 min_regular_priority = 2;
     std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
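
The header comments distinguish two per-core containers: the scheduled queue of runnable threads and a suggested queue of threads that could be migrated in. A toy, single-core reading of the "wait for load balancing" flavor under those assumed semantics (the real implementation also scans other cores' queues):

#include <deque>
#include <iostream>

// Toy per-core state; a hypothetical simplification of the real scheduler.
struct Core {
    std::deque<int> scheduled; // runnable threads, front is current
    std::deque<int> suggested; // threads offered for migration
};

// Sketch: the yielder leaves the scheduled queue for the suggested queue,
// and a suggested thread, if any other exists, is adopted in its place.
// Returns true when the yield was redundant (nobody else to run).
bool YieldAndWaitForLoadBalancing(Core& core) {
    const int yielder = core.scheduled.front();
    core.scheduled.pop_front();
    core.suggested.push_back(yielder);
    if (core.suggested.front() == yielder) {
        return true; // redundant: the yielder is the only candidate
    }
    core.scheduled.push_front(core.suggested.front());
    core.suggested.pop_front();
    return false;
}

int main() {
    Core core;
    core.scheduled = {3}; // a lone thread yielding to itself
    std::cout << "redundant: " << YieldAndWaitForLoadBalancing(core) << '\n';
    return 0;
}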

src/core/hle/kernel/svc.cpp

@@ -1556,17 +1556,18 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
     auto& scheduler = system.CurrentScheduler();
     auto* const current_thread = scheduler.GetCurrentThread();
+    bool redundant = false;
 
     if (nanoseconds <= 0) {
         switch (static_cast<SleepType>(nanoseconds)) {
         case SleepType::YieldWithoutLoadBalancing:
-            current_thread->YieldSimple();
+            redundant = current_thread->YieldSimple();
            break;
         case SleepType::YieldWithLoadBalancing:
-            current_thread->YieldAndBalanceLoad();
+            redundant = current_thread->YieldAndBalanceLoad();
            break;
         case SleepType::YieldAndWaitForLoadBalancing:
-            current_thread->YieldAndWaitForLoadBalancing();
+            redundant = current_thread->YieldAndWaitForLoadBalancing();
            break;
         default:
             UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
@@ -1575,7 +1576,11 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
         current_thread->Sleep(nanoseconds);
     }
 
-    system.PrepareReschedule(current_thread->GetProcessorID());
+    if (redundant) {
+        system.CoreTiming().Idle();
+    } else {
+        system.PrepareReschedule(current_thread->GetProcessorID());
+    }
 }
 
 /// Wait process wide key atomic
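
The switch keys on the special non-positive nanosecond values that svcSleepThread reserves for yields; any other non-positive value trips the UNREACHABLE_MSG default, while positive values fall through to a real timed sleep. The SleepType enum itself is declared earlier in this function and is untouched by the diff; its mapping is approximately the following (reconstructed for reference, not copied from this commit):

#include <cstdint>

using s64 = std::int64_t; // matches the codebase's common type alias

// Approximate shape of the enum the switch above casts to; defined
// earlier in SleepThread and unchanged by this commit.
enum class SleepType : s64 {
    YieldWithoutLoadBalancing = 0,
    YieldWithLoadBalancing = -1,
    YieldAndWaitForLoadBalancing = -2,
};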

src/core/hle/kernel/thread.cpp

@@ -373,19 +373,19 @@ void Thread::Sleep(s64 nanoseconds) {
     WakeAfterDelay(nanoseconds);
 }
 
-void Thread::YieldSimple() {
+bool Thread::YieldSimple() {
     auto& scheduler = kernel.GlobalScheduler();
-    scheduler.YieldThread(this);
+    return scheduler.YieldThread(this);
 }
 
-void Thread::YieldAndBalanceLoad() {
+bool Thread::YieldAndBalanceLoad() {
     auto& scheduler = kernel.GlobalScheduler();
-    scheduler.YieldThreadAndBalanceLoad(this);
+    return scheduler.YieldThreadAndBalanceLoad(this);
 }
 
-void Thread::YieldAndWaitForLoadBalancing() {
+bool Thread::YieldAndWaitForLoadBalancing() {
     auto& scheduler = kernel.GlobalScheduler();
-    scheduler.YieldThreadAndWaitForLoadBalancing(this);
+    return scheduler.YieldThreadAndWaitForLoadBalancing(this);
 }
 
 void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {

src/core/hle/kernel/thread.h

@@ -408,13 +408,13 @@ public:
     void Sleep(s64 nanoseconds);
 
     /// Yields this thread without rebalancing loads.
-    void YieldSimple();
+    bool YieldSimple();
 
     /// Yields this thread and does a load rebalancing.
-    void YieldAndBalanceLoad();
+    bool YieldAndBalanceLoad();
 
     /// Yields this thread; if the core is left idle, loads are rebalanced.
-    void YieldAndWaitForLoadBalancing();
+    bool YieldAndWaitForLoadBalancing();
 
     ThreadSchedStatus GetSchedulingStatus() const {
         return static_cast<ThreadSchedStatus>(scheduling_state & ThreadSchedMasks::LowMask);