
hle: kernel: Migrate some code from Common::SpinLock to KSpinLock.

bunnei 2021-02-13 01:29:32 -08:00
parent 541b4353e4
commit 5872561077
5 changed files with 25 additions and 25 deletions
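
For context, the change at each call site is mechanical: Common::SpinLock models the standard C++ Lockable interface (lock()/unlock(), usable with std::scoped_lock), while KSpinLock exposes kernel-style Lock()/Unlock() and is taken through the KScopedSpinLock RAII alias. The following is a minimal, self-contained sketch of that pattern; the class bodies below are illustrative stand-ins, not the emulator's actual implementations.

#include <atomic>

// Stand-in spin lock with the kernel-style interface (illustrative only; the
// real KSpinLock lives in core/hle/kernel/k_spin_lock.h).
class KSpinLock {
public:
    void Lock() {
        // Busy-wait until test_and_set reports the flag was previously clear.
        while (lck.test_and_set(std::memory_order_acquire)) {
        }
    }
    void Unlock() {
        lck.clear(std::memory_order_release);
    }

private:
    std::atomic_flag lck = ATOMIC_FLAG_INIT;
};

// Stand-in for KScopedLock<T>, which KScopedSpinLock aliases.
template <typename T>
class KScopedLock {
public:
    explicit KScopedLock(T& lock) : lock_ptr{&lock} {
        lock_ptr->Lock();
    }
    ~KScopedLock() {
        lock_ptr->Unlock();
    }
    KScopedLock(const KScopedLock&) = delete;
    KScopedLock& operator=(const KScopedLock&) = delete;

private:
    T* lock_ptr;
};

using KScopedSpinLock = KScopedLock<KSpinLock>;

int main() {
    KSpinLock guard;

    // Before: std::scoped_lock lock{guard};  (Common::SpinLock is Lockable)
    // After:
    {
        KScopedSpinLock lk{guard};
    }

    // Before: guard.lock(); ... guard.unlock();
    // After:
    guard.Lock();
    guard.Unlock();
    return 0;
}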


@@ -62,7 +62,7 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
 }
 u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
-    std::scoped_lock lock{guard};
+    KScopedSpinLock lk{guard};
     if (KThread* prev_highest_thread = state.highest_priority_thread;
         prev_highest_thread != highest_thread) {
         if (prev_highest_thread != nullptr) {
@@ -637,11 +637,11 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }
-    guard.lock();
+    guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
-        guard.unlock();
+        guard.Unlock();
     }
 }
@@ -669,7 +669,7 @@ void KScheduler::Unload(KThread* thread) {
         } else {
             prev_thread = nullptr;
         }
-        thread->context_guard.unlock();
+        thread->context_guard.Unlock();
     }
 }
@@ -713,7 +713,7 @@ void KScheduler::ScheduleImpl() {
     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
-        guard.unlock();
+        guard.Unlock();
         return;
     }
@@ -732,7 +732,7 @@ void KScheduler::ScheduleImpl() {
     } else {
         old_context = &idle_thread->GetHostContext();
     }
-    guard.unlock();
+    guard.Unlock();
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
     /// When a thread wakes up, the scheduler may have changed to other in another core.
@@ -748,24 +748,24 @@ void KScheduler::OnSwitch(void* this_scheduler) {
 void KScheduler::SwitchToCurrent() {
     while (true) {
         {
-            std::scoped_lock lock{guard};
+            KScopedSpinLock lk{guard};
             current_thread.store(state.highest_priority_thread);
             state.needs_scheduling.store(false);
         }
         const auto is_switch_pending = [this] {
-            std::scoped_lock lock{guard};
+            KScopedSpinLock lk{guard};
             return state.needs_scheduling.load();
         };
         do {
             auto next_thread = current_thread.load();
             if (next_thread != nullptr) {
-                next_thread->context_guard.lock();
+                next_thread->context_guard.Lock();
                 if (next_thread->GetRawState() != ThreadState::Runnable) {
-                    next_thread->context_guard.unlock();
+                    next_thread->context_guard.Unlock();
                     break;
                 }
                 if (next_thread->GetActiveCore() != core_id) {
-                    next_thread->context_guard.unlock();
+                    next_thread->context_guard.Unlock();
                     break;
                 }
             }
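
The scheduler keeps both styles after the migration: where acquire and release stay in one scope (UpdateHighestPriorityThread, SwitchToCurrent), the RAII KScopedSpinLock is used, but RescheduleCurrentCore() and ScheduleImpl() still call guard.Lock()/guard.Unlock() explicitly because the guard is released on a different control path than the one that acquired it (inside the scheduling path, or just before yielding to the switch fiber). Below is a rough, self-contained sketch of that asymmetric hand-off; the function names and bodies are hypothetical stand-ins, not the scheduler's real code.

#include <atomic>

// Illustrative stand-in lock with the kernel-style Lock()/Unlock() interface.
struct SpinLock {
    void Lock() {
        while (flag.test_and_set(std::memory_order_acquire)) {
        }
    }
    void Unlock() {
        flag.clear(std::memory_order_release);
    }
    std::atomic_flag flag = ATOMIC_FLAG_INIT;
};

SpinLock guard;

// Hypothetical stand-in for the scheduling path: entered with the guard held,
// and responsible for releasing it before handing control elsewhere.
void ScheduleSketch() {
    // ... pick the next thread while the protected state is consistent ...
    guard.Unlock(); // released here, not in the caller
}

// Mirrors the shape of RescheduleCurrentCore(): the guard is taken
// unconditionally, but only the "nothing to do" branch releases it locally,
// so a single RAII scope cannot express this ownership transfer.
void RescheduleSketch(bool needs_scheduling) {
    guard.Lock();
    if (needs_scheduling) {
        ScheduleSketch(); // ownership of the guard transfers with the call
    } else {
        guard.Unlock();
    }
}

int main() {
    RescheduleSketch(true);
    RescheduleSketch(false);
}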


@@ -2,19 +2,16 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
 #pragma once
 #include <atomic>
 #include "common/common_types.h"
-#include "common/spin_lock.h"
 #include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_priority_queue.h"
 #include "core/hle/kernel/k_scheduler_lock.h"
 #include "core/hle/kernel/k_scoped_lock.h"
+#include "core/hle/kernel/k_spin_lock.h"
 namespace Common {
 class Fiber;
@@ -195,7 +192,7 @@ private:
     u64 last_context_switch_time{};
     const s32 core_id;
-    Common::SpinLock guard{};
+    KSpinLock guard{};
 };
 class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {


@@ -2,14 +2,11 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
 #pragma once
 #include "common/assert.h"
-#include "common/spin_lock.h"
 #include "core/hardware_properties.h"
+#include "core/hle/kernel/k_spin_lock.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
@@ -34,7 +31,7 @@ public:
         } else {
             // Otherwise, we want to disable scheduling and acquire the spinlock.
             SchedulerType::DisableScheduling(kernel);
-            spin_lock.lock();
+            spin_lock.Lock();
             // For debug, ensure that our state is valid.
             ASSERT(lock_count == 0);
@@ -58,7 +55,7 @@ public:
             // Note that we no longer hold the lock, and unlock the spinlock.
             owner_thread = nullptr;
-            spin_lock.unlock();
+            spin_lock.Unlock();
             // Enable scheduling, and perform a rescheduling operation.
             SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
@@ -67,7 +64,7 @@ public:
 private:
     KernelCore& kernel;
-    Common::SpinLock spin_lock{};
+    KAlignedSpinLock spin_lock{};
     s32 lock_count{};
     KThread* owner_thread{};
 };
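
The surrounding class (the scheduler lock) is a counting lock built on top of the spin lock: a thread that already owns it just bumps lock_count, while a new owner disables scheduling and takes the spin lock, now a KAlignedSpinLock. Below is a simplified, self-contained sketch of that shape, with stand-in types in place of KThread and the kernel's scheduling hooks; it is not the emulator's actual class.

#include <atomic>
#include <cassert>

// Illustrative stand-in for the underlying spin lock.
struct SpinLock {
    void Lock() {
        while (flag.test_and_set(std::memory_order_acquire)) {
        }
    }
    void Unlock() {
        flag.clear(std::memory_order_release);
    }
    std::atomic_flag flag = ATOMIC_FLAG_INIT;
};

using ThreadId = int; // stand-in for KThread*

// Simplified shape of the scheduler lock: recursive for the owning thread,
// spin lock (plus disabled scheduling) for everyone else.
class SchedulerLockSketch {
public:
    void Lock(ThreadId current) {
        if (owner == current) {
            // Already owned by this thread: just increment the count.
            ++lock_count;
            return;
        }
        // SchedulerType::DisableScheduling(kernel) would happen here.
        spin_lock.Lock();
        assert(lock_count == 0);
        owner = current;
        lock_count = 1;
    }

    void Unlock() {
        if (--lock_count > 0) {
            return; // still held recursively by the owner
        }
        owner = -1;
        spin_lock.Unlock();
        // SchedulerType::EnableScheduling(kernel, ...) would happen here.
    }

private:
    SpinLock spin_lock;
    ThreadId owner = -1;
    int lock_count = 0;
};

int main() {
    SchedulerLockSketch lock;
    lock.Lock(1);
    lock.Lock(1); // recursive acquire by the same thread
    lock.Unlock();
    lock.Unlock();
}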


@@ -28,6 +28,12 @@ private:
     std::atomic_flag lck = ATOMIC_FLAG_INIT;
 };
+// TODO(bunnei): Alias for now, in case we want to implement these accurately in the future.
+using KAlignedSpinLock = KSpinLock;
+using KNotAlignedSpinLock = KSpinLock;
 using KScopedSpinLock = KScopedLock<KSpinLock>;
+using KScopedAlignedSpinLock = KScopedLock<KAlignedSpinLock>;
+using KScopedNotAlignedSpinLock = KScopedLock<KNotAlignedSpinLock>;
 } // namespace Kernel
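
Since the aligned and not-aligned variants are plain aliases for now (per the TODO above), every scoped wrapper collapses to the same type; call sites can still name the more specific alias so a faithful aligned implementation can be swapped in later without touching them. A small sketch of that consequence, using stand-in declarations rather than the real header:

#include <type_traits>

// Stand-ins mirroring the aliases added above (illustrative only; the real
// declarations live in the spin-lock header shown in this diff).
class KSpinLock {};
template <typename T>
class KScopedLock {};

using KAlignedSpinLock = KSpinLock;
using KNotAlignedSpinLock = KSpinLock;

using KScopedSpinLock = KScopedLock<KSpinLock>;
using KScopedAlignedSpinLock = KScopedLock<KAlignedSpinLock>;
using KScopedNotAlignedSpinLock = KScopedLock<KNotAlignedSpinLock>;

// All scoped variants are currently the same type, so code written against the
// specific names compiles unchanged today and keeps working if the aliases are
// later replaced by distinct implementations.
static_assert(std::is_same_v<KAlignedSpinLock, KSpinLock>);
static_assert(std::is_same_v<KScopedAlignedSpinLock, KScopedSpinLock>);
static_assert(std::is_same_v<KScopedNotAlignedSpinLock, KScopedSpinLock>);

int main() {}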


@@ -14,10 +14,10 @@
 #include "common/common_types.h"
 #include "common/intrusive_red_black_tree.h"
-#include "common/spin_lock.h"
 #include "core/arm/arm_interface.h"
 #include "core/hle/kernel/k_affinity_mask.h"
 #include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_spin_lock.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/svc_common.h"
@@ -732,7 +732,7 @@ private:
     s8 priority_inheritance_count{};
     bool resource_limit_release_hint{};
     StackParameters stack_parameters{};
-    Common::SpinLock context_guard{};
+    KSpinLock context_guard{};
     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};