
kernel: convert KAbstractSchedulerLock

Liam 2023-03-06 22:31:50 -05:00
parent 467adc1acd
commit 7322c99e5f


@@ -14,74 +14,67 @@
 namespace Kernel {
 
 class KernelCore;
+class GlobalSchedulerContext;
 
 template <typename SchedulerType>
 class KAbstractSchedulerLock {
 public:
-    explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}
+    explicit KAbstractSchedulerLock(KernelCore& kernel) : m_kernel{kernel} {}
 
     bool IsLockedByCurrentThread() const {
-        return owner_thread == GetCurrentThreadPointer(kernel);
+        return m_owner_thread == GetCurrentThreadPointer(m_kernel);
     }
 
     void Lock() {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (kernel.IsShuttingDown()) {
-            return;
-        }
-
-        if (IsLockedByCurrentThread()) {
+        if (this->IsLockedByCurrentThread()) {
             // If we already own the lock, the lock count should be > 0.
             // For debug, ensure this is true.
-            ASSERT(lock_count > 0);
+            ASSERT(m_lock_count > 0);
         } else {
             // Otherwise, we want to disable scheduling and acquire the spinlock.
-            SchedulerType::DisableScheduling(kernel);
-            spin_lock.Lock();
+            SchedulerType::DisableScheduling(m_kernel);
+            m_spin_lock.Lock();
 
-            ASSERT(lock_count == 0);
-            ASSERT(owner_thread == nullptr);
+            ASSERT(m_lock_count == 0);
+            ASSERT(m_owner_thread == nullptr);
 
             // Take ownership of the lock.
-            owner_thread = GetCurrentThreadPointer(kernel);
+            m_owner_thread = GetCurrentThreadPointer(m_kernel);
         }
 
         // Increment the lock count.
-        lock_count++;
+        m_lock_count++;
     }
 
     void Unlock() {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (kernel.IsShuttingDown()) {
-            return;
-        }
-
-        ASSERT(IsLockedByCurrentThread());
-        ASSERT(lock_count > 0);
+        ASSERT(this->IsLockedByCurrentThread());
+        ASSERT(m_lock_count > 0);
 
         // Release an instance of the lock.
-        if ((--lock_count) == 0) {
+        if ((--m_lock_count) == 0) {
             // Perform a memory barrier here.
             std::atomic_thread_fence(std::memory_order_seq_cst);
 
             // We're no longer going to hold the lock. Take note of what cores need scheduling.
             const u64 cores_needing_scheduling =
-                SchedulerType::UpdateHighestPriorityThreads(kernel);
+                SchedulerType::UpdateHighestPriorityThreads(m_kernel);
 
             // Note that we no longer hold the lock, and unlock the spinlock.
-            owner_thread = nullptr;
-            spin_lock.Unlock();
+            m_owner_thread = nullptr;
+            m_spin_lock.Unlock();
 
             // Enable scheduling, and perform a rescheduling operation.
-            SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
+            SchedulerType::EnableScheduling(m_kernel, cores_needing_scheduling);
         }
     }
 
 private:
-    KernelCore& kernel;
-    KAlignedSpinLock spin_lock{};
-    s32 lock_count{};
-    std::atomic<KThread*> owner_thread{};
+    friend class GlobalSchedulerContext;
+
+    KernelCore& m_kernel;
+    KAlignedSpinLock m_spin_lock{};
+    s32 m_lock_count{};
+    std::atomic<KThread*> m_owner_thread{};
};
 
 } // namespace Kernel
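
For readers skimming the diff: aside from the m_-prefix member rename, the notable behavioral change is that Lock() and Unlock() no longer early-return when the kernel is shutting down. The class itself is a recursive, owner-tracked lock: re-acquisition by the owning thread only bumps a count, and the spinlock plus the disable/enable-scheduling hooks fire only on the outermost acquire and release. The following is a minimal standalone sketch of that pattern using standard C++ primitives; it is illustrative only, not suyu code. RecursiveSchedulerLock is a hypothetical name, std::mutex stands in for KAlignedSpinLock, and the scheduling hooks are reduced to comments.

    // Illustrative sketch only; not suyu code.
    #include <atomic>
    #include <cassert>
    #include <mutex>
    #include <thread>

    class RecursiveSchedulerLock {
    public:
        bool IsLockedByCurrentThread() const {
            return m_owner.load() == std::this_thread::get_id();
        }

        void Lock() {
            if (IsLockedByCurrentThread()) {
                // Re-entrant acquire by the owning thread: only the count changes.
                assert(m_lock_count > 0);
            } else {
                // Outermost acquire: the real class disables scheduling here,
                // then takes the spinlock and records ownership.
                m_spin_lock.lock();
                assert(m_lock_count == 0);
                m_owner.store(std::this_thread::get_id());
            }
            ++m_lock_count;
        }

        void Unlock() {
            assert(IsLockedByCurrentThread());
            assert(m_lock_count > 0);
            if (--m_lock_count == 0) {
                // Outermost release: drop ownership and the spinlock; the real
                // class then re-enables scheduling on the cores that need it.
                m_owner.store(std::thread::id{});
                m_spin_lock.unlock();
            }
        }

    private:
        std::mutex m_spin_lock;                  // stands in for KAlignedSpinLock
        int m_lock_count{};                      // stands in for s32 m_lock_count
        std::atomic<std::thread::id> m_owner{};  // stands in for m_owner_thread
    };

    int main() {
        RecursiveSchedulerLock lock;
        lock.Lock();
        lock.Lock();   // nested acquire from the owning thread is fine
        lock.Unlock();
        lock.Unlock(); // fully released only here
    }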