// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
|
namespace Kernel {
|
|
|
|
|
2021-11-10 04:02:11 +01:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
|
|
|
|
private:
|
|
|
|
using ThreadListNode = KSynchronizationObject::ThreadListNode;
|
|
|
|
|
|
|
|
private:
|
|
|
|
KSynchronizationObject** m_objects;
|
|
|
|
ThreadListNode* m_nodes;
|
|
|
|
s32 m_count;
|
|
|
|
|
|
|
|
public:
|
|
|
|
ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o,
|
|
|
|
ThreadListNode* n, s32 c)
|
|
|
|
: KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) { // ...
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
|
|
|
|
ResultCode wait_result) override {
|
|
|
|
// Determine the sync index, and unlink all nodes.
|
|
|
|
s32 sync_index = -1;
|
|
|
|
for (auto i = 0; i < m_count; ++i) {
|
|
|
|
// Check if this is the signaled object.
|
|
|
|
if (m_objects[i] == signaled_object && sync_index == -1) {
|
|
|
|
sync_index = i;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unlink the current node from the current object.
|
|
|
|
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set the waiting thread's sync index.
|
|
|
|
waiting_thread->SetSyncedIndex(sync_index);
|
|
|
|
|
|
|
|
// Set the waiting thread as not cancellable.
|
|
|
|
waiting_thread->ClearCancellable();
|
|
|
|
|
|
|
|
// Invoke the base end wait handler.
|
|
|
|
KThreadQueue::EndWait(waiting_thread, wait_result);
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
|
|
|
|
bool cancel_timer_task) override {
|
|
|
|
// Remove all nodes from our list.
|
|
|
|
for (auto i = 0; i < m_count; ++i) {
|
|
|
|
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set the waiting thread as not cancellable.
|
|
|
|
waiting_thread->ClearCancellable();
|
|
|
|
|
|
|
|
// Invoke the base cancel wait handler.
|
|
|
|
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
2021-04-04 04:11:46 +02:00
|
|
|
// Finalizes this synchronization object: runs the derived class's
// finalization hook first, then the base KAutoObject finalization.
void KSynchronizationObject::Finalize() {
    // Virtual hook for concrete synchronization-object types.
    this->OnFinalizeSynchronizationObject();
    KAutoObject::Finalize();
}
|
|
|
|
|
2021-05-08 18:11:36 +02:00
|
|
|
// Waits until any of the given synchronization objects is signaled, the
// timeout expires, the wait is cancelled, or the thread is asked to terminate.
//
// On success, writes the index of the signaled object to out_index and returns
// ResultSuccess. Otherwise returns ResultTimedOut, ResultCancelled, or
// ResultTerminationRequested. A timeout of 0 polls without sleeping; timeout
// is presumably in nanoseconds as with other HLE kernel waits — TODO confirm.
ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
                                        KSynchronizationObject** objects, const s32 num_objects,
                                        s64 timeout) {
    // Allocate space for the per-object thread nodes.
    // NOTE(review): this is heap-backed std::vector storage, not actual stack
    // space as the inherited "on stack" wording would suggest.
    std::vector<ThreadListNode> thread_nodes(num_objects);

    // Prepare for wait: the queue holds the objects/nodes so its callbacks can
    // unlink everything when the wait ends.
    KThread* thread = GetCurrentThreadPointer(kernel_ctx);
    ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects,
                                                            thread_nodes.data(), num_objects);

    {
        // Setup the scheduling lock and sleep. Its destructor performs the
        // actual sleep unless CancelSleep() is called first.
        KScopedSchedulerLockAndSleep slp(kernel_ctx, thread, timeout);

        // Check if the thread should terminate.
        if (thread->IsTerminationRequested()) {
            slp.CancelSleep();
            return ResultTerminationRequested;
        }

        // Check if any of the objects are already signaled; if so, report the
        // first such index without sleeping.
        for (auto i = 0; i < num_objects; ++i) {
            ASSERT(objects[i] != nullptr);

            if (objects[i]->IsSignaled()) {
                *out_index = i;
                slp.CancelSleep();
                return ResultSuccess;
            }
        }

        // Check if the timeout is zero (pure poll — nothing was signaled).
        if (timeout == 0) {
            slp.CancelSleep();
            return ResultTimedOut;
        }

        // Check if waiting was canceled before we started; consume the
        // cancellation flag so it does not affect a later wait.
        if (thread->IsWaitCancelled()) {
            slp.CancelSleep();
            thread->ClearWaitCancelled();
            return ResultCancelled;
        }

        // Add the waiters: link one node per object so any of them can wake us.
        for (auto i = 0; i < num_objects; ++i) {
            thread_nodes[i].thread = thread;
            thread_nodes[i].next = nullptr;

            objects[i]->LinkNode(std::addressof(thread_nodes[i]));
        }

        // Mark the thread as cancellable (nodes must be linked first, so the
        // queue's CancelWait can unlink them).
        thread->SetCancellable();

        // Clear the thread's synced index; NotifyAvailable fills it in later.
        thread->SetSyncedIndex(-1);

        // Wait for an object to be signaled.
        thread->BeginWait(std::addressof(wait_queue));
        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
    }
    // The scoped lock/sleep destructor has run: the sleep (if any) completed
    // and the wait queue's callbacks have already unlinked all nodes.

    // Set the output index (as recorded by the wait queue).
    *out_index = thread->GetSyncedIndex();

    // Get the wait result.
    return thread->GetWaitResult();
}
|
|
|
|
|
2021-05-08 18:11:36 +02:00
|
|
|
// Constructs the synchronization object, delegating registration to the
// KAutoObjectWithList base.
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
    : KAutoObjectWithList{kernel_} {}
|
2021-01-31 10:38:57 +01:00
|
|
|
|
// Defaulted destructor; teardown work is done via Finalize(), not here.
KSynchronizationObject::~KSynchronizationObject() = default;
2020-12-22 07:36:53 +01:00
|
|
|
|
|
|
|
// Notifies every thread currently on this object's wait list that the object
// has become available, passing `result` along. Does nothing unless the
// object reports itself signaled. Runs under the scheduler lock.
void KSynchronizationObject::NotifyAvailable(ResultCode result) {
    KScopedSchedulerLock lock(kernel);

    // Nothing to notify unless this object is actually signaled.
    if (!this->IsSignaled()) {
        return;
    }

    // Walk the wait list and inform each waiting thread.
    for (auto* node = thread_list_head; node != nullptr; node = node->next) {
        node->thread->NotifyAvailable(this, result);
    }
}
|
|
|
|
|
2020-12-31 08:01:08 +01:00
|
|
|
std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() const {
|
|
|
|
std::vector<KThread*> threads;
|
2020-12-22 07:36:53 +01:00
|
|
|
|
|
|
|
// If debugging, dump the list of waiters.
|
|
|
|
{
|
|
|
|
KScopedSchedulerLock lock(kernel);
|
|
|
|
for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
|
|
|
|
threads.emplace_back(cur_node->thread);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return threads;
|
|
|
|
}
|
|
|
|
} // namespace Kernel
|