2021-01-20 22:42:27 +01:00
|
|
|
// Copyright 2021 yuzu Emulator Project
|
2014-12-17 06:38:14 +01:00
|
|
|
// Licensed under GPLv2 or any later version
|
2014-11-19 09:49:13 +01:00
|
|
|
// Refer to the license.txt file included.
|
2014-05-10 04:11:18 +02:00
|
|
|
|
2014-06-06 04:35:36 +02:00
|
|
|
#include <algorithm>
|
2018-02-14 06:33:15 +01:00
|
|
|
#include <cinttypes>
|
2018-10-30 05:03:25 +01:00
|
|
|
#include <optional>
|
2014-08-18 05:03:22 +02:00
|
|
|
#include <vector>
|
2018-07-31 14:06:09 +02:00
|
|
|
|
2015-05-06 09:06:12 +02:00
|
|
|
#include "common/assert.h"
|
2021-01-20 22:42:27 +01:00
|
|
|
#include "common/bit_util.h"
|
2021-01-01 11:06:06 +01:00
|
|
|
#include "common/common_funcs.h"
|
2015-05-06 09:06:12 +02:00
|
|
|
#include "common/common_types.h"
|
2020-02-25 03:04:12 +01:00
|
|
|
#include "common/fiber.h"
|
2015-05-06 09:06:12 +02:00
|
|
|
#include "common/logging/log.h"
|
2021-01-20 22:42:27 +01:00
|
|
|
#include "common/scope_exit.h"
|
2014-05-16 00:27:08 +02:00
|
|
|
#include "common/thread_queue_list.h"
|
2014-05-14 04:00:11 +02:00
|
|
|
#include "core/core.h"
|
2020-02-25 03:04:12 +01:00
|
|
|
#include "core/cpu_manager.h"
|
2020-02-12 00:56:24 +01:00
|
|
|
#include "core/hardware_properties.h"
|
2017-05-21 09:11:36 +02:00
|
|
|
#include "core/hle/kernel/errors.h"
|
2017-05-30 01:45:42 +02:00
|
|
|
#include "core/hle/kernel/handle_table.h"
|
2020-12-30 10:14:02 +01:00
|
|
|
#include "core/hle/kernel/k_condition_variable.h"
|
2020-12-03 03:08:35 +01:00
|
|
|
#include "core/hle/kernel/k_scheduler.h"
|
2020-12-04 06:56:02 +01:00
|
|
|
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
2020-12-31 08:01:08 +01:00
|
|
|
#include "core/hle/kernel/k_thread.h"
|
2021-01-20 22:42:27 +01:00
|
|
|
#include "core/hle/kernel/k_thread_queue.h"
|
2018-08-28 18:30:33 +02:00
|
|
|
#include "core/hle/kernel/kernel.h"
|
2020-12-30 10:14:02 +01:00
|
|
|
#include "core/hle/kernel/memory/memory_layout.h"
|
2018-08-02 04:40:00 +02:00
|
|
|
#include "core/hle/kernel/object.h"
|
2016-09-18 02:38:01 +02:00
|
|
|
#include "core/hle/kernel/process.h"
|
2021-01-20 22:42:27 +01:00
|
|
|
#include "core/hle/kernel/resource_limit.h"
|
2021-01-01 11:06:06 +01:00
|
|
|
#include "core/hle/kernel/svc_results.h"
|
2020-02-25 03:04:12 +01:00
|
|
|
#include "core/hle/kernel/time_manager.h"
|
2014-10-23 05:20:01 +02:00
|
|
|
#include "core/hle/result.h"
|
2015-05-13 03:38:29 +02:00
|
|
|
#include "core/memory.h"
|
2014-05-10 04:11:18 +02:00
|
|
|
|
2020-07-15 19:13:31 +02:00
|
|
|
#ifdef ARCHITECTURE_x86_64
|
|
|
|
#include "core/arm/dynarmic/arm_dynarmic_32.h"
|
|
|
|
#include "core/arm/dynarmic/arm_dynarmic_64.h"
|
|
|
|
#endif
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
namespace {
|
|
|
|
// Reset a 32-bit guest thread context to a clean initial state: everything
// zeroed, with the argument in r0, the stack pointer in r13 (SP), and the
// entry point in r15 (PC).
static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
                                 u32 entry_point, u32 arg) {
    context = {};
    context.cpu_registers[13] = stack_top;
    context.cpu_registers[15] = entry_point;
    context.cpu_registers[0] = arg;
}
|
2014-05-14 04:00:11 +02:00
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Reset a 64-bit guest thread context to a clean initial state: everything
// zeroed, argument in x0, dedicated SP/PC fields seeded, and FPCR cleared.
static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
                                 VAddr entry_point, u64 arg) {
    context = {};
    context.sp = stack_top;
    context.pc = entry_point;
    context.cpu_registers[0] = arg;
    // TODO(merry): Perform a hardware test to determine the below value.
    context.fpcr = 0;
}
|
2021-01-20 22:42:27 +01:00
|
|
|
} // namespace
|
|
|
|
|
|
|
|
namespace Kernel {
|
2020-02-11 22:36:39 +01:00
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Construct a thread bound to the given kernel instance. Only the base
// synchronization object and the activity-pause lock are wired up here;
// all remaining thread state is established by Initialize().
KThread::KThread(KernelCore& kernel)
    : KSynchronizationObject{kernel}, activity_pause_lock{kernel} {}
|
2020-12-31 08:01:08 +01:00
|
|
|
// Defaulted destructor; kernel-visible teardown (unregistration, TLS free,
// waking waiters) is performed in Finalize(), not here.
KThread::~KThread() = default;
|
2015-01-31 22:22:40 +01:00
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Initialize all state for a newly-created thread: validates parameters,
// derives the physical core/affinity from the requested virtual ones, resets
// every member field, sets up the TLS region for user threads, initializes
// both 32- and 64-bit guest contexts, and registers with the owning process.
// Returns RESULT_SUCCESS on success. Main threads start Runnable; all other
// types start Initialized.
ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
                               s32 virt_core, Process* owner, ThreadType type) {
    // Assert parameters are valid. (Main threads are exempt from the priority
    // range check; user threads must have an owning process.)
    ASSERT((type == ThreadType::Main) ||
           (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
    ASSERT((owner != nullptr) || (type != ThreadType::User));
    ASSERT(0 <= virt_core && virt_core < static_cast<s32>(Common::BitSize<u64>()));

    // Convert the virtual core to a physical core.
    const s32 phys_core = Core::Hardware::VirtualToPhysicalCoreMap[virt_core];
    ASSERT(0 <= phys_core && phys_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));

    // First, clear the TLS address.
    tls_address = {};

    // Next, assert things based on the type. Main/HighPriority/User all share
    // the owner core/priority-mask checks via fallthrough.
    switch (type) {
    case ThreadType::Main:
        ASSERT(arg == 0);
        [[fallthrough]];
    case ThreadType::HighPriority:
        [[fallthrough]];
    case ThreadType::User:
        // The requested core and priority must be allowed by the owner's masks.
        ASSERT(((owner == nullptr) ||
                (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
        ASSERT(((owner == nullptr) ||
                (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
        break;
    case ThreadType::Kernel:
        UNIMPLEMENTED();
        break;
    default:
        UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
        break;
    }
    // Record the creation type (debugger/introspection aid).
    thread_type_for_debugging = type;

    // Set the ideal core ID and affinity mask.
    virtual_ideal_core_id = virt_core;
    physical_ideal_core_id = phys_core;
    virtual_affinity_mask = 1ULL << virt_core;
    physical_affinity_mask.SetAffinity(phys_core, true);

    // Set the thread state.
    thread_state = (type == ThreadType::Main) ? ThreadState::Runnable : ThreadState::Initialized;

    // Set TLS address.
    tls_address = 0;

    // Set parent and condvar tree.
    parent = nullptr;
    condvar_tree = nullptr;

    // Set sync booleans.
    signaled = false;
    termination_requested = false;
    wait_cancelled = false;
    cancellable = false;

    // Set core ID and wait result.
    core_id = phys_core;
    wait_result = Svc::ResultNoSynchronizationObject;

    // Set priorities (effective priority == base priority until inheritance
    // changes it; see RestorePriority()).
    priority = prio;
    base_priority = prio;

    // Set sync object and waiting lock to null.
    synced_object = nullptr;

    // Initialize sleeping queue.
    sleeping_queue = nullptr;

    // Set suspend flags: no pending requests, all suspend types allowed.
    suspend_request_flags = 0;
    suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask);

    // We're neither debug attached, nor are we nesting our priority inheritance.
    debug_attached = false;
    priority_inheritance_count = 0;

    // We haven't been scheduled, and we have done no light IPC.
    schedule_count = -1;
    last_scheduled_tick = 0;
    light_ipc_data = nullptr;

    // We're not waiting for a lock, and we haven't disabled migration.
    lock_owner = nullptr;
    num_core_migration_disables = 0;

    // We have no waiters, but we do have an entrypoint.
    num_kernel_waiters = 0;

    // Set our current core id.
    current_core_id = phys_core;

    // We haven't released our resource limit hint, and we've spent no time on the cpu.
    resource_limit_release_hint = false;
    cpu_time = 0;

    // Clear our stack parameters.
    std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0,
                sizeof(StackParameters));

    // Setup the TLS, if needed (only user threads get a TLS region; owner is
    // guaranteed non-null for them by the assert above).
    if (type == ThreadType::User) {
        tls_address = owner->CreateTLSRegion();
    }

    // Set parent, if relevant.
    if (owner != nullptr) {
        parent = owner;
        parent->IncrementThreadCount();
    }

    // Initialize thread context (both 32- and 64-bit views are reset; the
    // 32-bit one truncates the stack/entry/arg values).
    ResetThreadContext64(thread_context_64, user_stack_top, func, arg);
    ResetThreadContext32(thread_context_32, static_cast<u32>(user_stack_top),
                         static_cast<u32>(func), static_cast<u32>(arg));

    // Setup the stack parameters.
    StackParameters& sp = GetStackParameters();
    sp.cur_thread = this;
    sp.disable_count = 1;
    SetInExceptionHandler();

    // Set thread ID.
    thread_id = kernel.CreateNewThreadID();

    // We initialized!
    initialized = true;

    // Register ourselves with our parent process; if the process is already
    // suspended, new threads inherit the process-level suspend.
    if (parent != nullptr) {
        parent->RegisterThread(this);
        if (parent->IsSuspended()) {
            RequestSuspend(SuspendType::Process);
        }
    }

    return RESULT_SUCCESS;
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Helper that initializes the given thread, propagating any failure result
// from KThread::Initialize via R_TRY.
ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
                                     VAddr user_stack_top, s32 prio, s32 core, Process* owner,
                                     ThreadType type) {
    // Initialize the thread.
    R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));

    return RESULT_SUCCESS;
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Tear down kernel-visible thread state: unregister from the owner process,
// free the TLS region, wake any threads still waiting on this thread's lock,
// and release the owner's thread-count/resource-limit accounting.
void KThread::Finalize() {
    // If the thread has an owner process, unregister it.
    if (parent != nullptr) {
        parent->UnregisterThread(this);
    }

    // If the thread has a local region, delete it.
    // NOTE(review): relies on a nonzero tls_address implying parent != nullptr
    // (Initialize() only creates a TLS region for user threads, which are
    // asserted to have an owner) — confirm if other paths can set tls_address.
    if (tls_address != 0) {
        parent->FreeTLSRegion(tls_address);
    }

    // Release any waiters, under the scheduler lock.
    {
        ASSERT(lock_owner == nullptr);
        KScopedSchedulerLock sl{kernel};

        auto it = waiter_list.begin();
        while (it != waiter_list.end()) {
            // The thread shouldn't be a kernel waiter.
            it->SetLockOwner(nullptr);
            it->SetSyncedObject(nullptr, Svc::ResultInvalidState);
            it->Wakeup();
            it = waiter_list.erase(it);
        }
    }

    // Decrement the parent process's thread count and return our slot to the
    // process's thread resource limit.
    if (parent != nullptr) {
        parent->DecrementThreadCount();
        parent->GetResourceLimit()->Release(ResourceType::Threads, 1);
    }
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// A thread becomes signaled (for synchronization-object purposes) when it
// terminates; see StartTermination(), which sets this flag.
bool KThread::IsSignaled() const {
    return signaled;
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
void KThread::Wakeup() {
|
|
|
|
KScopedSchedulerLock sl{kernel};
|
|
|
|
|
|
|
|
if (GetState() == ThreadState::Waiting) {
|
|
|
|
if (sleeping_queue != nullptr) {
|
|
|
|
sleeping_queue->WakeupThread(this);
|
|
|
|
} else {
|
|
|
|
SetState(ThreadState::Runnable);
|
|
|
|
}
|
|
|
|
}
|
2016-09-02 14:53:42 +02:00
|
|
|
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Transition this thread into the Terminated state (scheduler lock held):
// release/unpin it from its parent process, signal threads waiting on it as a
// synchronization object, clear it from the scheduler, and register the
// terminated DPC flag.
void KThread::StartTermination() {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Release user exception and unpin, if relevant.
    if (parent != nullptr) {
        parent->ReleaseUserException(this);
        if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) {
            parent->UnpinCurrentThread();
        }
    }

    // Set state to terminated.
    SetState(ThreadState::Terminated);

    // Clear the thread's status as running in parent.
    if (parent != nullptr) {
        parent->ClearRunningThread(this);
    }

    // Signal: termination is what makes a KThread signaled (see IsSignaled()).
    signaled = true;
    NotifyAvailable();

    // Clear previous thread in KScheduler.
    KScheduler::ClearPreviousThread(kernel, this);

    // Register terminated dpc flag.
    RegisterDpc(DpcFlag::Terminated);
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Pin this thread to its current core (scheduler lock held): disables core
// migration (saving the prior ideal core/affinity for Unpin() to restore),
// binds the affinity mask to the current core only, and disallows
// thread-type suspension while pinned.
void KThread::Pin() {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Set ourselves as pinned.
    GetStackParameters().is_pinned = true;

    // Disable core migration.
    ASSERT(num_core_migration_disables == 0);
    {
        ++num_core_migration_disables;

        // Save our ideal state to restore when we're unpinned.
        original_physical_ideal_core_id = physical_ideal_core_id;
        original_physical_affinity_mask = physical_affinity_mask;

        // Bind ourselves to this core.
        const s32 active_core = GetActiveCore();
        const s32 current_core = GetCurrentCoreId(kernel);

        SetActiveCore(current_core);
        physical_ideal_core_id = current_core;
        physical_affinity_mask.SetAffinityMask(1ULL << current_core);

        // Notify the scheduler only if something actually changed.
        if (active_core != current_core || physical_affinity_mask.GetAffinityMask() !=
                                               original_physical_affinity_mask.GetAffinityMask()) {
            KScheduler::OnThreadAffinityMaskChanged(kernel, this, original_physical_affinity_mask,
                                                    active_core);
        }
    }

    // Disallow performing thread suspension.
    {
        // Update our allow flags: mask out the Thread suspend type.
        suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) +
                                         static_cast<u32>(ThreadState::SuspendShift)));

        // Update our state: recompute from the (now reduced) effective
        // suspend flags plus the base state bits.
        const ThreadState old_state = thread_state;
        thread_state = static_cast<ThreadState>(GetSuspendFlags() |
                                                static_cast<u32>(old_state & ThreadState::Mask));
        if (thread_state != old_state) {
            KScheduler::OnThreadStateChanged(kernel, this, old_state);
        }
    }

    // TODO(bunnei): Update our SVC access permissions.
    ASSERT(parent != nullptr);
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Undo a prior Pin() (scheduler lock held): re-enable core migration by
// restoring the saved ideal core/affinity mask, re-allow thread-type
// suspension (unless termination was requested), and wake any threads that
// queued on pinned_waiter_list while we were pinned.
void KThread::Unpin() {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Set ourselves as unpinned.
    GetStackParameters().is_pinned = false;

    // Enable core migration.
    ASSERT(num_core_migration_disables == 1);
    {
        num_core_migration_disables--;

        // Restore our original state.
        const KAffinityMask old_mask = physical_affinity_mask;

        physical_ideal_core_id = original_physical_ideal_core_id;
        physical_affinity_mask = original_physical_affinity_mask;

        if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
            const s32 active_core = GetActiveCore();

            // If the restored mask no longer allows the active core, move to
            // the ideal core, or failing that, the highest allowed core.
            if (!physical_affinity_mask.GetAffinity(active_core)) {
                if (physical_ideal_core_id >= 0) {
                    SetActiveCore(physical_ideal_core_id);
                } else {
                    SetActiveCore(static_cast<s32>(
                        Common::BitSize<u64>() - 1 -
                        std::countl_zero(physical_affinity_mask.GetAffinityMask())));
                }
            }
            KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
        }
    }

    // Allow performing thread suspension (if termination hasn't been requested).
    {
        // Update our allow flags.
        if (!IsTerminationRequested()) {
            suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) +
                                            static_cast<u32>(ThreadState::SuspendShift)));
        }

        // Update our state from the effective suspend flags.
        const ThreadState old_state = thread_state;
        thread_state = static_cast<ThreadState>(GetSuspendFlags() |
                                                static_cast<u32>(old_state & ThreadState::Mask));
        if (thread_state != old_state) {
            KScheduler::OnThreadStateChanged(kernel, this, old_state);
        }
    }

    // TODO(bunnei): Update our SVC access permissions.
    ASSERT(parent != nullptr);

    // Resume any threads that began waiting on us while we were pinned.
    for (auto it = pinned_waiter_list.begin(); it != pinned_waiter_list.end(); ++it) {
        if (it->GetState() == ThreadState::Waiting) {
            it->SetState(ThreadState::Runnable);
        }
    }
}
|
|
|
|
|
|
|
|
// Report the virtual (guest-visible) ideal core and affinity mask.
// Always succeeds.
ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
    // Snapshot both values under the scheduler lock so they're consistent
    // with each other.
    KScopedSchedulerLock sl{kernel};

    *out_ideal_core = virtual_ideal_core_id;
    *out_affinity_mask = virtual_affinity_mask;
    return RESULT_SUCCESS;
}
|
2020-12-31 09:46:09 +01:00
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Report the physical ideal core and affinity mask. While core migration is
// disabled (e.g. while pinned), the live fields hold a temporary binding, so
// the saved originals are reported instead. Always succeeds.
ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
    KScopedSchedulerLock sl{kernel};
    ASSERT(num_core_migration_disables >= 0);

    const bool migration_disabled = (num_core_migration_disables != 0);
    if (migration_disabled) {
        *out_ideal_core = original_physical_ideal_core_id;
        *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
    } else {
        *out_ideal_core = physical_ideal_core_id;
        *out_affinity_mask = physical_affinity_mask.GetAffinityMask();
    }

    return RESULT_SUCCESS;
}
|
2020-12-31 09:46:09 +01:00
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Set this thread's virtual ideal core and affinity mask, translating them to
// physical equivalents. If migration is disabled (thread pinned), only the
// saved originals are edited for later restoration. Afterwards, if the thread
// is running on a core the new mask forbids, the caller waits (via
// pinned_waiter_list) or retries until it migrates off.
// Returns ResultInvalidCombination if the no-update ideal core isn't in the
// mask, or ResultTerminationRequested if the calling thread is terminating.
ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
    ASSERT(parent != nullptr);
    ASSERT(v_affinity_mask != 0);
    KScopedLightLock lk{activity_pause_lock};

    // Set the core mask.
    u64 p_affinity_mask = 0;
    {
        KScopedSchedulerLock sl{kernel};
        ASSERT(num_core_migration_disables >= 0);

        // If the core id is no-update magic, preserve the ideal core id.
        if (core_id == Svc::IdealCoreNoUpdate) {
            core_id = virtual_ideal_core_id;
            R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, Svc::ResultInvalidCombination);
        }

        // Set the virtual core/affinity mask.
        virtual_ideal_core_id = core_id;
        virtual_affinity_mask = v_affinity_mask;

        // Translate the virtual core to a physical core.
        if (core_id >= 0) {
            core_id = Core::Hardware::VirtualToPhysicalCoreMap[core_id];
        }

        // Translate the virtual affinity mask to a physical one, one set bit
        // at a time (v_affinity_mask is consumed in the process).
        while (v_affinity_mask != 0) {
            const u64 next = std::countr_zero(v_affinity_mask);
            v_affinity_mask &= ~(1ULL << next);
            p_affinity_mask |= (1ULL << Core::Hardware::VirtualToPhysicalCoreMap[next]);
        }

        // If we haven't disabled migration, perform an affinity change.
        if (num_core_migration_disables == 0) {
            const KAffinityMask old_mask = physical_affinity_mask;

            // Set our new ideals.
            physical_ideal_core_id = core_id;
            physical_affinity_mask.SetAffinityMask(p_affinity_mask);

            if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                const s32 active_core = GetActiveCore();

                // If the active core is no longer allowed, pick the ideal
                // core, or the highest allowed core if there is no ideal.
                if (active_core >= 0 && !physical_affinity_mask.GetAffinity(active_core)) {
                    const s32 new_core = static_cast<s32>(
                        physical_ideal_core_id >= 0
                            ? physical_ideal_core_id
                            : Common::BitSize<u64>() - 1 -
                                  std::countl_zero(physical_affinity_mask.GetAffinityMask()));
                    SetActiveCore(new_core);
                }
                KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
            }
        } else {
            // Otherwise, we edit the original affinity for restoration later.
            original_physical_ideal_core_id = core_id;
            original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
        }
    }

    // Update the pinned waiter list.
    {
        bool retry_update{};
        bool thread_is_pinned{};
        do {
            // Lock the scheduler.
            KScopedSchedulerLock sl{kernel};

            // Don't do any further management if our termination has been requested.
            R_SUCCEED_IF(IsTerminationRequested());

            // By default, we won't need to retry.
            retry_update = false;

            // Check if the thread is currently running on any core.
            bool thread_is_current{};
            s32 thread_core;
            for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
                 ++thread_core) {
                if (kernel.Scheduler(thread_core).GetCurrentThread() == this) {
                    thread_is_current = true;
                    break;
                }
            }

            // If the thread is currently running, check whether it's no longer allowed under the
            // new mask.
            if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) {
                // If the thread is pinned, we want to wait until it's not pinned.
                if (GetStackParameters().is_pinned) {
                    // Verify that the current thread isn't terminating.
                    R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
                             Svc::ResultTerminationRequested);

                    // Note that the thread was pinned.
                    thread_is_pinned = true;

                    // Wait until the thread isn't pinned any more.
                    pinned_waiter_list.push_back(GetCurrentThread(kernel));
                    GetCurrentThread(kernel).SetState(ThreadState::Waiting);
                } else {
                    // If the thread isn't pinned, release the scheduler lock and retry until it's
                    // not current.
                    retry_update = true;
                }
            }
        } while (retry_update);

        // If the thread was pinned, it no longer is, and we should remove the current thread from
        // our waiter list.
        if (thread_is_pinned) {
            // Lock the scheduler.
            KScopedSchedulerLock sl{kernel};

            // Remove from the list.
            pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
        }
    }

    return RESULT_SUCCESS;
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Change the thread's base priority and re-derive its effective priority,
// which may differ from the base due to priority inheritance from waiters
// (see RestorePriority()).
void KThread::SetBasePriority(s32 value) {
    ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority);

    KScopedSchedulerLock sl{kernel};

    // Change our base priority.
    base_priority = value;

    // Perform a priority restoration.
    RestorePriority(kernel, this);
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Record a suspend request of the given type, then attempt to actually enter
// the suspended state (the attempt is deferred if kernel waiters exist; see
// TrySuspend()).
void KThread::RequestSuspend(SuspendType type) {
    KScopedSchedulerLock sl{kernel};

    // Note the request in our flags.
    const u32 request_bit =
        static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type);
    suspend_request_flags |= (1u << request_bit);

    // Try to perform the suspend.
    TrySuspend();
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Clear a pending suspend request of the given type and recompute the thread
// state from the remaining suspend flags, notifying the scheduler if the
// state actually changed.
void KThread::Resume(SuspendType type) {
    KScopedSchedulerLock sl{kernel};

    // Clear the request in our flags.
    suspend_request_flags &=
        ~(1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));

    // Update our state: effective suspend flags OR'd with the base state bits.
    const ThreadState old_state = thread_state;
    thread_state = static_cast<ThreadState>(GetSuspendFlags() |
                                            static_cast<u32>(old_state & ThreadState::Mask));
    if (thread_state != old_state) {
        KScheduler::OnThreadStateChanged(kernel, this, old_state);
    }
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Cancel this thread's current (cancellable) wait, or record that a cancel
// was requested so a future wait can observe it via wait_cancelled.
void KThread::WaitCancel() {
    KScopedSchedulerLock sl{kernel};

    // Check if we're waiting and cancellable.
    if (GetState() == ThreadState::Waiting && cancellable) {
        if (sleeping_queue != nullptr) {
            // Parked on a queue: let the queue wake us, and leave the cancel
            // flag set for the wait logic to consume.
            sleeping_queue->WakeupThread(this);
            wait_cancelled = true;
        } else {
            // Plain wait: report cancellation via the synced-object result and
            // make the thread runnable; the cancel is consumed immediately.
            SetSyncedObject(nullptr, Svc::ResultCancelled);
            SetState(ThreadState::Runnable);
            wait_cancelled = false;
        }
    } else {
        // Otherwise, note that we cancelled a wait (for a future wait to see).
        wait_cancelled = true;
    }
}
|
|
|
|
|
|
|
|
// Attempt to enter the suspended state (scheduler lock held, suspend already
// requested). If the thread still has kernel waiters, the suspend is deferred
// until they are gone.
void KThread::TrySuspend() {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
    ASSERT(IsSuspendRequested());

    // A thread with outstanding kernel waiters can't be suspended yet.
    if (GetNumKernelWaiters() > 0) {
        return;
    }
    ASSERT(GetNumKernelWaiters() == 0);

    // Safe to suspend now.
    Suspend();
}
|
|
|
|
|
|
|
|
// Apply the pending suspend flags to the thread state (scheduler lock held,
// suspend already requested) and notify the scheduler of the change.
void KThread::Suspend() {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
    ASSERT(IsSuspendRequested());

    // Set our suspend flags in state (suspend bits OR'd with the base state).
    const auto old_state = thread_state;
    thread_state = static_cast<ThreadState>(GetSuspendFlags()) | (old_state & ThreadState::Mask);

    // Note the state change in scheduler.
    KScheduler::OnThreadStateChanged(kernel, this, old_state);
}
|
|
|
|
|
|
|
|
// Strip all suspend bits from the thread state, leaving only the base state,
// and notify the scheduler (scheduler lock held).
void KThread::Continue() {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Clear our suspend flags in state.
    const auto previous_state = thread_state;
    thread_state = previous_state & ThreadState::Mask;

    // Note the state change in scheduler.
    KScheduler::OnThreadStateChanged(kernel, this, previous_state);
}
|
|
|
|
|
|
|
|
// Pause or resume this thread per the requested SVC activity. On pause, the
// caller then waits (if the thread is pinned) or spins on the scheduler lock
// until the thread is no longer running on any core.
// Returns ResultInvalidState if the thread isn't Waiting/Runnable or the
// suspend state doesn't match the request; ResultTerminationRequested if the
// calling thread is terminating.
ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
    // Lock ourselves.
    KScopedLightLock lk(activity_pause_lock);

    // Set the activity.
    {
        // Lock the scheduler.
        KScopedSchedulerLock sl{kernel};

        // Verify our state.
        const auto cur_state = GetState();
        R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
                 Svc::ResultInvalidState);

        // Either pause or resume.
        if (activity == Svc::ThreadActivity::Paused) {
            // Verify that we're not suspended.
            R_UNLESS(!IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState);

            // Suspend.
            RequestSuspend(SuspendType::Thread);
        } else {
            ASSERT(activity == Svc::ThreadActivity::Runnable);

            // Verify that we're suspended.
            R_UNLESS(IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState);

            // Resume.
            Resume(SuspendType::Thread);
        }
    }

    // If the thread is now paused, update the pinned waiter list.
    if (activity == Svc::ThreadActivity::Paused) {
        bool thread_is_pinned{};
        bool thread_is_current{};
        do {
            // Lock the scheduler.
            KScopedSchedulerLock sl{kernel};

            // Don't do any further management if our termination has been requested.
            R_SUCCEED_IF(IsTerminationRequested());

            // Check whether the thread is pinned.
            if (GetStackParameters().is_pinned) {
                // Verify that the current thread isn't terminating.
                R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
                         Svc::ResultTerminationRequested);

                // Note that the thread was pinned and not current.
                thread_is_pinned = true;
                thread_is_current = false;

                // Wait until the thread isn't pinned any more.
                pinned_waiter_list.push_back(GetCurrentThread(kernel));
                GetCurrentThread(kernel).SetState(ThreadState::Waiting);
            } else {
                // Check if the thread is currently running.
                // If it is, we'll need to retry.
                thread_is_current = false;

                for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
                    if (kernel.Scheduler(i).GetCurrentThread() == this) {
                        thread_is_current = true;
                        break;
                    }
                }
            }
        } while (thread_is_current);

        // If the thread was pinned, it no longer is, and we should remove the current thread from
        // our waiter list.
        if (thread_is_pinned) {
            // Lock the scheduler.
            KScopedSchedulerLock sl{kernel};

            // Remove from the list.
            pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
        }
    }

    return RESULT_SUCCESS;
}
|
|
|
|
|
|
|
|
// Copy this thread's user-mode register context into `out` as raw bytes
// (64- or 32-bit layout depending on the owning process), with privileged
// PSTATE/CPSR bits masked away. The thread must be suspended; if it is
// terminating, `out` is left untouched and success is still returned.
ResultCode KThread::GetThreadContext3(std::vector<u8>& out) {
    // Lock ourselves.
    KScopedLightLock lk{activity_pause_lock};

    // Get the context.
    {
        // Lock the scheduler.
        KScopedSchedulerLock sl{kernel};

        // Verify that we're suspended.
        R_UNLESS(IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState);

        // If we're not terminating, get the thread's user context.
        if (!IsTerminationRequested()) {
            if (parent->Is64BitProcess()) {
                // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
                auto context = GetContext64();
                context.pstate &= 0xFF0FFE20;

                out.resize(sizeof(context));
                std::memcpy(out.data(), &context, sizeof(context));
            } else {
                // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
                auto context = GetContext32();
                context.cpsr &= 0xFF0FFE20;

                out.resize(sizeof(context));
                std::memcpy(out.data(), &context, sizeof(context));
            }
        }
    }

    return RESULT_SUCCESS;
}
|
|
|
|
|
2020-12-31 08:01:08 +01:00
|
|
|
// Insert `thread` into our waiter list in priority order (higher-urgency
// waiters first), maintain the kernel-waiter count, and record us as the
// waiter's lock owner. Scheduler lock must be held.
void KThread::AddWaiterImpl(KThread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Find the right spot to insert the waiter: the first existing waiter
    // whose priority value is strictly greater (i.e. less urgent).
    const auto insert_pos =
        std::find_if(waiter_list.begin(), waiter_list.end(), [thread](auto& waiter) {
            return waiter.GetPriority() > thread->GetPriority();
        });

    // Keep track of how many kernel waiters we have.
    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
        ASSERT((num_kernel_waiters++) >= 0);
    }

    // Insert the waiter.
    waiter_list.insert(insert_pos, *thread);
    thread->SetLockOwner(this);
}
|
|
|
|
|
2020-12-31 08:01:08 +01:00
|
|
|
// Remove `thread` from our waiter list, maintain the kernel-waiter count,
// and clear the waiter's lock owner. Scheduler lock must be held.
void KThread::RemoveWaiterImpl(KThread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Keep track of how many kernel waiters we have.
    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
        ASSERT((num_kernel_waiters--) > 0);
    }

    // Remove the waiter.
    waiter_list.erase(waiter_list.iterator_to(*thread));
    thread->SetLockOwner(nullptr);
}
|
2018-08-12 23:35:27 +02:00
|
|
|
|
2020-12-31 08:01:08 +01:00
|
|
|
// Re-evaluates priority inheritance for a thread and propagates any change up the
// chain of lock owners. Must be called with the global scheduler lock held.
void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    while (true) {
        // We want to inherit priority where possible.
        // Lower numeric value means higher priority, so take the minimum of our base
        // priority and the highest-priority waiter (waiter_list is sorted; see
        // AddWaiterImpl, which keeps the front element at the best priority).
        s32 new_priority = thread->GetBasePriority();
        if (thread->HasWaiters()) {
            new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
        }

        // If the priority we would inherit is not different from ours, don't do anything.
        if (new_priority == thread->GetPriority()) {
            return;
        }

        // Ensure we don't violate condition variable red black tree invariants.
        // The tree is ordered (in part) by priority, so the node must be detached
        // before the priority changes and re-inserted afterwards.
        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
            BeforeUpdatePriority(kernel, cv_tree, thread);
        }

        // Change the priority.
        const s32 old_priority = thread->GetPriority();
        thread->SetPriority(new_priority);

        // Restore the condition variable, if relevant.
        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
            AfterUpdatePriority(kernel, cv_tree, thread);
        }

        // Update the scheduler.
        KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);

        // Keep the lock owner up to date.
        // If nobody owns a lock this thread is waiting on, inheritance stops here.
        KThread* lock_owner = thread->GetLockOwner();
        if (lock_owner == nullptr) {
            return;
        }

        // Update the thread in the lock owner's sorted list, and continue inheriting.
        // Remove + re-add re-sorts the thread under its new priority; then the loop
        // repeats with the owner, walking up the lock-ownership chain.
        lock_owner->RemoveWaiterImpl(thread);
        lock_owner->AddWaiterImpl(thread);
        thread = lock_owner;
    }
}
|
2018-04-21 03:15:16 +02:00
|
|
|
|
2020-12-31 08:01:08 +01:00
|
|
|
// Adds a waiter to this thread and re-evaluates our priority inheritance,
// since the new waiter may have a higher priority than we currently hold.
void KThread::AddWaiter(KThread* thread) {
    AddWaiterImpl(thread);
    RestorePriority(kernel, this);
}
|
2019-11-15 01:13:18 +01:00
|
|
|
|
2020-12-31 08:01:08 +01:00
|
|
|
// Removes a waiter from this thread and re-evaluates our priority inheritance,
// since our inherited priority may have come from the departing waiter.
void KThread::RemoveWaiter(KThread* thread) {
    RemoveWaiterImpl(thread);
    RestorePriority(kernel, this);
}
|
2018-04-21 03:15:16 +02:00
|
|
|
|
2020-12-31 08:01:08 +01:00
|
|
|
// Removes every waiter blocked on the given address key, selecting the first such
// waiter found as the next lock owner and transferring the remaining matching
// waiters to it. Returns the next lock owner (or nullptr if none matched) and
// writes the number of removed waiters to out_num_waiters.
// Must be called with the global scheduler lock held.
KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    s32 num_waiters{};
    KThread* next_lock_owner{};
    auto it = waiter_list.begin();
    while (it != waiter_list.end()) {
        if (it->GetAddressKey() == key) {
            KThread* thread = std::addressof(*it);

            // Keep track of how many kernel waiters we have.
            if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
                ASSERT((num_kernel_waiters--) > 0);
            }
            // erase() returns the next valid iterator, keeping the traversal safe.
            it = waiter_list.erase(it);

            // Update the next lock owner.
            // waiter_list is priority-sorted, so the first match is the
            // highest-priority waiter on this key; it becomes the new owner.
            if (next_lock_owner == nullptr) {
                next_lock_owner = thread;
                next_lock_owner->SetLockOwner(nullptr);
            } else {
                // Subsequent matches now wait on the new owner instead of us.
                next_lock_owner->AddWaiterImpl(thread);
            }
            num_waiters++;
        } else {
            it++;
        }
    }

    // Do priority updates, if we have a next owner.
    // Both we (who lost waiters) and the new owner (who gained them) may need
    // their inherited priority recomputed.
    if (next_lock_owner) {
        RestorePriority(kernel, this);
        RestorePriority(kernel, next_lock_owner);
    }

    // Return output.
    *out_num_waiters = num_waiters;
    return next_lock_owner;
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Transitions this (Initialized) thread into the Runnable state, retrying if the
// current thread is asked to suspend while holding the scheduler lock.
ResultCode KThread::Run() {
    while (true) {
        KScopedSchedulerLock lk{kernel};

        // If either this thread or the current thread are requesting termination, note it.
        R_UNLESS(!IsTerminationRequested(), Svc::ResultTerminationRequested);
        R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
                 Svc::ResultTerminationRequested);

        // Ensure our thread state is correct.
        R_UNLESS(GetState() == ThreadState::Initialized, Svc::ResultInvalidState);

        // If the current thread has been asked to suspend, suspend it and retry.
        // (The lock is released at the end of this iteration, letting the
        // suspension take effect before we try again.)
        if (GetCurrentThread(kernel).IsSuspended()) {
            GetCurrentThread(kernel).Suspend();
            continue;
        }

        // If we're not a kernel thread and we've been asked to suspend, suspend ourselves.
        if (IsUserThread() && IsSuspended()) {
            Suspend();
        }

        // Set our state and finish.
        SetState(ThreadState::Runnable);
        return RESULT_SUCCESS;
    }
}
|
2018-12-03 18:25:27 +01:00
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Terminates the calling thread. Must be invoked on the thread itself
// (asserted below); termination is started under the scheduler lock with
// all further suspension disallowed.
void KThread::Exit() {
    ASSERT(this == GetCurrentThreadPointer(kernel));

    // Release the thread resource hint from parent.
    if (parent != nullptr) {
        // TODO(bunnei): Hint that the resource is about to be released.
        resource_limit_release_hint = true;
    }

    // Perform termination.
    {
        KScopedSchedulerLock sl{kernel};

        // Disallow all suspension.
        suspend_allowed_flags = 0;

        // Start termination.
        StartTermination();
    }
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Puts the calling thread to sleep for the given (positive) timeout.
// Must be called on the thread itself, without the scheduler lock held.
// Returns Svc::ResultTerminationRequested if the thread is being terminated.
ResultCode KThread::Sleep(s64 timeout) {
    ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
    ASSERT(this == GetCurrentThreadPointer(kernel));
    ASSERT(timeout > 0);

    {
        // Setup the scheduling lock and sleep.
        // The actual sleep (reschedule) happens when slp goes out of scope at the
        // end of this block, unless CancelSleep() is called first.
        KScopedSchedulerLockAndSleep slp{kernel, this, timeout};

        // Check if the thread should terminate.
        if (IsTerminationRequested()) {
            slp.CancelSleep();
            return Svc::ResultTerminationRequested;
        }

        // Mark the thread as waiting.
        SetState(ThreadState::Waiting);
        SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
    }

    // The lock/sleep is done.

    // Cancel the timer.
    // We have woken (either timeout fired or we were signaled), so make sure no
    // stale time event for this thread remains scheduled.
    kernel.TimeManager().UnscheduleTimeEvent(this);

    return RESULT_SUCCESS;
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
void KThread::SetState(ThreadState state) {
    KScopedSchedulerLock sl{kernel};

    // Clear debugging state
    SetMutexWaitAddressForDebugging({});
    SetWaitReasonForDebugging({});

    // Replace only the state bits covered by ThreadState::Mask, preserving the
    // remaining flag bits, and notify the scheduler if the result differs.
    const ThreadState old_state = thread_state;
    const ThreadState new_state =
        static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
    thread_state = new_state;
    if (new_state != old_state) {
        KScheduler::OnThreadStateChanged(kernel, this, old_state);
    }
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Returns a reference to the host fiber backing this guest thread's execution.
std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
    return host_context;
}
|
|
|
|
|
2021-01-20 22:42:27 +01:00
|
|
|
// Convenience overload of Create that uses the CPU manager's default guest
// thread start function and parameter. Forwards all other arguments to the
// main Create overload.
ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
                                                    std::string name, VAddr entry_point,
                                                    u32 priority, u64 arg, s32 processor_id,
                                                    VAddr stack_top, Process* owner_process) {
    std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
    // Move the by-value sink parameter instead of copying it into the callee.
    return Create(system, type_flags, std::move(name), entry_point, priority, arg, processor_id,
                  stack_top, owner_process, std::move(init_func), init_func_parameter);
}
|
|
|
|
|
|
|
|
// Creates and initializes a new kernel thread, registers it with the global
// scheduler, and attaches a host fiber that will run thread_start_func with
// thread_start_parameter. Returns the new thread, or the initialization error.
ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
                                                    std::string name, VAddr entry_point,
                                                    u32 priority, u64 arg, s32 processor_id,
                                                    VAddr stack_top, Process* owner_process,
                                                    std::function<void(void*)>&& thread_start_func,
                                                    void* thread_start_parameter) {
    auto& kernel = system.Kernel();

    std::shared_ptr<KThread> thread = std::make_shared<KThread>(kernel);

    if (const auto result =
            thread->InitializeThread(thread.get(), entry_point, arg, stack_top, priority,
                                     processor_id, owner_process, type_flags);
        result.IsError()) {
        return result;
    }

    // name is a by-value sink parameter; move it instead of copying.
    thread->name = std::move(name);

    auto& scheduler = kernel.GlobalSchedulerContext();
    scheduler.AddThread(thread);

    // The host fiber is the execution context that actually runs the guest thread.
    thread->host_context =
        std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);

    return MakeResult<std::shared_ptr<KThread>>(std::move(thread));
}
|
|
|
|
|
|
|
|
// Returns a pointer to the thread currently executing on the emulated CPU.
KThread* GetCurrentThreadPointer(KernelCore& kernel) {
    return kernel.GetCurrentEmuThread();
}
|
|
|
|
|
|
|
|
// Returns a reference to the thread currently executing on the emulated CPU.
KThread& GetCurrentThread(KernelCore& kernel) {
    KThread* const current = GetCurrentThreadPointer(kernel);
    return *current;
}
|
|
|
|
|
|
|
|
// Returns the core id that the current thread is running on.
s32 GetCurrentCoreId(KernelCore& kernel) {
    return GetCurrentThreadPointer(kernel)->GetCurrentCore();
}
|
|
|
|
|
2018-01-08 17:35:03 +01:00
|
|
|
} // namespace Kernel
|