
Merge pull request #1439 from lioncash/thread

kernel/thread: Make all instance variables private
bunnei 2018-10-05 13:41:54 -04:00 committed by GitHub
commit e51d715700
15 changed files with 418 additions and 227 deletions
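
This pull request applies one encapsulation pattern across the kernel: every public instance variable on Kernel::Thread becomes a private member reached through a small getter or setter, and every call site in the diff below is updated to use the accessors. A minimal, self-contained sketch of that pattern follows; the Counter class and its members are illustrative stand-ins, not the actual Kernel::Thread API.

    // Before: callers wrote counter.value directly. After: the field is private
    // and all reads/writes go through accessors, so the class can attach
    // bookkeeping (asserts, tick tracking, etc.) to every state change.
    #include <cstdint>
    #include <iostream>

    class Counter {
    public:
        std::uint32_t GetValue() const {
            return value;
        }

        void SetValue(std::uint32_t new_value) {
            value = new_value;
        }

    private:
        std::uint32_t value = 0;
    };

    int main() {
        Counter counter;
        counter.SetValue(42);
        std::cout << counter.GetValue() << '\n';
    }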

View file

@@ -86,7 +86,7 @@ public:
         parent.jit->HaltExecution();
         parent.SetPC(pc);
         Kernel::Thread* thread = Kernel::GetCurrentThread();
-        parent.SaveContext(thread->context);
+        parent.SaveContext(thread->GetContext());
         GDBStub::Break();
         GDBStub::SendTrap(thread, 5);
         return;

View file

@@ -195,7 +195,7 @@ void ARM_Unicorn::ExecuteInstructions(int num_instructions) {
         uc_reg_write(uc, UC_ARM64_REG_PC, &last_bkpt.address);
     }
     Kernel::Thread* thread = Kernel::GetCurrentThread();
-    SaveContext(thread->context);
+    SaveContext(thread->GetContext());
     if (last_bkpt_hit || GDBStub::GetCpuStepFlag()) {
         last_bkpt_hit = false;
         GDBStub::Break();

View file

@@ -209,7 +209,7 @@ static Kernel::Thread* FindThreadById(int id) {
     for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) {
         const auto& threads = Core::System::GetInstance().Scheduler(core)->GetThreadList();
         for (auto& thread : threads) {
-            if (thread->GetThreadId() == static_cast<u32>(id)) {
+            if (thread->GetThreadID() == static_cast<u32>(id)) {
                 current_core = core;
                 return thread.get();
             }
@@ -223,16 +223,18 @@ static u64 RegRead(std::size_t id, Kernel::Thread* thread = nullptr) {
         return 0;
     }
+    const auto& thread_context = thread->GetContext();
     if (id < SP_REGISTER) {
-        return thread->context.cpu_registers[id];
+        return thread_context.cpu_registers[id];
     } else if (id == SP_REGISTER) {
-        return thread->context.sp;
+        return thread_context.sp;
     } else if (id == PC_REGISTER) {
-        return thread->context.pc;
+        return thread_context.pc;
     } else if (id == PSTATE_REGISTER) {
-        return thread->context.pstate;
+        return thread_context.pstate;
     } else if (id > PSTATE_REGISTER && id < FPCR_REGISTER) {
-        return thread->context.vector_registers[id - UC_ARM64_REG_Q0][0];
+        return thread_context.vector_registers[id - UC_ARM64_REG_Q0][0];
     } else {
         return 0;
     }
@@ -243,16 +245,18 @@ static void RegWrite(std::size_t id, u64 val, Kernel::Thread* thread = nullptr)
         return;
     }
+    auto& thread_context = thread->GetContext();
     if (id < SP_REGISTER) {
-        thread->context.cpu_registers[id] = val;
+        thread_context.cpu_registers[id] = val;
     } else if (id == SP_REGISTER) {
-        thread->context.sp = val;
+        thread_context.sp = val;
     } else if (id == PC_REGISTER) {
-        thread->context.pc = val;
+        thread_context.pc = val;
     } else if (id == PSTATE_REGISTER) {
-        thread->context.pstate = static_cast<u32>(val);
+        thread_context.pstate = static_cast<u32>(val);
     } else if (id > PSTATE_REGISTER && id < FPCR_REGISTER) {
-        thread->context.vector_registers[id - (PSTATE_REGISTER + 1)][0] = val;
+        thread_context.vector_registers[id - (PSTATE_REGISTER + 1)][0] = val;
     }
 }
@@ -595,7 +599,7 @@ static void HandleQuery() {
         for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) {
             const auto& threads = Core::System::GetInstance().Scheduler(core)->GetThreadList();
             for (const auto& thread : threads) {
-                val += fmt::format("{:x}", thread->GetThreadId());
+                val += fmt::format("{:x}", thread->GetThreadID());
                 val += ",";
             }
         }
@@ -612,7 +616,7 @@ static void HandleQuery() {
             for (const auto& thread : threads) {
                 buffer +=
                     fmt::format(R"*(<thread id="{:x}" core="{:d}" name="Thread {:x}"></thread>)*",
-                                thread->GetThreadId(), core, thread->GetThreadId());
+                                thread->GetThreadID(), core, thread->GetThreadID());
             }
         }
         buffer += "</threads>";
@@ -693,7 +697,7 @@ static void SendSignal(Kernel::Thread* thread, u32 signal, bool full = true) {
     }
     if (thread) {
-        buffer += fmt::format(";thread:{:x};", thread->GetThreadId());
+        buffer += fmt::format(";thread:{:x};", thread->GetThreadID());
     }
     SendReply(buffer.c_str());
@@ -857,7 +861,9 @@ static void WriteRegister() {
     }
     // Update Unicorn context skipping scheduler, no running threads at this point
-    Core::System::GetInstance().ArmInterface(current_core).LoadContext(current_thread->context);
+    Core::System::GetInstance()
+        .ArmInterface(current_core)
+        .LoadContext(current_thread->GetContext());
     SendReply("OK");
 }
@@ -886,7 +892,9 @@ static void WriteRegisters() {
     }
     // Update Unicorn context skipping scheduler, no running threads at this point
-    Core::System::GetInstance().ArmInterface(current_core).LoadContext(current_thread->context);
+    Core::System::GetInstance()
+        .ArmInterface(current_core)
+        .LoadContext(current_thread->GetContext());
     SendReply("OK");
 }
@@ -960,7 +968,9 @@ static void Step() {
     if (command_length > 1) {
         RegWrite(PC_REGISTER, GdbHexToLong(command_buffer + 1), current_thread);
         // Update Unicorn context skipping scheduler, no running threads at this point
-        Core::System::GetInstance().ArmInterface(current_core).LoadContext(current_thread->context);
+        Core::System::GetInstance()
+            .ArmInterface(current_core)
+            .LoadContext(current_thread->GetContext());
     }
     step_loop = true;
     halt_loop = true;

View file

@@ -23,13 +23,13 @@ namespace AddressArbiter {
 // Performs actual address waiting logic.
 static ResultCode WaitForAddress(VAddr address, s64 timeout) {
     SharedPtr<Thread> current_thread = GetCurrentThread();
-    current_thread->arb_wait_address = address;
-    current_thread->status = ThreadStatus::WaitArb;
-    current_thread->wakeup_callback = nullptr;
+    current_thread->SetArbiterWaitAddress(address);
+    current_thread->SetStatus(ThreadStatus::WaitArb);
+    current_thread->InvalidateWakeupCallback();
     current_thread->WakeAfterDelay(timeout);
-    Core::System::GetInstance().CpuCore(current_thread->processor_id).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(current_thread->GetProcessorID()).PrepareReschedule();
     return RESULT_TIMEOUT;
 }
@@ -39,10 +39,10 @@ static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address)
                                         std::vector<SharedPtr<Thread>>& waiting_threads,
                                         VAddr arb_addr) {
         const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
-        auto& thread_list = scheduler->GetThreadList();
-        for (auto& thread : thread_list) {
-            if (thread->arb_wait_address == arb_addr)
+        const auto& thread_list = scheduler->GetThreadList();
+        for (const auto& thread : thread_list) {
+            if (thread->GetArbiterWaitAddress() == arb_addr)
                 waiting_threads.push_back(thread);
         }
     };
@@ -57,7 +57,7 @@ static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address)
     // Sort them by priority, such that the highest priority ones come first.
     std::sort(threads.begin(), threads.end(),
               [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
-                  return lhs->current_priority < rhs->current_priority;
+                  return lhs->GetPriority() < rhs->GetPriority();
               });
     return threads;
@@ -73,9 +73,9 @@ static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num
     // Signal the waiting threads.
     for (std::size_t i = 0; i < last; i++) {
-        ASSERT(waiting_threads[i]->status == ThreadStatus::WaitArb);
+        ASSERT(waiting_threads[i]->GetStatus() == ThreadStatus::WaitArb);
         waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
-        waiting_threads[i]->arb_wait_address = 0;
+        waiting_threads[i]->SetArbiterWaitAddress(0);
         waiting_threads[i]->ResumeFromWait();
     }
 }

View file

@@ -42,14 +42,14 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
                                                       Kernel::SharedPtr<Kernel::Event> event) {
     // Put the client thread to sleep until the wait event is signaled or the timeout expires.
-    thread->wakeup_callback = [context = *this, callback](
+    thread->SetWakeupCallback([context = *this, callback](
                                   ThreadWakeupReason reason, SharedPtr<Thread> thread,
                                   SharedPtr<WaitObject> object, std::size_t index) mutable -> bool {
-        ASSERT(thread->status == ThreadStatus::WaitHLEEvent);
+        ASSERT(thread->GetStatus() == ThreadStatus::WaitHLEEvent);
         callback(thread, context, reason);
         context.WriteToOutgoingCommandBuffer(*thread);
         return true;
-    };
+    });
     if (!event) {
         // Create event if not provided
@@ -59,8 +59,8 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
     }
     event->Clear();
-    thread->status = ThreadStatus::WaitHLEEvent;
-    thread->wait_objects = {event};
+    thread->SetStatus(ThreadStatus::WaitHLEEvent);
+    thread->SetWaitObjects({event});
     event->AddWaitingThread(thread);
     if (timeout > 0) {
@@ -209,7 +209,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(u32_le* src_cmdb
 ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread) {
     std::array<u32, IPC::COMMAND_BUFFER_LENGTH> dst_cmdbuf;
-    Memory::ReadBlock(*thread.owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
+    Memory::ReadBlock(*thread.GetOwnerProcess(), thread.GetTLSAddress(), dst_cmdbuf.data(),
                       dst_cmdbuf.size() * sizeof(u32));
     // The header was already built in the internal command buffer. Attempt to parse it to verify
@@ -268,7 +268,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
     }
     // Copy the translated command buffer back into the thread's command buffer area.
-    Memory::WriteBlock(*thread.owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
+    Memory::WriteBlock(*thread.GetOwnerProcess(), thread.GetTLSAddress(), dst_cmdbuf.data(),
                        dst_cmdbuf.size() * sizeof(u32));
     return RESULT_SUCCESS;

View file

@@ -46,40 +46,40 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_
     bool resume = true;
-    if (thread->status == ThreadStatus::WaitSynchAny ||
-        thread->status == ThreadStatus::WaitSynchAll ||
-        thread->status == ThreadStatus::WaitHLEEvent) {
+    if (thread->GetStatus() == ThreadStatus::WaitSynchAny ||
+        thread->GetStatus() == ThreadStatus::WaitSynchAll ||
+        thread->GetStatus() == ThreadStatus::WaitHLEEvent) {
         // Remove the thread from each of its waiting objects' waitlists
-        for (auto& object : thread->wait_objects) {
+        for (const auto& object : thread->GetWaitObjects()) {
             object->RemoveWaitingThread(thread.get());
         }
-        thread->wait_objects.clear();
+        thread->ClearWaitObjects();
         // Invoke the wakeup callback before clearing the wait objects
-        if (thread->wakeup_callback) {
-            resume = thread->wakeup_callback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
+        if (thread->HasWakeupCallback()) {
+            resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
         }
     }
-    if (thread->mutex_wait_address != 0 || thread->condvar_wait_address != 0 ||
-        thread->wait_handle) {
-        ASSERT(thread->status == ThreadStatus::WaitMutex);
-        thread->mutex_wait_address = 0;
-        thread->condvar_wait_address = 0;
-        thread->wait_handle = 0;
-        auto lock_owner = thread->lock_owner;
+    if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 ||
+        thread->GetWaitHandle() != 0) {
+        ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+        thread->SetMutexWaitAddress(0);
+        thread->SetCondVarWaitAddress(0);
+        thread->SetWaitHandle(0);
+        auto* const lock_owner = thread->GetLockOwner();
         // Threads waking up by timeout from WaitProcessWideKey do not perform priority inheritance
         // and don't have a lock owner unless SignalProcessWideKey was called first and the thread
         // wasn't awakened due to the mutex already being acquired.
-        if (lock_owner) {
+        if (lock_owner != nullptr) {
             lock_owner->RemoveMutexWaiter(thread);
         }
     }
-    if (thread->arb_wait_address != 0) {
-        ASSERT(thread->status == ThreadStatus::WaitArb);
-        thread->arb_wait_address = 0;
+    if (thread->GetArbiterWaitAddress() != 0) {
+        ASSERT(thread->GetStatus() == ThreadStatus::WaitArb);
+        thread->SetArbiterWaitAddress(0);
     }
     if (resume) {

View file

@@ -28,11 +28,11 @@ static std::pair<SharedPtr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
     SharedPtr<Thread> highest_priority_thread;
     u32 num_waiters = 0;
-    for (auto& thread : current_thread->wait_mutex_threads) {
-        if (thread->mutex_wait_address != mutex_addr)
+    for (const auto& thread : current_thread->GetMutexWaitingThreads()) {
+        if (thread->GetMutexWaitAddress() != mutex_addr)
             continue;
-        ASSERT(thread->status == ThreadStatus::WaitMutex);
+        ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
         ++num_waiters;
         if (highest_priority_thread == nullptr ||
@@ -47,12 +47,12 @@ static std::pair<SharedPtr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
 /// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
 static void TransferMutexOwnership(VAddr mutex_addr, SharedPtr<Thread> current_thread,
                                    SharedPtr<Thread> new_owner) {
-    auto threads = current_thread->wait_mutex_threads;
-    for (auto& thread : threads) {
-        if (thread->mutex_wait_address != mutex_addr)
+    const auto& threads = current_thread->GetMutexWaitingThreads();
+    for (const auto& thread : threads) {
+        if (thread->GetMutexWaitAddress() != mutex_addr)
             continue;
-        ASSERT(thread->lock_owner == current_thread);
+        ASSERT(thread->GetLockOwner() == current_thread);
         current_thread->RemoveMutexWaiter(thread);
         if (new_owner != thread)
             new_owner->AddMutexWaiter(thread);
@@ -84,11 +84,11 @@ ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle ho
         return ERR_INVALID_HANDLE;
     // Wait until the mutex is released
-    GetCurrentThread()->mutex_wait_address = address;
-    GetCurrentThread()->wait_handle = requesting_thread_handle;
-    GetCurrentThread()->status = ThreadStatus::WaitMutex;
-    GetCurrentThread()->wakeup_callback = nullptr;
+    GetCurrentThread()->SetMutexWaitAddress(address);
+    GetCurrentThread()->SetWaitHandle(requesting_thread_handle);
+    GetCurrentThread()->SetStatus(ThreadStatus::WaitMutex);
+    GetCurrentThread()->InvalidateWakeupCallback();
     // Update the lock holder thread's priority to prevent priority inversion.
     holding_thread->AddMutexWaiter(GetCurrentThread());
@@ -115,7 +115,7 @@ ResultCode Mutex::Release(VAddr address) {
     // Transfer the ownership of the mutex from the previous owner to the new one.
     TransferMutexOwnership(address, GetCurrentThread(), thread);
-    u32 mutex_value = thread->wait_handle;
+    u32 mutex_value = thread->GetWaitHandle();
     if (num_waiters >= 2) {
         // Notify the guest that there are still some threads waiting for the mutex
@@ -125,13 +125,13 @@ ResultCode Mutex::Release(VAddr address) {
     // Grant the mutex to the next waiting thread and resume it.
     Memory::Write32(address, mutex_value);
-    ASSERT(thread->status == ThreadStatus::WaitMutex);
+    ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
     thread->ResumeFromWait();
-    thread->lock_owner = nullptr;
-    thread->condvar_wait_address = 0;
-    thread->mutex_wait_address = 0;
-    thread->wait_handle = 0;
+    thread->SetLockOwner(nullptr);
+    thread->SetCondVarWaitAddress(0);
+    thread->SetMutexWaitAddress(0);
+    thread->SetWaitHandle(0);
     return RESULT_SUCCESS;
 }

View file

@@ -144,15 +144,15 @@ void Process::PrepareForTermination() {
     const auto stop_threads = [this](const std::vector<SharedPtr<Thread>>& thread_list) {
         for (auto& thread : thread_list) {
-            if (thread->owner_process != this)
+            if (thread->GetOwnerProcess() != this)
                 continue;
             if (thread == GetCurrentThread())
                 continue;
             // TODO(Subv): When are the other running/ready threads terminated?
-            ASSERT_MSG(thread->status == ThreadStatus::WaitSynchAny ||
-                           thread->status == ThreadStatus::WaitSynchAll,
+            ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynchAny ||
+                           thread->GetStatus() == ThreadStatus::WaitSynchAll,
                        "Exiting processes with non-waiting threads is currently unimplemented");
             thread->Stop();

View file

@@ -38,10 +38,10 @@ Thread* Scheduler::PopNextReadyThread() {
     Thread* next = nullptr;
     Thread* thread = GetCurrentThread();
-    if (thread && thread->status == ThreadStatus::Running) {
+    if (thread && thread->GetStatus() == ThreadStatus::Running) {
         // We have to do better than the current thread.
         // This call returns null when that's not possible.
-        next = ready_queue.pop_first_better(thread->current_priority);
+        next = ready_queue.pop_first_better(thread->GetPriority());
         if (!next) {
             // Otherwise just keep going with the current thread
             next = thread;
@@ -58,22 +58,21 @@ void Scheduler::SwitchContext(Thread* new_thread) {
     // Save context for previous thread
     if (previous_thread) {
-        previous_thread->last_running_ticks = CoreTiming::GetTicks();
-        cpu_core.SaveContext(previous_thread->context);
+        cpu_core.SaveContext(previous_thread->GetContext());
         // Save the TPIDR_EL0 system register in case it was modified.
-        previous_thread->tpidr_el0 = cpu_core.GetTPIDR_EL0();
-        if (previous_thread->status == ThreadStatus::Running) {
+        previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+        if (previous_thread->GetStatus() == ThreadStatus::Running) {
             // This is only the case when a reschedule is triggered without the current thread
             // yielding execution (i.e. an event triggered, system core time-sliced, etc)
-            ready_queue.push_front(previous_thread->current_priority, previous_thread);
-            previous_thread->status = ThreadStatus::Ready;
+            ready_queue.push_front(previous_thread->GetPriority(), previous_thread);
+            previous_thread->SetStatus(ThreadStatus::Ready);
         }
     }
     // Load context of new thread
     if (new_thread) {
-        ASSERT_MSG(new_thread->status == ThreadStatus::Ready,
+        ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
                    "Thread must be ready to become running.");
         // Cancel any outstanding wakeup events for this thread
@@ -83,15 +82,16 @@ void Scheduler::SwitchContext(Thread* new_thread) {
         current_thread = new_thread;
-        ready_queue.remove(new_thread->current_priority, new_thread);
-        new_thread->status = ThreadStatus::Running;
+        ready_queue.remove(new_thread->GetPriority(), new_thread);
+        new_thread->SetStatus(ThreadStatus::Running);
-        if (previous_process != current_thread->owner_process) {
-            Core::CurrentProcess() = current_thread->owner_process;
+        const auto thread_owner_process = current_thread->GetOwnerProcess();
+        if (previous_process != thread_owner_process) {
+            Core::CurrentProcess() = thread_owner_process;
             SetCurrentPageTable(&Core::CurrentProcess()->VMManager().page_table);
         }
-        cpu_core.LoadContext(new_thread->context);
+        cpu_core.LoadContext(new_thread->GetContext());
         cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
         cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
         cpu_core.ClearExclusiveState();
@@ -136,14 +136,14 @@ void Scheduler::RemoveThread(Thread* thread) {
 void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
-    ASSERT(thread->status == ThreadStatus::Ready);
+    ASSERT(thread->GetStatus() == ThreadStatus::Ready);
     ready_queue.push_back(priority, thread);
 }
 void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
-    ASSERT(thread->status == ThreadStatus::Ready);
+    ASSERT(thread->GetStatus() == ThreadStatus::Ready);
     ready_queue.remove(priority, thread);
 }
@@ -151,8 +151,8 @@ void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
     // If thread was ready, adjust queues
-    if (thread->status == ThreadStatus::Ready)
-        ready_queue.move(thread, thread->current_priority, priority);
+    if (thread->GetStatus() == ThreadStatus::Ready)
+        ready_queue.move(thread, thread->GetPriority(), priority);
     else
         ready_queue.prepare(priority);
 }
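
Note that the last_running_ticks bookkeeping removed from Scheduler::SwitchContext above is not dropped; it moves into the new Thread::SetStatus (see the thread.cpp hunks later in this diff), which records the tick count whenever a thread leaves the Running state. A self-contained sketch of that setter-with-bookkeeping pattern, using placeholder names (TrackedThread, GetTickCount) rather than the real kernel types:

    #include <chrono>
    #include <cstdint>
    #include <iostream>

    enum class Status { Dormant, Ready, Running };

    class TrackedThread {
    public:
        void SetStatus(Status new_status) {
            if (new_status == status) {
                return;
            }
            if (status == Status::Running) {
                // Leaving the Running state: remember when that happened.
                last_running_ticks = GetTickCount();
            }
            status = new_status;
        }

        std::uint64_t GetLastRunningTicks() const {
            return last_running_ticks;
        }

    private:
        // Placeholder standing in for the emulator's CoreTiming::GetTicks().
        static std::uint64_t GetTickCount() {
            const auto now = std::chrono::steady_clock::now().time_since_epoch();
            return static_cast<std::uint64_t>(now.count());
        }

        Status status = Status::Dormant;
        std::uint64_t last_running_ticks = 0;
    };

    int main() {
        TrackedThread thread;
        thread.SetStatus(Status::Running);
        thread.SetStatus(Status::Ready); // records last_running_ticks
        std::cout << thread.GetLastRunningTicks() << '\n';
    }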

View file

@@ -120,10 +120,10 @@ ResultCode ServerSession::HandleSyncRequest(SharedPtr<Thread> thread) {
         result = hle_handler->HandleSyncRequest(context);
     }
-    if (thread->status == ThreadStatus::Running) {
+    if (thread->GetStatus() == ThreadStatus::Running) {
         // Put the thread to sleep until the server replies, it will be awoken in
         // svcReplyAndReceive for LLE servers.
-        thread->status = ThreadStatus::WaitIPC;
+        thread->SetStatus(ThreadStatus::WaitIPC);
         if (hle_handler != nullptr) {
             // For HLE services, we put the request threads to sleep for a short duration to

View file

@@ -156,7 +156,7 @@ static ResultCode GetThreadId(u32* thread_id, Handle thread_handle) {
         return ERR_INVALID_HANDLE;
     }
-    *thread_id = thread->GetThreadId();
+    *thread_id = thread->GetThreadID();
     return RESULT_SUCCESS;
 }
@@ -177,7 +177,7 @@ static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
 /// Default thread wakeup callback for WaitSynchronization
 static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
                                         SharedPtr<WaitObject> object, std::size_t index) {
-    ASSERT(thread->status == ThreadStatus::WaitSynchAny);
+    ASSERT(thread->GetStatus() == ThreadStatus::WaitSynchAny);
     if (reason == ThreadWakeupReason::Timeout) {
         thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
@@ -204,10 +204,10 @@ static ResultCode WaitSynchronization(Handle* index, VAddr handles_address, u64
     if (handle_count > MaxHandles)
         return ResultCode(ErrorModule::Kernel, ErrCodes::TooLarge);
-    auto thread = GetCurrentThread();
-    using ObjectPtr = SharedPtr<WaitObject>;
-    std::vector<ObjectPtr> objects(handle_count);
+    auto* const thread = GetCurrentThread();
+    using ObjectPtr = Thread::ThreadWaitObjects::value_type;
+    Thread::ThreadWaitObjects objects(handle_count);
     auto& kernel = Core::System::GetInstance().Kernel();
     for (u64 i = 0; i < handle_count; ++i) {
@@ -244,14 +244,14 @@ static ResultCode WaitSynchronization(Handle* index, VAddr handles_address, u64
     for (auto& object : objects)
         object->AddWaitingThread(thread);
-    thread->wait_objects = std::move(objects);
-    thread->status = ThreadStatus::WaitSynchAny;
+    thread->SetWaitObjects(std::move(objects));
+    thread->SetStatus(ThreadStatus::WaitSynchAny);
     // Create an event to wake the thread up after the specified nanosecond delay has passed
     thread->WakeAfterDelay(nano_seconds);
-    thread->wakeup_callback = DefaultThreadWakeupCallback;
-    Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+    thread->SetWakeupCallback(DefaultThreadWakeupCallback);
+    Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
     return RESULT_TIMEOUT;
 }
@@ -266,7 +266,7 @@ static ResultCode CancelSynchronization(Handle thread_handle) {
         return ERR_INVALID_HANDLE;
     }
-    ASSERT(thread->status == ThreadStatus::WaitSynchAny);
+    ASSERT(thread->GetStatus() == ThreadStatus::WaitSynchAny);
     thread->SetWaitSynchronizationResult(
         ResultCode(ErrorModule::Kernel, ErrCodes::SynchronizationCanceled));
     thread->ResumeFromWait();
@@ -425,7 +425,7 @@ static ResultCode GetThreadContext(VAddr thread_context, Handle handle) {
     }
     const auto current_process = Core::CurrentProcess();
-    if (thread->owner_process != current_process) {
+    if (thread->GetOwnerProcess() != current_process) {
         return ERR_INVALID_HANDLE;
     }
@@ -433,7 +433,7 @@ static ResultCode GetThreadContext(VAddr thread_context, Handle handle) {
         return ERR_ALREADY_REGISTERED;
     }
-    Core::ARM_Interface::ThreadContext ctx = thread->context;
+    Core::ARM_Interface::ThreadContext ctx = thread->GetContext();
     // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
     ctx.pstate &= 0xFF0FFE20;
@@ -479,14 +479,14 @@ static ResultCode SetThreadPriority(Handle handle, u32 priority) {
     thread->SetPriority(priority);
-    Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
     return RESULT_SUCCESS;
 }
 /// Get which CPU core is executing the current thread
 static u32 GetCurrentProcessorNumber() {
     LOG_TRACE(Kernel_SVC, "called");
-    return GetCurrentThread()->processor_id;
+    return GetCurrentThread()->GetProcessorID();
 }
 static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size,
@@ -622,10 +622,14 @@ static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, V
     CASCADE_RESULT(SharedPtr<Thread> thread,
                    Thread::Create(kernel, name, entry_point, priority, arg, processor_id, stack_top,
                                   Core::CurrentProcess()));
-    CASCADE_RESULT(thread->guest_handle, kernel.HandleTable().Create(thread));
-    *out_handle = thread->guest_handle;
+    const auto new_guest_handle = kernel.HandleTable().Create(thread);
+    if (new_guest_handle.Failed()) {
+        return new_guest_handle.Code();
+    }
+    thread->SetGuestHandle(*new_guest_handle);
+    *out_handle = *new_guest_handle;
-    Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
     LOG_TRACE(Kernel_SVC,
               "called entrypoint=0x{:08X} ({}), arg=0x{:08X}, stacktop=0x{:08X}, "
@@ -645,10 +649,10 @@ static ResultCode StartThread(Handle thread_handle) {
         return ERR_INVALID_HANDLE;
     }
-    ASSERT(thread->status == ThreadStatus::Dormant);
+    ASSERT(thread->GetStatus() == ThreadStatus::Dormant);
     thread->ResumeFromWait();
-    Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
     return RESULT_SUCCESS;
 }
@@ -694,17 +698,17 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var
     CASCADE_CODE(Mutex::Release(mutex_addr));
     SharedPtr<Thread> current_thread = GetCurrentThread();
-    current_thread->condvar_wait_address = condition_variable_addr;
-    current_thread->mutex_wait_address = mutex_addr;
-    current_thread->wait_handle = thread_handle;
-    current_thread->status = ThreadStatus::WaitMutex;
-    current_thread->wakeup_callback = nullptr;
+    current_thread->SetCondVarWaitAddress(condition_variable_addr);
+    current_thread->SetMutexWaitAddress(mutex_addr);
+    current_thread->SetWaitHandle(thread_handle);
+    current_thread->SetStatus(ThreadStatus::WaitMutex);
+    current_thread->InvalidateWakeupCallback();
     current_thread->WakeAfterDelay(nano_seconds);
     // Note: Deliberately don't attempt to inherit the lock owner's priority.
-    Core::System::GetInstance().CpuCore(current_thread->processor_id).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(current_thread->GetProcessorID()).PrepareReschedule();
     return RESULT_SUCCESS;
 }
@@ -713,14 +717,14 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
     LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
               condition_variable_addr, target);
-    auto RetrieveWaitingThreads = [](std::size_t core_index,
+    const auto RetrieveWaitingThreads = [](std::size_t core_index,
                                      std::vector<SharedPtr<Thread>>& waiting_threads,
                                      VAddr condvar_addr) {
         const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
-        auto& thread_list = scheduler->GetThreadList();
-        for (auto& thread : thread_list) {
-            if (thread->condvar_wait_address == condvar_addr)
+        const auto& thread_list = scheduler->GetThreadList();
+        for (const auto& thread : thread_list) {
+            if (thread->GetCondVarWaitAddress() == condvar_addr)
                 waiting_threads.push_back(thread);
         }
     };
@@ -734,7 +738,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
     // Sort them by priority, such that the highest priority ones come first.
     std::sort(waiting_threads.begin(), waiting_threads.end(),
               [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
-                  return lhs->current_priority < rhs->current_priority;
+                  return lhs->GetPriority() < rhs->GetPriority();
              });
     // Only process up to 'target' threads, unless 'target' is -1, in which case process
@@ -750,7 +754,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
     for (std::size_t index = 0; index < last; ++index) {
         auto& thread = waiting_threads[index];
-        ASSERT(thread->condvar_wait_address == condition_variable_addr);
+        ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
         std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
@@ -759,42 +763,43 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
         // Atomically read the value of the mutex.
         u32 mutex_val = 0;
         do {
-            monitor.SetExclusive(current_core, thread->mutex_wait_address);
+            monitor.SetExclusive(current_core, thread->GetMutexWaitAddress());
             // If the mutex is not yet acquired, acquire it.
-            mutex_val = Memory::Read32(thread->mutex_wait_address);
+            mutex_val = Memory::Read32(thread->GetMutexWaitAddress());
             if (mutex_val != 0) {
                 monitor.ClearExclusive();
                 break;
             }
-        } while (!monitor.ExclusiveWrite32(current_core, thread->mutex_wait_address,
-                                           thread->wait_handle));
+        } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
+                                           thread->GetWaitHandle()));
         if (mutex_val == 0) {
             // We were able to acquire the mutex, resume this thread.
-            ASSERT(thread->status == ThreadStatus::WaitMutex);
+            ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
             thread->ResumeFromWait();
-            auto lock_owner = thread->lock_owner;
-            if (lock_owner)
+            auto* const lock_owner = thread->GetLockOwner();
+            if (lock_owner != nullptr) {
                 lock_owner->RemoveMutexWaiter(thread);
+            }
-            thread->lock_owner = nullptr;
-            thread->mutex_wait_address = 0;
-            thread->condvar_wait_address = 0;
-            thread->wait_handle = 0;
+            thread->SetLockOwner(nullptr);
+            thread->SetMutexWaitAddress(0);
+            thread->SetCondVarWaitAddress(0);
+            thread->SetWaitHandle(0);
         } else {
             // Atomically signal that the mutex now has a waiting thread.
             do {
-                monitor.SetExclusive(current_core, thread->mutex_wait_address);
+                monitor.SetExclusive(current_core, thread->GetMutexWaitAddress());
                 // Ensure that the mutex value is still what we expect.
-                u32 value = Memory::Read32(thread->mutex_wait_address);
+                u32 value = Memory::Read32(thread->GetMutexWaitAddress());
                 // TODO(Subv): When this happens, the kernel just clears the exclusive state and
                 // retries the initial read for this thread.
                 ASSERT_MSG(mutex_val == value, "Unhandled synchronization primitive case");
-            } while (!monitor.ExclusiveWrite32(current_core, thread->mutex_wait_address,
+            } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
                                                mutex_val | Mutex::MutexHasWaitersFlag));
             // The mutex is already owned by some other thread, make this thread wait on it.
@@ -802,12 +807,12 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
             Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
             auto owner = kernel.HandleTable().Get<Thread>(owner_handle);
             ASSERT(owner);
-            ASSERT(thread->status == ThreadStatus::WaitMutex);
-            thread->wakeup_callback = nullptr;
+            ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+            thread->InvalidateWakeupCallback();
             owner->AddMutexWaiter(thread);
-            Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+            Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
         }
     }
@@ -913,8 +918,8 @@ static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask)
         return ERR_INVALID_HANDLE;
     }
-    *core = thread->ideal_core;
-    *mask = thread->affinity_mask;
+    *core = thread->GetIdealCore();
+    *mask = thread->GetAffinityMask();
     return RESULT_SUCCESS;
 }
@@ -930,11 +935,13 @@ static ResultCode SetThreadCoreMask(Handle thread_handle, u32 core, u64 mask) {
     }
     if (core == static_cast<u32>(THREADPROCESSORID_DEFAULT)) {
-        ASSERT(thread->owner_process->GetDefaultProcessorID() !=
-               static_cast<u8>(THREADPROCESSORID_DEFAULT));
+        const u8 default_processor_id = thread->GetOwnerProcess()->GetDefaultProcessorID();
+        ASSERT(default_processor_id != static_cast<u8>(THREADPROCESSORID_DEFAULT));
         // Set the target CPU to the one specified in the process' exheader.
-        core = thread->owner_process->GetDefaultProcessorID();
-        mask = 1ull << core;
+        core = default_processor_id;
+        mask = 1ULL << core;
     }
     if (mask == 0) {
@@ -945,7 +952,7 @@ static ResultCode SetThreadCoreMask(Handle thread_handle, u32 core, u64 mask) {
     static constexpr u32 OnlyChangeMask = static_cast<u32>(-3);
     if (core == OnlyChangeMask) {
-        core = thread->ideal_core;
+        core = thread->GetIdealCore();
     } else if (core >= Core::NUM_CPU_CORES && core != static_cast<u32>(-1)) {
         return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidProcessorId);
     }

View file

@@ -70,7 +70,7 @@ void Thread::Stop() {
 void WaitCurrentThread_Sleep() {
     Thread* thread = GetCurrentThread();
-    thread->status = ThreadStatus::WaitSleep;
+    thread->SetStatus(ThreadStatus::WaitSleep);
 }
 void ExitCurrentThread() {
@@ -269,9 +269,9 @@ SharedPtr<Thread> SetupMainThread(KernelCore& kernel, VAddr entry_point, u32 pri
     SharedPtr<Thread> thread = std::move(thread_res).Unwrap();
     // Register 1 must be a handle to the main thread
-    thread->guest_handle = kernel.HandleTable().Create(thread).Unwrap();
-    thread->context.cpu_registers[1] = thread->guest_handle;
+    const Handle guest_handle = kernel.HandleTable().Create(thread).Unwrap();
+    thread->SetGuestHandle(guest_handle);
+    thread->GetContext().cpu_registers[1] = guest_handle;
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
     thread->ResumeFromWait();
@@ -299,6 +299,18 @@ VAddr Thread::GetCommandBufferAddress() const {
     return GetTLSAddress() + CommandHeaderOffset;
 }
+void Thread::SetStatus(ThreadStatus new_status) {
+    if (new_status == status) {
+        return;
+    }
+
+    if (status == ThreadStatus::Running) {
+        last_running_ticks = CoreTiming::GetTicks();
+    }
+
+    status = new_status;
+}
 void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
     if (thread->lock_owner == this) {
         // If the thread is already waiting for this thread to release the mutex, ensure that the
@@ -393,6 +405,18 @@ void Thread::ChangeCore(u32 core, u64 mask) {
     Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule();
 }
+bool Thread::AllWaitObjectsReady() {
+    return std::none_of(
+        wait_objects.begin(), wait_objects.end(),
+        [this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); });
+}
+
+bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
+                                  SharedPtr<WaitObject> object, std::size_t index) {
+    ASSERT(wakeup_callback);
+    return wakeup_callback(reason, std::move(thread), std::move(object), index);
+}
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 /**
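
For readers new to the callback interface used throughout these hunks, here is a small self-contained sketch of the guard-before-invoke pattern behind HasWakeupCallback/SetWakeupCallback/InvokeWakeupCallback. The FakeThread type and the simplified callback signature are illustrative only; the real Kernel::Thread callback also receives a wakeup reason and a wait object.

    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <iostream>

    struct FakeThread {
        using WakeupCallback = std::function<bool(std::size_t index)>;

        bool HasWakeupCallback() const {
            return wakeup_callback != nullptr;
        }
        void SetWakeupCallback(WakeupCallback callback) {
            wakeup_callback = std::move(callback);
        }
        void InvalidateWakeupCallback() {
            SetWakeupCallback(nullptr);
        }
        bool InvokeWakeupCallback(std::size_t index) {
            assert(wakeup_callback); // mirrors the ASSERT in Thread::InvokeWakeupCallback
            return wakeup_callback(index);
        }

    private:
        WakeupCallback wakeup_callback;
    };

    int main() {
        FakeThread thread;
        thread.SetWakeupCallback([](std::size_t index) {
            std::cout << "woken up, wait object index " << index << '\n';
            return true; // resume the thread
        });
        if (thread.HasWakeupCallback()) {
            thread.InvokeWakeupCallback(0);
        }
    }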

View file

@@ -65,6 +65,15 @@ public:
     using TLSMemory = std::vector<u8>;
     using TLSMemoryPtr = std::shared_ptr<TLSMemory>;
+    using MutexWaitingThreads = std::vector<SharedPtr<Thread>>;
+    using ThreadContext = Core::ARM_Interface::ThreadContext;
+    using ThreadWaitObjects = std::vector<SharedPtr<WaitObject>>;
+    using WakeupCallback = std::function<bool(ThreadWakeupReason reason, SharedPtr<Thread> thread,
+                                              SharedPtr<WaitObject> object, std::size_t index)>;
     /**
      * Creates and returns a new thread. The new thread is immediately scheduled
      * @param kernel The kernel instance this thread will be created under.
@@ -105,6 +114,14 @@ public:
         return current_priority;
     }
+    /**
+     * Gets the thread's nominal priority.
+     * @return The current thread's nominal priority.
+     */
+    u32 GetNominalPriority() const {
+        return nominal_priority;
+    }
     /**
      * Sets the thread's current priority
      * @param priority The new priority
@@ -133,7 +150,7 @@ public:
      * Gets the thread's thread ID
      * @return The thread's ID
      */
-    u32 GetThreadId() const {
+    u32 GetThreadID() const {
         return thread_id;
     }
@@ -203,6 +220,11 @@ public:
         return tpidr_el0;
     }
+    /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
+    void SetTPIDR_EL0(u64 value) {
+        tpidr_el0 = value;
+    }
     /*
      * Returns the address of the current thread's command buffer, located in the TLS.
      * @returns VAddr of the thread's command buffer.
@@ -218,69 +240,193 @@ public:
         return status == ThreadStatus::WaitSynchAll;
     }
-    Core::ARM_Interface::ThreadContext context;
-    u32 thread_id;
-    ThreadStatus status;
-    VAddr entry_point;
-    VAddr stack_top;
-    u32 nominal_priority; ///< Nominal thread priority, as set by the emulated application
-    u32 current_priority; ///< Current thread priority, can be temporarily changed
-    u64 last_running_ticks; ///< CPU tick when thread was last running
-    s32 processor_id;
-    VAddr tls_address; ///< Virtual address of the Thread Local Storage of the thread
-    u64 tpidr_el0; ///< TPIDR_EL0 read/write system register.
-    SharedPtr<Process> owner_process; ///< Process that owns this thread
+    ThreadContext& GetContext() {
+        return context;
+    }
+
+    const ThreadContext& GetContext() const {
+        return context;
+    }
+
+    ThreadStatus GetStatus() const {
+        return status;
+    }
+
+    void SetStatus(ThreadStatus new_status);
+
+    u64 GetLastRunningTicks() const {
+        return last_running_ticks;
+    }
+
+    s32 GetProcessorID() const {
+        return processor_id;
+    }
+
+    SharedPtr<Process>& GetOwnerProcess() {
+        return owner_process;
+    }
+
+    const SharedPtr<Process>& GetOwnerProcess() const {
+        return owner_process;
+    }
+
+    const ThreadWaitObjects& GetWaitObjects() const {
+        return wait_objects;
+    }
+
+    void SetWaitObjects(ThreadWaitObjects objects) {
+        wait_objects = std::move(objects);
+    }
+
+    void ClearWaitObjects() {
+        wait_objects.clear();
+    }
+
+    /// Determines whether all the objects this thread is waiting on are ready.
+    bool AllWaitObjectsReady();
+
+    const MutexWaitingThreads& GetMutexWaitingThreads() const {
+        return wait_mutex_threads;
+    }
+
+    Thread* GetLockOwner() const {
+        return lock_owner.get();
+    }
+
+    void SetLockOwner(SharedPtr<Thread> owner) {
+        lock_owner = std::move(owner);
+    }
+
+    VAddr GetCondVarWaitAddress() const {
+        return condvar_wait_address;
+    }
+
+    void SetCondVarWaitAddress(VAddr address) {
+        condvar_wait_address = address;
+    }
+
+    VAddr GetMutexWaitAddress() const {
+        return mutex_wait_address;
+    }
+
+    void SetMutexWaitAddress(VAddr address) {
+        mutex_wait_address = address;
+    }
+
+    Handle GetWaitHandle() const {
+        return wait_handle;
+    }
+
+    void SetWaitHandle(Handle handle) {
+        wait_handle = handle;
+    }
+
+    VAddr GetArbiterWaitAddress() const {
+        return arb_wait_address;
+    }
+
+    void SetArbiterWaitAddress(VAddr address) {
+        arb_wait_address = address;
+    }
+
+    void SetGuestHandle(Handle handle) {
+        guest_handle = handle;
+    }
+
+    bool HasWakeupCallback() const {
+        return wakeup_callback != nullptr;
+    }
+
+    void SetWakeupCallback(WakeupCallback callback) {
+        wakeup_callback = std::move(callback);
+    }
+
+    void InvalidateWakeupCallback() {
+        SetWakeupCallback(nullptr);
+    }
+
+    /**
+     * Invokes the thread's wakeup callback.
+     *
+     * @pre A valid wakeup callback has been set. Violating this precondition
+     * will cause an assertion to trigger.
+     */
+    bool InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
+                              SharedPtr<WaitObject> object, std::size_t index);
+
+    u32 GetIdealCore() const {
+        return ideal_core;
+    }
+
+    u64 GetAffinityMask() const {
+        return affinity_mask;
+    }
+
+private:
+    explicit Thread(KernelCore& kernel);
+    ~Thread() override;
+
+    Core::ARM_Interface::ThreadContext context{};
+    u32 thread_id = 0;
+    ThreadStatus status = ThreadStatus::Dormant;
+    VAddr entry_point = 0;
+    VAddr stack_top = 0;
+    u32 nominal_priority = 0; ///< Nominal thread priority, as set by the emulated application
+    u32 current_priority = 0; ///< Current thread priority, can be temporarily changed
+    u64 last_running_ticks = 0; ///< CPU tick when thread was last running
+    s32 processor_id = 0;
+    VAddr tls_address = 0; ///< Virtual address of the Thread Local Storage of the thread
+    u64 tpidr_el0 = 0;     ///< TPIDR_EL0 read/write system register.
+    /// Process that owns this thread
+    SharedPtr<Process> owner_process;
     /// Objects that the thread is waiting on, in the same order as they were
-    // passed to WaitSynchronization1/N.
-    std::vector<SharedPtr<WaitObject>> wait_objects;
+    /// passed to WaitSynchronization1/N.
+    ThreadWaitObjects wait_objects;
     /// List of threads that are waiting for a mutex that is held by this thread.
-    std::vector<SharedPtr<Thread>> wait_mutex_threads;
+    MutexWaitingThreads wait_mutex_threads;
     /// Thread that owns the lock that this thread is waiting for.
     SharedPtr<Thread> lock_owner;
-    // If waiting on a ConditionVariable, this is the ConditionVariable address
-    VAddr condvar_wait_address;
-    VAddr mutex_wait_address; ///< If waiting on a Mutex, this is the mutex address
-    Handle wait_handle;       ///< The handle used to wait for the mutex.
+    /// If waiting on a ConditionVariable, this is the ConditionVariable address
+    VAddr condvar_wait_address = 0;
+    /// If waiting on a Mutex, this is the mutex address
+    VAddr mutex_wait_address = 0;
+    /// The handle used to wait for the mutex.
+    Handle wait_handle = 0;
-    // If waiting for an AddressArbiter, this is the address being waited on.
+    /// If waiting for an AddressArbiter, this is the address being waited on.
     VAddr arb_wait_address{0};
-    std::string name;
     /// Handle used by guest emulated application to access this thread
-    Handle guest_handle;
+    Handle guest_handle = 0;
     /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
-    Handle callback_handle;
+    Handle callback_handle = 0;
-    using WakeupCallback = bool(ThreadWakeupReason reason, SharedPtr<Thread> thread,
-                                SharedPtr<WaitObject> object, std::size_t index);
-    // Callback that will be invoked when the thread is resumed from a waiting state. If the thread
-    // was waiting via WaitSynchronizationN then the object will be the last object that became
-    // available. In case of a timeout, the object will be nullptr.
-    std::function<WakeupCallback> wakeup_callback;
+    /// Callback that will be invoked when the thread is resumed from a waiting state. If the thread
+    /// was waiting via WaitSynchronizationN then the object will be the last object that became
+    /// available. In case of a timeout, the object will be nullptr.
+    WakeupCallback wakeup_callback;
     std::shared_ptr<Scheduler> scheduler;
     u32 ideal_core{0xFFFFFFFF};
     u64 affinity_mask{0x1};
-private:
-    explicit Thread(KernelCore& kernel);
-    ~Thread() override;
     TLSMemoryPtr tls_memory = std::make_shared<TLSMemory>();
+
+    std::string name;
 };
/** /**

View file

@@ -35,13 +35,15 @@ SharedPtr<Thread> WaitObject::GetHighestPriorityReadyThread() {
     u32 candidate_priority = THREADPRIO_LOWEST + 1;
     for (const auto& thread : waiting_threads) {
+        const ThreadStatus thread_status = thread->GetStatus();
         // The list of waiting threads must not contain threads that are not waiting to be awakened.
-        ASSERT_MSG(thread->status == ThreadStatus::WaitSynchAny ||
-                       thread->status == ThreadStatus::WaitSynchAll ||
-                       thread->status == ThreadStatus::WaitHLEEvent,
+        ASSERT_MSG(thread_status == ThreadStatus::WaitSynchAny ||
+                       thread_status == ThreadStatus::WaitSynchAll ||
+                       thread_status == ThreadStatus::WaitHLEEvent,
                    "Inconsistent thread statuses in waiting_threads");
-        if (thread->current_priority >= candidate_priority)
+        if (thread->GetPriority() >= candidate_priority)
             continue;
         if (ShouldWait(thread.get()))
@@ -50,16 +52,13 @@ SharedPtr<Thread> WaitObject::GetHighestPriorityReadyThread() {
         // A thread is ready to run if it's either in ThreadStatus::WaitSynchAny or
         // in ThreadStatus::WaitSynchAll and the rest of the objects it is waiting on are ready.
         bool ready_to_run = true;
-        if (thread->status == ThreadStatus::WaitSynchAll) {
-            ready_to_run = std::none_of(thread->wait_objects.begin(), thread->wait_objects.end(),
-                                        [&thread](const SharedPtr<WaitObject>& object) {
-                                            return object->ShouldWait(thread.get());
-                                        });
+        if (thread_status == ThreadStatus::WaitSynchAll) {
+            ready_to_run = thread->AllWaitObjectsReady();
         }
         if (ready_to_run) {
             candidate = thread.get();
-            candidate_priority = thread->current_priority;
+            candidate_priority = thread->GetPriority();
         }
     }
@@ -75,24 +74,24 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) {
     if (!thread->IsSleepingOnWaitAll()) {
         Acquire(thread.get());
     } else {
-        for (auto& object : thread->wait_objects) {
+        for (const auto& object : thread->GetWaitObjects()) {
             ASSERT(!object->ShouldWait(thread.get()));
             object->Acquire(thread.get());
         }
     }
-    std::size_t index = thread->GetWaitObjectIndex(this);
+    const std::size_t index = thread->GetWaitObjectIndex(this);
-    for (auto& object : thread->wait_objects)
+    for (const auto& object : thread->GetWaitObjects())
         object->RemoveWaitingThread(thread.get());
-    thread->wait_objects.clear();
+    thread->ClearWaitObjects();
     thread->CancelWakeupTimer();
     bool resume = true;
-    if (thread->wakeup_callback)
-        resume = thread->wakeup_callback(ThreadWakeupReason::Signal, thread, this, index);
+    if (thread->HasWakeupCallback())
+        resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Signal, thread, this, index);
     if (resume)
         thread->ResumeFromWait();

View file

@@ -119,7 +119,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() cons
     std::vector<std::unique_ptr<WaitTreeItem>> list;
     constexpr std::size_t BaseRegister = 29;
-    u64 base_pointer = thread.context.cpu_registers[BaseRegister];
+    u64 base_pointer = thread.GetContext().cpu_registers[BaseRegister];
     while (base_pointer != 0) {
         u64 lr = Memory::Read64(base_pointer + sizeof(u64));
@@ -213,7 +213,7 @@ WaitTreeThread::~WaitTreeThread() = default;
 QString WaitTreeThread::GetText() const {
     const auto& thread = static_cast<const Kernel::Thread&>(object);
     QString status;
-    switch (thread.status) {
+    switch (thread.GetStatus()) {
     case Kernel::ThreadStatus::Running:
         status = tr("running");
         break;
@@ -246,15 +246,17 @@ QString WaitTreeThread::GetText() const {
         status = tr("dead");
         break;
     }
-    QString pc_info = tr(" PC = 0x%1 LR = 0x%2")
-                          .arg(thread.context.pc, 8, 16, QLatin1Char('0'))
-                          .arg(thread.context.cpu_registers[30], 8, 16, QLatin1Char('0'));
+    const auto& context = thread.GetContext();
+    const QString pc_info = tr(" PC = 0x%1 LR = 0x%2")
+                                .arg(context.pc, 8, 16, QLatin1Char('0'))
+                                .arg(context.cpu_registers[30], 8, 16, QLatin1Char('0'));
     return WaitTreeWaitObject::GetText() + pc_info + " (" + status + ") ";
 }
 QColor WaitTreeThread::GetColor() const {
     const auto& thread = static_cast<const Kernel::Thread&>(object);
-    switch (thread.status) {
+    switch (thread.GetStatus()) {
     case Kernel::ThreadStatus::Running:
         return QColor(Qt::GlobalColor::darkGreen);
     case Kernel::ThreadStatus::Ready:
@@ -284,7 +286,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
     const auto& thread = static_cast<const Kernel::Thread&>(object);
     QString processor;
-    switch (thread.processor_id) {
+    switch (thread.GetProcessorID()) {
     case Kernel::ThreadProcessorId::THREADPROCESSORID_DEFAULT:
         processor = tr("default");
         break;
@@ -292,32 +294,35 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
     case Kernel::ThreadProcessorId::THREADPROCESSORID_1:
     case Kernel::ThreadProcessorId::THREADPROCESSORID_2:
     case Kernel::ThreadProcessorId::THREADPROCESSORID_3:
-        processor = tr("core %1").arg(thread.processor_id);
+        processor = tr("core %1").arg(thread.GetProcessorID());
         break;
     default:
-        processor = tr("Unknown processor %1").arg(thread.processor_id);
+        processor = tr("Unknown processor %1").arg(thread.GetProcessorID());
         break;
     }
     list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
-    list.push_back(std::make_unique<WaitTreeText>(tr("ideal core = %1").arg(thread.ideal_core)));
-    list.push_back(
-        std::make_unique<WaitTreeText>(tr("affinity mask = %1").arg(thread.affinity_mask)));
-    list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadId())));
+    list.push_back(
+        std::make_unique<WaitTreeText>(tr("ideal core = %1").arg(thread.GetIdealCore())));
+    list.push_back(
+        std::make_unique<WaitTreeText>(tr("affinity mask = %1").arg(thread.GetAffinityMask())));
+    list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID())));
     list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
-                                                      .arg(thread.current_priority)
-                                                      .arg(thread.nominal_priority)));
+                                                      .arg(thread.GetPriority())
+                                                      .arg(thread.GetNominalPriority())));
     list.push_back(std::make_unique<WaitTreeText>(
-        tr("last running ticks = %1").arg(thread.last_running_ticks)));
+        tr("last running ticks = %1").arg(thread.GetLastRunningTicks())));
-    if (thread.mutex_wait_address != 0)
-        list.push_back(std::make_unique<WaitTreeMutexInfo>(thread.mutex_wait_address));
-    else
+    const VAddr mutex_wait_address = thread.GetMutexWaitAddress();
+    if (mutex_wait_address != 0) {
+        list.push_back(std::make_unique<WaitTreeMutexInfo>(mutex_wait_address));
+    } else {
         list.push_back(std::make_unique<WaitTreeText>(tr("not waiting for mutex")));
+    }
-    if (thread.status == Kernel::ThreadStatus::WaitSynchAny ||
-        thread.status == Kernel::ThreadStatus::WaitSynchAll) {
-        list.push_back(std::make_unique<WaitTreeObjectList>(thread.wait_objects,
+    if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynchAny ||
+        thread.GetStatus() == Kernel::ThreadStatus::WaitSynchAll) {
+        list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetWaitObjects(),
                                                             thread.IsSleepingOnWaitAll()));
     }