forked from suyu/suyu
kernel: convert KProcess to new style
parent 6bfb4c8f71
commit 9863db9db4
10 changed files with 254 additions and 240 deletions
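The conversion applied throughout the diff below follows the project's newer kernel coding style: private data members gain an m_ prefix, accessors named ...ID() become ...Id(), and some member calls are written explicitly through this->. A minimal sketch of the pattern (the Example classes and their members are illustrative only, not code from this commit):

// Old style: bare member names, GetProgramID()-style accessor.
class ExampleOld {
public:
    u64 GetProgramID() const {
        return program_id;
    }

private:
    u64 program_id = 0;
};

// New style: m_-prefixed member, GetProgramId()-style accessor.
class ExampleNew {
public:
    u64 GetProgramId() const {
        return m_program_id;
    }

private:
    u64 m_program_id = 0;
};

Call sites are updated accordingly, e.g. process->GetProgramID() becomes process->GetProgramId().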
@@ -434,7 +434,7 @@ struct System::Impl {
 }

 Service::Glue::ApplicationLaunchProperty launch{};
-launch.title_id = process.GetProgramID();
+launch.title_id = process.GetProgramId();

 FileSys::PatchManager pm{launch.title_id, fs_controller, *content_provider};
 launch.version = pm.GetGameVersion().value_or(0);
@@ -762,7 +762,7 @@ const Core::SpeedLimiter& System::SpeedLimiter() const {
 }

 u64 System::GetApplicationProcessProgramID() const {
-return impl->kernel.ApplicationProcess()->GetProgramID();
+return impl->kernel.ApplicationProcess()->GetProgramId();
 }

 Loader::ResultStatus System::GetGameName(std::string& out) const {
@@ -756,7 +756,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {

 reply = fmt::format("Process: {:#x} ({})\n"
 "Program Id: {:#018x}\n",
-process->GetProcessID(), process->GetName(), process->GetProgramID());
+process->GetProcessId(), process->GetName(), process->GetProgramId());
 reply +=
 fmt::format("Layout:\n"
 " Alias: {:#012x} - {:#012x}\n"
@@ -71,32 +71,32 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
 auto& kernel = system.Kernel();

 process->name = std::move(process_name);
-process->resource_limit = res_limit;
-process->system_resource_address = 0;
-process->state = State::Created;
-process->program_id = 0;
-process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
-: kernel.CreateNewUserProcessID();
-process->capabilities.InitializeForMetadatalessProcess();
-process->is_initialized = true;
+process->m_resource_limit = res_limit;
+process->m_system_resource_address = 0;
+process->m_state = State::Created;
+process->m_program_id = 0;
+process->m_process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
+: kernel.CreateNewUserProcessID();
+process->m_capabilities.InitializeForMetadatalessProcess();
+process->m_is_initialized = true;

 std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr)));
 std::uniform_int_distribution<u64> distribution;
-std::generate(process->random_entropy.begin(), process->random_entropy.end(),
+std::generate(process->m_random_entropy.begin(), process->m_random_entropy.end(),
 [&] { return distribution(rng); });

 kernel.AppendNewProcess(process);

 // Clear remaining fields.
-process->num_running_threads = 0;
-process->is_signaled = false;
-process->exception_thread = nullptr;
-process->is_suspended = false;
-process->schedule_count = 0;
-process->is_handle_table_initialized = false;
+process->m_num_running_threads = 0;
+process->m_is_signaled = false;
+process->m_exception_thread = nullptr;
+process->m_is_suspended = false;
+process->m_schedule_count = 0;
+process->m_is_handle_table_initialized = false;

 // Open a reference to the resource limit.
-process->resource_limit->Open();
+process->m_resource_limit->Open();

 R_SUCCEED();
 }
@@ -106,34 +106,34 @@ void KProcess::DoWorkerTaskImpl() {
 }

 KResourceLimit* KProcess::GetResourceLimit() const {
-return resource_limit;
+return m_resource_limit;
 }

 void KProcess::IncrementRunningThreadCount() {
-ASSERT(num_running_threads.load() >= 0);
-++num_running_threads;
+ASSERT(m_num_running_threads.load() >= 0);
+++m_num_running_threads;
 }

 void KProcess::DecrementRunningThreadCount() {
-ASSERT(num_running_threads.load() > 0);
+ASSERT(m_num_running_threads.load() > 0);

-if (const auto prev = num_running_threads--; prev == 1) {
+if (const auto prev = m_num_running_threads--; prev == 1) {
 // TODO(bunnei): Process termination to be implemented when multiprocess is supported.
 }
 }

 u64 KProcess::GetTotalPhysicalMemoryAvailable() {
-const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) +
-page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
-main_thread_stack_size};
+const u64 capacity{m_resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) +
+m_page_table.GetNormalMemorySize() + GetSystemResourceSize() + m_image_size +
+m_main_thread_stack_size};
 if (const auto pool_size = m_kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
 capacity != pool_size) {
 LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size);
 }
-if (capacity < memory_usage_capacity) {
+if (capacity < m_memory_usage_capacity) {
 return capacity;
 }
-return memory_usage_capacity;
+return m_memory_usage_capacity;
 }

 u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
@@ -141,7 +141,7 @@ u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
 }

 u64 KProcess::GetTotalPhysicalMemoryUsed() {
-return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() +
+return m_image_size + m_main_thread_stack_size + m_page_table.GetNormalMemorySize() +
 GetSystemResourceSize();
 }

@@ -152,14 +152,14 @@ u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
 bool KProcess::ReleaseUserException(KThread* thread) {
 KScopedSchedulerLock sl{m_kernel};

-if (exception_thread == thread) {
-exception_thread = nullptr;
+if (m_exception_thread == thread) {
+m_exception_thread = nullptr;

 // Remove waiter thread.
 bool has_waiters{};
 if (KThread* next = thread->RemoveKernelWaiterByKey(
 std::addressof(has_waiters),
-reinterpret_cast<uintptr_t>(std::addressof(exception_thread)));
+reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)));
 next != nullptr) {
 next->EndWait(ResultSuccess);
 }
@@ -173,7 +173,7 @@ bool KProcess::ReleaseUserException(KThread* thread) {
 }

 void KProcess::PinCurrentThread(s32 core_id) {
-ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
+ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

 // Get the current thread.
 KThread* cur_thread =
@@ -191,7 +191,7 @@ void KProcess::PinCurrentThread(s32 core_id) {
 }

 void KProcess::UnpinCurrentThread(s32 core_id) {
-ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
+ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

 // Get the current thread.
 KThread* cur_thread =
@@ -206,7 +206,7 @@ void KProcess::UnpinCurrentThread(s32 core_id) {
 }

 void KProcess::UnpinThread(KThread* thread) {
-ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
+ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

 // Get the thread's core id.
 const auto core_id = thread->GetActiveCore();
@@ -222,14 +222,14 @@ void KProcess::UnpinThread(KThread* thread) {
 Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
 [[maybe_unused]] size_t size) {
 // Lock ourselves, to prevent concurrent access.
-KScopedLightLock lk(state_lock);
+KScopedLightLock lk(m_state_lock);

 // Try to find an existing info for the memory.
 KSharedMemoryInfo* shemen_info = nullptr;
 const auto iter = std::find_if(
-shared_memory_list.begin(), shared_memory_list.end(),
+m_shared_memory_list.begin(), m_shared_memory_list.end(),
 [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; });
-if (iter != shared_memory_list.end()) {
+if (iter != m_shared_memory_list.end()) {
 shemen_info = *iter;
 }

@@ -238,7 +238,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
 R_UNLESS(shemen_info != nullptr, ResultOutOfMemory);

 shemen_info->Initialize(shmem);
-shared_memory_list.push_back(shemen_info);
+m_shared_memory_list.push_back(shemen_info);
 }

 // Open a reference to the shared memory and its info.
@@ -251,20 +251,20 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
 void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
 [[maybe_unused]] size_t size) {
 // Lock ourselves, to prevent concurrent access.
-KScopedLightLock lk(state_lock);
+KScopedLightLock lk(m_state_lock);

 KSharedMemoryInfo* shemen_info = nullptr;
 const auto iter = std::find_if(
-shared_memory_list.begin(), shared_memory_list.end(),
+m_shared_memory_list.begin(), m_shared_memory_list.end(),
 [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; });
-if (iter != shared_memory_list.end()) {
+if (iter != m_shared_memory_list.end()) {
 shemen_info = *iter;
 }

 ASSERT(shemen_info != nullptr);

 if (shemen_info->Close()) {
-shared_memory_list.erase(iter);
+m_shared_memory_list.erase(iter);
 KSharedMemoryInfo::Free(m_kernel, shemen_info);
 }

@@ -273,22 +273,22 @@ void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr a
 }

 void KProcess::RegisterThread(KThread* thread) {
-KScopedLightLock lk{list_lock};
+KScopedLightLock lk{m_list_lock};

-thread_list.push_back(thread);
+m_thread_list.push_back(thread);
 }

 void KProcess::UnregisterThread(KThread* thread) {
-KScopedLightLock lk{list_lock};
+KScopedLightLock lk{m_list_lock};

-thread_list.remove(thread);
+m_thread_list.remove(thread);
 }

 u64 KProcess::GetFreeThreadCount() const {
-if (resource_limit != nullptr) {
+if (m_resource_limit != nullptr) {
 const auto current_value =
-resource_limit->GetCurrentValue(LimitableResource::ThreadCountMax);
-const auto limit_value = resource_limit->GetLimitValue(LimitableResource::ThreadCountMax);
+m_resource_limit->GetCurrentValue(LimitableResource::ThreadCountMax);
+const auto limit_value = m_resource_limit->GetLimitValue(LimitableResource::ThreadCountMax);
 return limit_value - current_value;
 } else {
 return 0;
@@ -297,35 +297,35 @@ u64 KProcess::GetFreeThreadCount() const {

 Result KProcess::Reset() {
 // Lock the process and the scheduler.
-KScopedLightLock lk(state_lock);
+KScopedLightLock lk(m_state_lock);
 KScopedSchedulerLock sl{m_kernel};

 // Validate that we're in a state that we can reset.
-R_UNLESS(state != State::Terminated, ResultInvalidState);
-R_UNLESS(is_signaled, ResultInvalidState);
+R_UNLESS(m_state != State::Terminated, ResultInvalidState);
+R_UNLESS(m_is_signaled, ResultInvalidState);

 // Clear signaled.
-is_signaled = false;
+m_is_signaled = false;
 R_SUCCEED();
 }

 Result KProcess::SetActivity(ProcessActivity activity) {
 // Lock ourselves and the scheduler.
-KScopedLightLock lk{state_lock};
-KScopedLightLock list_lk{list_lock};
+KScopedLightLock lk{m_state_lock};
+KScopedLightLock list_lk{m_list_lock};
 KScopedSchedulerLock sl{m_kernel};

 // Validate our state.
-R_UNLESS(state != State::Terminating, ResultInvalidState);
-R_UNLESS(state != State::Terminated, ResultInvalidState);
+R_UNLESS(m_state != State::Terminating, ResultInvalidState);
+R_UNLESS(m_state != State::Terminated, ResultInvalidState);

 // Either pause or resume.
 if (activity == ProcessActivity::Paused) {
 // Verify that we're not suspended.
-R_UNLESS(!is_suspended, ResultInvalidState);
+R_UNLESS(!m_is_suspended, ResultInvalidState);

 // Suspend all threads.
-for (auto* thread : GetThreadList()) {
+for (auto* thread : this->GetThreadList()) {
 thread->RequestSuspend(SuspendType::Process);
 }

@@ -335,10 +335,10 @@ Result KProcess::SetActivity(ProcessActivity activity) {
 ASSERT(activity == ProcessActivity::Runnable);

 // Verify that we're suspended.
-R_UNLESS(is_suspended, ResultInvalidState);
+R_UNLESS(m_is_suspended, ResultInvalidState);

 // Resume all threads.
-for (auto* thread : GetThreadList()) {
+for (auto* thread : this->GetThreadList()) {
 thread->Resume(SuspendType::Process);
 }

@@ -350,31 +350,32 @@ Result KProcess::SetActivity(ProcessActivity activity) {
 }

 Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
-program_id = metadata.GetTitleID();
-ideal_core = metadata.GetMainThreadCore();
-is_64bit_process = metadata.Is64BitProgram();
-system_resource_size = metadata.GetSystemResourceSize();
-image_size = code_size;
+m_program_id = metadata.GetTitleID();
+m_ideal_core = metadata.GetMainThreadCore();
+m_is_64bit_process = metadata.Is64BitProgram();
+m_system_resource_size = metadata.GetSystemResourceSize();
+m_image_size = code_size;

 KScopedResourceReservation memory_reservation(
-resource_limit, LimitableResource::PhysicalMemoryMax, code_size + system_resource_size);
+m_resource_limit, LimitableResource::PhysicalMemoryMax, code_size + m_system_resource_size);
 if (!memory_reservation.Succeeded()) {
 LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
-code_size + system_resource_size);
+code_size + m_system_resource_size);
 R_RETURN(ResultLimitReached);
 }
 // Initialize process address space
-if (const Result result{page_table.InitializeForProcess(
+if (const Result result{m_page_table.InitializeForProcess(
 metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application,
-0x8000000, code_size, std::addressof(m_kernel.GetAppSystemResource()), resource_limit)};
+0x8000000, code_size, std::addressof(m_kernel.GetAppSystemResource()),
+m_resource_limit)};
 result.IsError()) {
 R_RETURN(result);
 }

 // Map process code region
-if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(),
-code_size / PageSize, KMemoryState::Code,
-KMemoryPermission::None)};
+if (const Result result{m_page_table.MapProcessCode(m_page_table.GetCodeRegionStart(),
+code_size / PageSize, KMemoryState::Code,
+KMemoryPermission::None)};
 result.IsError()) {
 R_RETURN(result);
 }
@@ -382,7 +383,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
 // Initialize process capabilities
 const auto& caps{metadata.GetKernelCapabilities()};
 if (const Result result{
-capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)};
+m_capabilities.InitializeForUserProcess(caps.data(), caps.size(), m_page_table)};
 result.IsError()) {
 R_RETURN(result);
 }
@@ -392,12 +393,14 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
 case FileSys::ProgramAddressSpaceType::Is32Bit:
 case FileSys::ProgramAddressSpaceType::Is36Bit:
 case FileSys::ProgramAddressSpaceType::Is39Bit:
-memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart();
+m_memory_usage_capacity =
+m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart();
 break;

 case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
-memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() +
-page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart();
+m_memory_usage_capacity =
+m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart() +
+m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart();
 break;

 default:
@@ -406,26 +409,27 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
 }

 // Create TLS region
-R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address)));
+R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address)));
 memory_reservation.Commit();

-R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize()));
+R_RETURN(m_handle_table.Initialize(m_capabilities.GetHandleTableSize()));
 }

 void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
-ASSERT(AllocateMainThreadStack(stack_size) == ResultSuccess);
-resource_limit->Reserve(LimitableResource::ThreadCountMax, 1);
+ASSERT(this->AllocateMainThreadStack(stack_size) == ResultSuccess);
+m_resource_limit->Reserve(LimitableResource::ThreadCountMax, 1);

-const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
-ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
+const std::size_t heap_capacity{m_memory_usage_capacity -
+(m_main_thread_stack_size + m_image_size)};
+ASSERT(!m_page_table.SetMaxHeapSize(heap_capacity).IsError());

-ChangeState(State::Running);
+this->ChangeState(State::Running);

-SetupMainThread(m_kernel.System(), *this, main_thread_priority, main_thread_stack_top);
+SetupMainThread(m_kernel.System(), *this, main_thread_priority, m_main_thread_stack_top);
 }

 void KProcess::PrepareForTermination() {
-ChangeState(State::Terminating);
+this->ChangeState(State::Terminating);

 const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
 for (auto* thread : in_thread_list) {
@@ -445,12 +449,12 @@ void KProcess::PrepareForTermination() {

 stop_threads(m_kernel.System().GlobalSchedulerContext().GetThreadList());

-this->DeleteThreadLocalRegion(plr_address);
-plr_address = 0;
+this->DeleteThreadLocalRegion(m_plr_address);
+m_plr_address = 0;

-if (resource_limit) {
-resource_limit->Release(LimitableResource::PhysicalMemoryMax,
-main_thread_stack_size + image_size);
+if (m_resource_limit) {
+m_resource_limit->Release(LimitableResource::PhysicalMemoryMax,
+m_main_thread_stack_size + m_image_size);
 }

 ChangeState(State::Terminated);
@@ -459,8 +463,8 @@ void KProcess::PrepareForTermination() {
 void KProcess::Finalize() {
 // Free all shared memory infos.
 {
-auto it = shared_memory_list.begin();
-while (it != shared_memory_list.end()) {
+auto it = m_shared_memory_list.begin();
+while (it != m_shared_memory_list.end()) {
 KSharedMemoryInfo* info = *it;
 KSharedMemory* shmem = info->GetSharedMemory();

@@ -470,19 +474,19 @@ void KProcess::Finalize() {

 shmem->Close();

-it = shared_memory_list.erase(it);
+it = m_shared_memory_list.erase(it);
 KSharedMemoryInfo::Free(m_kernel, info);
 }
 }

 // Release memory to the resource limit.
-if (resource_limit != nullptr) {
-resource_limit->Close();
-resource_limit = nullptr;
+if (m_resource_limit != nullptr) {
+m_resource_limit->Close();
+m_resource_limit = nullptr;
 }

 // Finalize the page table.
-page_table.Finalize();
+m_page_table.Finalize();

 // Perform inherited finalization.
 KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
@@ -496,14 +500,14 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
 {
 KScopedSchedulerLock sl{m_kernel};

-if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) {
+if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) {
 tlr = it->Reserve();
 ASSERT(tlr != 0);

 if (it->IsAllUsed()) {
 tlp = std::addressof(*it);
-partially_used_tlp_tree.erase(it);
-fully_used_tlp_tree.insert(*tlp);
+m_partially_used_tlp_tree.erase(it);
+m_fully_used_tlp_tree.insert(*tlp);
 }

 *out = tlr;
@@ -527,9 +531,9 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
 {
 KScopedSchedulerLock sl{m_kernel};
 if (tlp->IsAllUsed()) {
-fully_used_tlp_tree.insert(*tlp);
+m_fully_used_tlp_tree.insert(*tlp);
 } else {
-partially_used_tlp_tree.insert(*tlp);
+m_partially_used_tlp_tree.insert(*tlp);
 }
 }

@@ -547,22 +551,22 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
 KScopedSchedulerLock sl{m_kernel};

 // Try to find the page in the partially used list.
-auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
-if (it == partially_used_tlp_tree.end()) {
+auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
+if (it == m_partially_used_tlp_tree.end()) {
 // If we don't find it, it has to be in the fully used list.
-it = fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
-R_UNLESS(it != fully_used_tlp_tree.end(), ResultInvalidAddress);
+it = m_fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
+R_UNLESS(it != m_fully_used_tlp_tree.end(), ResultInvalidAddress);

 // Release the region.
 it->Release(addr);

 // Move the page out of the fully used list.
 KThreadLocalPage* tlp = std::addressof(*it);
-fully_used_tlp_tree.erase(it);
+m_fully_used_tlp_tree.erase(it);
 if (tlp->IsAllFree()) {
 page_to_free = tlp;
 } else {
-partially_used_tlp_tree.insert(*tlp);
+m_partially_used_tlp_tree.insert(*tlp);
 }
 } else {
 // Release the region.
@@ -571,7 +575,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
 // Handle the all-free case.
 KThreadLocalPage* tlp = std::addressof(*it);
 if (tlp->IsAllFree()) {
-partially_used_tlp_tree.erase(it);
+m_partially_used_tlp_tree.erase(it);
 page_to_free = tlp;
 }
 }
@@ -589,11 +593,11 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {

 bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
 DebugWatchpointType type) {
-const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
+const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
 return wp.type == DebugWatchpointType::None;
 })};

-if (watch == watchpoints.end()) {
+if (watch == m_watchpoints.end()) {
 return false;
 }

@@ -602,7 +606,7 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
 watch->type = type;

 for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
-debug_page_refcounts[page]++;
+m_debug_page_refcounts[page]++;
 system.Memory().MarkRegionDebug(page, PageSize, true);
 }

@@ -611,7 +615,11 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,

 bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
 DebugWatchpointType type) {
-const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
+const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
 return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
 })};

-if (watch == watchpoints.end()) {
+if (watch == m_watchpoints.end()) {
 return false;
 }

@@ -624,8 +628,8 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
 watch->type = DebugWatchpointType::None;

 for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
-debug_page_refcounts[page]--;
-if (!debug_page_refcounts[page]) {
+m_debug_page_refcounts[page]--;
+if (!m_debug_page_refcounts[page]) {
 system.Memory().MarkRegionDebug(page, PageSize, false);
 }
 }
@@ -636,7 +640,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
 void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
 const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
 Svc::MemoryPermission permission) {
-page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
+m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
 };

 m_kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
@@ -648,35 +652,35 @@ void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
 }

 bool KProcess::IsSignaled() const {
-ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
-return is_signaled;
+ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+return m_is_signaled;
 }

 KProcess::KProcess(KernelCore& kernel)
-: KAutoObjectWithSlabHeapAndContainer{kernel}, page_table{m_kernel.System()},
-handle_table{m_kernel}, address_arbiter{m_kernel.System()}, condition_var{m_kernel.System()},
-state_lock{m_kernel}, list_lock{m_kernel} {}
+: KAutoObjectWithSlabHeapAndContainer{kernel}, m_page_table{m_kernel.System()},
+m_handle_table{m_kernel}, m_address_arbiter{m_kernel.System()},
+m_condition_var{m_kernel.System()}, m_state_lock{m_kernel}, m_list_lock{m_kernel} {}

 KProcess::~KProcess() = default;

 void KProcess::ChangeState(State new_state) {
-if (state == new_state) {
+if (m_state == new_state) {
 return;
 }

-state = new_state;
-is_signaled = true;
-NotifyAvailable();
+m_state = new_state;
+m_is_signaled = true;
+this->NotifyAvailable();
 }

 Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
 // Ensure that we haven't already allocated stack.
-ASSERT(main_thread_stack_size == 0);
+ASSERT(m_main_thread_stack_size == 0);

 // Ensure that we're allocating a valid stack.
 stack_size = Common::AlignUp(stack_size, PageSize);
 // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory);
-R_UNLESS(stack_size + image_size >= image_size, ResultOutOfMemory);
+R_UNLESS(stack_size + m_image_size >= m_image_size, ResultOutOfMemory);

 // Place a tentative reservation of memory for our new stack.
 KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax,
@@ -686,11 +690,11 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
 // Allocate and map our stack.
 if (stack_size) {
 KProcessAddress stack_bottom;
-R_TRY(page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize,
-KMemoryState::Stack, KMemoryPermission::UserReadWrite));
+R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize,
+KMemoryState::Stack, KMemoryPermission::UserReadWrite));

-main_thread_stack_top = stack_bottom + stack_size;
-main_thread_stack_size = stack_size;
+m_main_thread_stack_top = stack_bottom + stack_size;
+m_main_thread_stack_size = stack_size;
 }

 // We succeeded! Commit our memory reservation.
@@ -107,66 +107,76 @@ public:

 /// Gets a reference to the process' page table.
 KPageTable& PageTable() {
-return page_table;
+return m_page_table;
 }

 /// Gets const a reference to the process' page table.
 const KPageTable& PageTable() const {
-return page_table;
+return m_page_table;
 }

+/// Gets a reference to the process' page table.
+KPageTable& GetPageTable() {
+return m_page_table;
+}
+
+/// Gets const a reference to the process' page table.
+const KPageTable& GetPageTable() const {
+return m_page_table;
+}
+
 /// Gets a reference to the process' handle table.
 KHandleTable& GetHandleTable() {
-return handle_table;
+return m_handle_table;
 }

 /// Gets a const reference to the process' handle table.
 const KHandleTable& GetHandleTable() const {
-return handle_table;
+return m_handle_table;
 }

 Result SignalToAddress(VAddr address) {
-return condition_var.SignalToAddress(address);
+return m_condition_var.SignalToAddress(address);
 }

 Result WaitForAddress(Handle handle, VAddr address, u32 tag) {
-return condition_var.WaitForAddress(handle, address, tag);
+return m_condition_var.WaitForAddress(handle, address, tag);
 }

 void SignalConditionVariable(u64 cv_key, int32_t count) {
-return condition_var.Signal(cv_key, count);
+return m_condition_var.Signal(cv_key, count);
 }

 Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
-R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
+R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns));
 }

 Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
-R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
+R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
 }

 Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
 s64 timeout) {
-R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
+R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
 }

 VAddr GetProcessLocalRegionAddress() const {
-return plr_address;
+return m_plr_address;
 }

 /// Gets the current status of the process
 State GetState() const {
-return state;
+return m_state;
 }

 /// Gets the unique ID that identifies this particular process.
-u64 GetProcessID() const {
-return process_id;
+u64 GetProcessId() const {
+return m_process_id;
 }

 /// Gets the program ID corresponding to this process.
-u64 GetProgramID() const {
-return program_id;
+u64 GetProgramId() const {
+return m_program_id;
 }

 /// Gets the resource limit descriptor for this process
@@ -174,7 +184,7 @@ public:

 /// Gets the ideal CPU core ID for this process
 u8 GetIdealCoreId() const {
-return ideal_core;
+return m_ideal_core;
 }

 /// Checks if the specified thread priority is valid.
@@ -184,17 +194,17 @@ public:

 /// Gets the bitmask of allowed cores that this process' threads can run on.
 u64 GetCoreMask() const {
-return capabilities.GetCoreMask();
+return m_capabilities.GetCoreMask();
 }

 /// Gets the bitmask of allowed thread priorities.
 u64 GetPriorityMask() const {
-return capabilities.GetPriorityMask();
+return m_capabilities.GetPriorityMask();
 }

 /// Gets the amount of secure memory to allocate for memory management.
 u32 GetSystemResourceSize() const {
-return system_resource_size;
+return m_system_resource_size;
 }

 /// Gets the amount of secure memory currently in use for memory management.
@@ -214,67 +224,67 @@ public:

 /// Whether this process is an AArch64 or AArch32 process.
 bool Is64BitProcess() const {
-return is_64bit_process;
+return m_is_64bit_process;
 }

-[[nodiscard]] bool IsSuspended() const {
-return is_suspended;
+bool IsSuspended() const {
+return m_is_suspended;
 }

 void SetSuspended(bool suspended) {
-is_suspended = suspended;
+m_is_suspended = suspended;
 }

 /// Gets the total running time of the process instance in ticks.
 u64 GetCPUTimeTicks() const {
-return total_process_running_time_ticks;
+return m_total_process_running_time_ticks;
 }

 /// Updates the total running time, adding the given ticks to it.
 void UpdateCPUTimeTicks(u64 ticks) {
-total_process_running_time_ticks += ticks;
+m_total_process_running_time_ticks += ticks;
 }

 /// Gets the process schedule count, used for thread yielding
 s64 GetScheduledCount() const {
-return schedule_count;
+return m_schedule_count;
 }

 /// Increments the process schedule count, used for thread yielding.
 void IncrementScheduledCount() {
-++schedule_count;
+++m_schedule_count;
 }

 void IncrementRunningThreadCount();
 void DecrementRunningThreadCount();

 void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
-running_threads[core] = thread;
-running_thread_idle_counts[core] = idle_count;
+m_running_threads[core] = thread;
+m_running_thread_idle_counts[core] = idle_count;
 }

 void ClearRunningThread(KThread* thread) {
-for (size_t i = 0; i < running_threads.size(); ++i) {
-if (running_threads[i] == thread) {
-running_threads[i] = nullptr;
+for (size_t i = 0; i < m_running_threads.size(); ++i) {
+if (m_running_threads[i] == thread) {
+m_running_threads[i] = nullptr;
 }
 }
 }

 [[nodiscard]] KThread* GetRunningThread(s32 core) const {
-return running_threads[core];
+return m_running_threads[core];
 }

 bool ReleaseUserException(KThread* thread);

 [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
-return pinned_threads[core_id];
+return m_pinned_threads[core_id];
 }

 /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
 u64 GetRandomEntropy(std::size_t index) const {
-return random_entropy.at(index);
+return m_random_entropy.at(index);
 }

 /// Retrieves the total physical memory available to this process in bytes.
@@ -293,7 +303,7 @@ public:

 /// Gets the list of all threads created with this process as their owner.
 std::list<KThread*>& GetThreadList() {
-return thread_list;
+return m_thread_list;
 }

 /// Registers a thread as being created under this process,
@@ -345,15 +355,15 @@ public:
 void LoadModule(CodeSet code_set, VAddr base_addr);

 bool IsInitialized() const override {
-return is_initialized;
+return m_is_initialized;
 }

-static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+static void PostDestroy(uintptr_t arg) {}

 void Finalize() override;

 u64 GetId() const override {
-return GetProcessID();
+return GetProcessId();
 }

 bool IsSignaled() const override;
@@ -367,7 +377,7 @@ public:
 void UnpinThread(KThread* thread);

 KLightLock& GetStateLock() {
-return state_lock;
+return m_state_lock;
 }

 Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
@@ -392,7 +402,7 @@ public:
 bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);

 const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
-return watchpoints;
+return m_watchpoints;
 }

 const std::string& GetName() {
@@ -403,23 +413,23 @@ private:
 void PinThread(s32 core_id, KThread* thread) {
 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
 ASSERT(thread != nullptr);
-ASSERT(pinned_threads[core_id] == nullptr);
-pinned_threads[core_id] = thread;
+ASSERT(m_pinned_threads[core_id] == nullptr);
+m_pinned_threads[core_id] = thread;
 }

 void UnpinThread(s32 core_id, KThread* thread) {
 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
 ASSERT(thread != nullptr);
-ASSERT(pinned_threads[core_id] == thread);
-pinned_threads[core_id] = nullptr;
+ASSERT(m_pinned_threads[core_id] == thread);
+m_pinned_threads[core_id] = nullptr;
 }

 void FinalizeHandleTable() {
 // Finalize the table.
-handle_table.Finalize();
+m_handle_table.Finalize();

 // Note that the table is finalized.
-is_handle_table_initialized = false;
+m_is_handle_table_initialized = false;
 }

 void ChangeState(State new_state);
@@ -428,107 +438,107 @@ private:
 Result AllocateMainThreadStack(std::size_t stack_size);

 /// Memory manager for this process
-KPageTable page_table;
+KPageTable m_page_table;

 /// Current status of the process
-State state{};
+State m_state{};

 /// The ID of this process
-u64 process_id = 0;
+u64 m_process_id = 0;

 /// Title ID corresponding to the process
-u64 program_id = 0;
+u64 m_program_id = 0;

 /// Specifies additional memory to be reserved for the process's memory management by the
 /// system. When this is non-zero, secure memory is allocated and used for page table allocation
 /// instead of using the normal global page tables/memory block management.
-u32 system_resource_size = 0;
+u32 m_system_resource_size = 0;

 /// Resource limit descriptor for this process
-KResourceLimit* resource_limit{};
+KResourceLimit* m_resource_limit{};

-VAddr system_resource_address{};
+VAddr m_system_resource_address{};

 /// The ideal CPU core for this process, threads are scheduled on this core by default.
-u8 ideal_core = 0;
+u8 m_ideal_core = 0;

 /// Contains the parsed process capability descriptors.
-ProcessCapabilities capabilities;
+ProcessCapabilities m_capabilities;

 /// Whether or not this process is AArch64, or AArch32.
 /// By default, we currently assume this is true, unless otherwise
 /// specified by metadata provided to the process during loading.
-bool is_64bit_process = true;
+bool m_is_64bit_process = true;

 /// Total running time for the process in ticks.
-std::atomic<u64> total_process_running_time_ticks = 0;
+std::atomic<u64> m_total_process_running_time_ticks = 0;

 /// Per-process handle table for storing created object handles in.
-KHandleTable handle_table;
+KHandleTable m_handle_table;

 /// Per-process address arbiter.
-KAddressArbiter address_arbiter;
+KAddressArbiter m_address_arbiter;

 /// The per-process mutex lock instance used for handling various
 /// forms of services, such as lock arbitration, and condition
 /// variable related facilities.
-KConditionVariable condition_var;
+KConditionVariable m_condition_var;

 /// Address indicating the location of the process' dedicated TLS region.
-VAddr plr_address = 0;
+VAddr m_plr_address = 0;

 /// Random values for svcGetInfo RandomEntropy
-std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
+std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{};

 /// List of threads that are running with this process as their owner.
-std::list<KThread*> thread_list;
+std::list<KThread*> m_thread_list;

 /// List of shared memory that are running with this process as their owner.
-std::list<KSharedMemoryInfo*> shared_memory_list;
+std::list<KSharedMemoryInfo*> m_shared_memory_list;

 /// Address of the top of the main thread's stack
-VAddr main_thread_stack_top{};
+VAddr m_main_thread_stack_top{};

 /// Size of the main thread's stack
-std::size_t main_thread_stack_size{};
+std::size_t m_main_thread_stack_size{};

 /// Memory usage capacity for the process
-std::size_t memory_usage_capacity{};
+std::size_t m_memory_usage_capacity{};

 /// Process total image size
-std::size_t image_size{};
+std::size_t m_image_size{};

 /// Schedule count of this process
-s64 schedule_count{};
+s64 m_schedule_count{};

-size_t memory_release_hint{};
+size_t m_memory_release_hint{};

 std::string name{};

-bool is_signaled{};
-bool is_suspended{};
-bool is_immortal{};
-bool is_handle_table_initialized{};
-bool is_initialized{};
+bool m_is_signaled{};
+bool m_is_suspended{};
+bool m_is_immortal{};
+bool m_is_handle_table_initialized{};
+bool m_is_initialized{};

-std::atomic<u16> num_running_threads{};
+std::atomic<u16> m_num_running_threads{};

-std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{};
-std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{};
-std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{};
-std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> watchpoints{};
-std::map<VAddr, u64> debug_page_refcounts;
+std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
+std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
+std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
+std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
+std::map<VAddr, u64> m_debug_page_refcounts;

-KThread* exception_thread{};
+KThread* m_exception_thread{};

-KLightLock state_lock;
-KLightLock list_lock;
+KLightLock m_state_lock;
+KLightLock m_list_lock;

 using TLPTree =
 Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
 using TLPIterator = TLPTree::iterator;
-TLPTree fully_used_tlp_tree;
-TLPTree partially_used_tlp_tree;
+TLPTree m_fully_used_tlp_tree;
+TLPTree m_partially_used_tlp_tree;
 };

 } // namespace Kernel
@@ -103,7 +103,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
 R_SUCCEED();

 case InfoType::ProgramId:
-*result = process->GetProgramID();
+*result = process->GetProgramId();
 R_SUCCEED();

 case InfoType::UserExceptionContextAddress:
@@ -11,7 +11,7 @@ namespace Kernel::Svc {
 void ExitProcess(Core::System& system) {
 auto* current_process = GetCurrentProcessPointer(system.Kernel());

-LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
+LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessId());
 ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
 "Process has already exited");

@@ -80,7 +80,7 @@ Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_pr
 std::min(static_cast<std::size_t>(out_process_ids_size), num_processes);

 for (std::size_t i = 0; i < copy_amount; ++i) {
-memory.Write64(out_process_ids, process_list[i]->GetProcessID());
+memory.Write64(out_process_ids, process_list[i]->GetProcessId());
 out_process_ids += sizeof(u64);
 }

@@ -79,7 +79,7 @@ IWindowController::IWindowController(Core::System& system_)
 IWindowController::~IWindowController() = default;

 void IWindowController::GetAppletResourceUserId(HLERequestContext& ctx) {
-const u64 process_id = system.ApplicationProcess()->GetProcessID();
+const u64 process_id = system.ApplicationProcess()->GetProcessId();

 LOG_DEBUG(Service_AM, "called. Process ID=0x{:016X}", process_id);

@@ -18,14 +18,14 @@ namespace {
 std::optional<u64> GetTitleIDForProcessID(const Core::System& system, u64 process_id) {
 const auto& list = system.Kernel().GetProcessList();
 const auto iter = std::find_if(list.begin(), list.end(), [&process_id](const auto& process) {
-return process->GetProcessID() == process_id;
+return process->GetProcessId() == process_id;
 });

 if (iter == list.end()) {
 return std::nullopt;
 }

-return (*iter)->GetProgramID();
+return (*iter)->GetProgramId();
 }
 } // Anonymous namespace

@@ -37,12 +37,12 @@ std::optional<Kernel::KProcess*> SearchProcessList(
 void GetApplicationPidGeneric(HLERequestContext& ctx,
 const std::vector<Kernel::KProcess*>& process_list) {
 const auto process = SearchProcessList(process_list, [](const auto& proc) {
-return proc->GetProcessID() == Kernel::KProcess::ProcessIDMin;
+return proc->GetProcessId() == Kernel::KProcess::ProcessIDMin;
 });

 IPC::ResponseBuilder rb{ctx, 4};
 rb.Push(ResultSuccess);
-rb.Push(process.has_value() ? (*process)->GetProcessID() : NO_PROCESS_FOUND_PID);
+rb.Push(process.has_value() ? (*process)->GetProcessId() : NO_PROCESS_FOUND_PID);
 }

 } // Anonymous namespace
@@ -108,7 +108,7 @@ private:

 const auto process =
 SearchProcessList(kernel.GetProcessList(), [program_id](const auto& proc) {
-return proc->GetProgramID() == program_id;
+return proc->GetProgramId() == program_id;
 });

 if (!process.has_value()) {
@@ -119,7 +119,7 @@ private:

 IPC::ResponseBuilder rb{ctx, 4};
 rb.Push(ResultSuccess);
-rb.Push((*process)->GetProcessID());
+rb.Push((*process)->GetProcessId());
 }

 void GetApplicationProcessId(HLERequestContext& ctx) {
@@ -136,7 +136,7 @@ private:
 LOG_WARNING(Service_PM, "(Partial Implementation) called, pid={:016X}", pid);

 const auto process = SearchProcessList(kernel.GetProcessList(), [pid](const auto& proc) {
-return proc->GetProcessID() == pid;
+return proc->GetProcessId() == pid;
 });

 if (!process.has_value()) {
@@ -159,7 +159,7 @@ private:

 OverrideStatus override_status{};
 ProgramLocation program_location{
-.program_id = (*process)->GetProgramID(),
+.program_id = (*process)->GetProgramId(),
 .storage_id = 0,
 };

@@ -194,7 +194,7 @@ private:
 LOG_DEBUG(Service_PM, "called, process_id={:016X}", process_id);

 const auto process = SearchProcessList(process_list, [process_id](const auto& proc) {
-return proc->GetProcessID() == process_id;
+return proc->GetProcessId() == process_id;
 });

 if (!process.has_value()) {
@@ -205,7 +205,7 @@ private:

 IPC::ResponseBuilder rb{ctx, 4};
 rb.Push(ResultSuccess);
-rb.Push((*process)->GetProgramID());
+rb.Push((*process)->GetProgramId());
 }

 void AtmosphereGetProcessId(HLERequestContext& ctx) {
@@ -215,7 +215,7 @@ private:
 LOG_DEBUG(Service_PM, "called, program_id={:016X}", program_id);

 const auto process = SearchProcessList(process_list, [program_id](const auto& proc) {
-return proc->GetProgramID() == program_id;
+return proc->GetProgramId() == program_id;
 });

 if (!process.has_value()) {
@@ -226,7 +226,7 @@ private:

 IPC::ResponseBuilder rb{ctx, 4};
 rb.Push(ResultSuccess);
-rb.Push((*process)->GetProcessID());
+rb.Push((*process)->GetProcessId());
 }

 const std::vector<Kernel::KProcess*>& process_list;
@@ -196,7 +196,7 @@ void CheatEngine::Initialize() {
 });
 core_timing.ScheduleLoopingEvent(CHEAT_ENGINE_NS, CHEAT_ENGINE_NS, event);

-metadata.process_id = system.ApplicationProcess()->GetProcessID();
+metadata.process_id = system.ApplicationProcess()->GetProcessId();
 metadata.title_id = system.GetApplicationProcessProgramID();

 const auto& page_table = system.ApplicationProcess()->PageTable();