forked from suyu/suyu
Merge pull request #6428 from bunnei/service-thread-crash-fix
hle: kernel: Remove service thread manager and use weak_ptr.
commit 3c621d37f0
4 changed files with 56 additions and 28 deletions
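The idea behind the change is easiest to see in isolation. The sketch below is not part of the diff; names such as Registry are illustrative stand-ins rather than yuzu types. It shows the ownership model the commit moves to: the kernel keeps the only owning shared_ptr references to service threads, everything else holds a weak_ptr, and callers lock() at the point of use, so a request that races session teardown observes an expired pointer instead of touching a destroyed thread.

    #include <cassert>
    #include <cstdio>
    #include <memory>
    #include <unordered_set>

    // Illustrative stand-in for Kernel::ServiceThread.
    struct ServiceThread {
        void QueueSyncRequest() { std::puts("request queued"); }
    };

    // Illustrative stand-in for the kernel side: the only owner of service threads.
    class Registry {
    public:
        // Hand out a non-owning handle; the owning shared_ptr stays in the set.
        std::weak_ptr<ServiceThread> CreateServiceThread() {
            auto thread = std::make_shared<ServiceThread>();
            service_threads.emplace(thread);
            return thread;
        }

        // Releasing erases the owning reference directly; no manager worker thread involved.
        void ReleaseServiceThread(std::weak_ptr<ServiceThread> thread) {
            if (auto strong_ptr = thread.lock()) {
                service_threads.erase(strong_ptr);
            }
        }

    private:
        std::unordered_set<std::shared_ptr<ServiceThread>> service_threads;
    };

    int main() {
        Registry kernel;
        std::weak_ptr<ServiceThread> handle = kernel.CreateServiceThread();

        // Callers must promote the weak handle before use...
        if (auto strong_ptr = handle.lock()) {
            strong_ptr->QueueSyncRequest();
        }

        kernel.ReleaseServiceThread(handle);
        assert(handle.expired());

        // ...so a request that arrives after release is skipped instead of crashing.
        if (auto strong_ptr = handle.lock()) {
            strong_ptr->QueueSyncRequest(); // never reached
        }
    }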
@@ -41,6 +41,21 @@ SessionRequestManager::SessionRequestManager(KernelCore& kernel_) : kernel{kerne
 SessionRequestManager::~SessionRequestManager() = default;

+bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& context) const {
+    if (IsDomain() && context.HasDomainMessageHeader()) {
+        const auto& message_header = context.GetDomainMessageHeader();
+        const auto object_id = message_header.object_id;
+
+        if (object_id > DomainHandlerCount()) {
+            LOG_CRITICAL(IPC, "object_id {} is too big!", object_id);
+            return false;
+        }
+        return DomainHandler(object_id - 1) != nullptr;
+    } else {
+        return session_handler != nullptr;
+    }
+}
+
 void SessionRequestHandler::ClientConnected(KServerSession* session) {
     session->SetSessionHandler(shared_from_this());
 }

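For context on the new HasSessionRequestHandler check above (the hunk appears to come from the SessionRequestManager implementation): domain object ids are treated as 1-based, so ids above the handler count are rejected and the lookup indexes with object_id - 1. A standalone sketch of the same validation pattern, with hypothetical names and an added guard for id 0:

    #include <cstddef>
    #include <cstdio>
    #include <memory>
    #include <vector>

    struct Handler {}; // hypothetical handler type

    // Same bounds rule as the diff: reject 1-based ids above the count,
    // then index with id - 1 and require a non-null handler.
    bool HasHandler(const std::vector<std::shared_ptr<Handler>>& handlers, std::size_t object_id) {
        if (object_id == 0 || object_id > handlers.size()) {
            std::printf("object_id %zu is out of range!\n", object_id);
            return false;
        }
        return handlers[object_id - 1] != nullptr;
    }

    int main() {
        const std::vector<std::shared_ptr<Handler>> handlers{std::make_shared<Handler>(), nullptr};
        std::printf("%d %d %d\n", HasHandler(handlers, 1) ? 1 : 0, HasHandler(handlers, 2) ? 1 : 0,
                    HasHandler(handlers, 3) ? 1 : 0);
    }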
@@ -85,8 +85,8 @@ public:
      */
     void ClientDisconnected(KServerSession* session);

-    std::shared_ptr<ServiceThread> GetServiceThread() const {
-        return service_thread.lock();
+    std::weak_ptr<ServiceThread> GetServiceThread() const {
+        return service_thread;
     }

 protected:
@@ -152,10 +152,12 @@ public:
         session_handler = std::move(handler);
     }

-    std::shared_ptr<ServiceThread> GetServiceThread() const {
+    std::weak_ptr<ServiceThread> GetServiceThread() const {
         return session_handler->GetServiceThread();
     }

+    bool HasSessionRequestHandler(const HLERequestContext& context) const;
+
 private:
     bool is_domain{};
     SessionRequestHandlerPtr session_handler;
@@ -163,7 +165,6 @@ private:

 private:
     KernelCore& kernel;
-    std::weak_ptr<ServiceThread> service_thread;
 };

 /**
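A note on the signature change above: the getter used to return a locked shared_ptr, which a caller could store and thereby keep the service thread alive past its release; returning the weak_ptr itself means a stored handle cannot extend the lifetime and every caller has to lock() right where the thread is used. A minimal illustration with plain standard-library types, not the yuzu classes:

    #include <cassert>
    #include <memory>

    struct ServiceThread {}; // illustrative stand-in

    int main() {
        auto owner = std::make_shared<ServiceThread>();      // the kernel's owning reference
        std::weak_ptr<ServiceThread> service_thread = owner; // what a handler would hold

        // Old-style getter: returns a locked shared_ptr. A caller that stores it
        // keeps the service thread alive even after the owner lets go.
        std::shared_ptr<ServiceThread> stored = service_thread.lock();
        owner.reset();
        assert(stored != nullptr); // still alive, solely because of the stored copy
        stored.reset();

        // New-style getter: returns the weak_ptr itself. A stored copy cannot
        // extend the lifetime; the caller has to lock() at the point of use.
        std::weak_ptr<ServiceThread> handle = service_thread;
        assert(handle.expired());
    }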
@@ -8,6 +8,7 @@
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
+#include "common/scope_exit.h"
 #include "core/core_timing.h"
 #include "core/hle/ipc_helpers.h"
 #include "core/hle/kernel/hle_ipc.h"
@@ -119,11 +120,25 @@ ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memor
     context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);

-    if (auto strong_ptr = manager->GetServiceThread(); strong_ptr) {
-        strong_ptr->QueueSyncRequest(*parent, std::move(context));
-        return ResultSuccess;
+    // In the event that something fails here, stub a result to prevent the game from crashing.
+    // This is a work-around in the event that somehow we process a service request after the
+    // session has been closed by the game. This has been observed to happen rarely in Pokemon
+    // Sword/Shield and is likely a result of us using host threads/scheduling for services.
+    // TODO(bunnei): Find a better solution here.
+    auto error_guard = SCOPE_GUARD({ CompleteSyncRequest(*context); });
+
+    // Ensure we have a session request handler
+    if (manager->HasSessionRequestHandler(*context)) {
+        if (auto strong_ptr = manager->GetServiceThread().lock()) {
+            strong_ptr->QueueSyncRequest(*parent, std::move(context));
+
+            // We succeeded.
+            error_guard.Cancel();
+        } else {
+            ASSERT_MSG(false, "strong_ptr is nullptr!");
+        }
     } else {
-        ASSERT_MSG(false, "strong_ptr was nullptr!");
+        ASSERT_MSG(false, "handler is invalid!");
     }

     return ResultSuccess;

@@ -131,13 +146,20 @@ ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memor
 ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
     ResultCode result = ResultSuccess;

     // If the session has been converted to a domain, handle the domain request
-    if (IsDomain() && context.HasDomainMessageHeader()) {
-        result = HandleDomainSyncRequest(context);
-        // If there is no domain header, the regular session handler is used
-    } else if (manager->HasSessionHandler()) {
-        // If this ServerSession has an associated HLE handler, forward the request to it.
-        result = manager->SessionHandler().HandleSyncRequest(*this, context);
+    if (manager->HasSessionRequestHandler(context)) {
+        if (IsDomain() && context.HasDomainMessageHeader()) {
+            result = HandleDomainSyncRequest(context);
+            // If there is no domain header, the regular session handler is used
+        } else if (manager->HasSessionHandler()) {
+            // If this ServerSession has an associated HLE handler, forward the request to it.
+            result = manager->SessionHandler().HandleSyncRequest(*this, context);
+        }
+    } else {
+        ASSERT_MSG(false, "Session handler is invalid, stubbing response!");
+        IPC::ResponseBuilder rb(context, 2);
+        rb.Push(ResultSuccess);
     }

     if (convert_to_domain) {
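The work-around in QueueSyncRequest hinges on a cancellable scope guard: the guard's fallback completes the request with a stubbed response, and it is cancelled only once the request has actually been queued on a live service thread. A minimal sketch of that pattern follows; this is not yuzu's SCOPE_GUARD macro, just an illustrative equivalent.

    #include <cstdio>
    #include <utility>

    // Minimal cancellable scope guard (illustrative; not yuzu's SCOPE_GUARD).
    template <typename F>
    class ScopeGuard {
    public:
        explicit ScopeGuard(F func_) : func{std::move(func_)} {}
        ~ScopeGuard() {
            if (active) {
                func();
            }
        }
        void Cancel() { active = false; }
        ScopeGuard(const ScopeGuard&) = delete;
        ScopeGuard& operator=(const ScopeGuard&) = delete;

    private:
        F func;
        bool active{true};
    };

    void StubResponse() { std::printf("stubbing response so the guest does not crash\n"); }

    bool HandleRequest(bool handler_alive) {
        // If anything below fails, the guard completes the request with a stub result.
        ScopeGuard guard{[] { StubResponse(); }};
        if (!handler_alive) {
            return false; // guard fires: the request is answered with a stub
        }
        // ... queue the request on the service thread ...
        guard.Cancel(); // success: suppress the fallback
        return true;
    }

    int main() {
        HandleRequest(true);
        HandleRequest(false);
    }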
@@ -63,8 +63,6 @@ struct KernelCore::Impl {
         global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
         global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);

-        service_thread_manager =
-            std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager");
         is_phantom_mode_for_singlecore = false;

         InitializePhysicalCores();
@@ -96,7 +94,6 @@ struct KernelCore::Impl {
         process_list.clear();

         // Ensures all service threads gracefully shutdown
-        service_thread_manager.reset();
         service_threads.clear();

         next_object_id = 0;
@@ -680,10 +677,6 @@ struct KernelCore::Impl {
     // Threads used for services
     std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;

-    // Service threads are managed by a worker thread, so that a calling service thread can queue up
-    // the release of itself
-    std::unique_ptr<Common::ThreadWorker> service_thread_manager;
-
     std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads;
     std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
     std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
@@ -986,17 +979,14 @@ void KernelCore::ExitSVCProfile() {

 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
     auto service_thread = std::make_shared<Kernel::ServiceThread>(*this, 1, name);
-    impl->service_thread_manager->QueueWork(
-        [this, service_thread] { impl->service_threads.emplace(service_thread); });
+    impl->service_threads.emplace(service_thread);
     return service_thread;
 }

 void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
-    impl->service_thread_manager->QueueWork([this, service_thread] {
-        if (auto strong_ptr = service_thread.lock()) {
-            impl->service_threads.erase(strong_ptr);
-        }
-    });
+    if (auto strong_ptr = service_thread.lock()) {
+        impl->service_threads.erase(strong_ptr);
+    }
 }

 Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() {