diff --git a/src/client/windows/common/auto_critical_section.h b/src/client/windows/common/auto_critical_section.h
index 82c7b7f1..a2076b04 100644
--- a/src/client/windows/common/auto_critical_section.h
+++ b/src/client/windows/common/auto_critical_section.h
@@ -40,14 +40,31 @@ class AutoCriticalSection {
  public:
   // Creates a new instance with the given critical section object
   // and enters the critical section immediately.
-  explicit AutoCriticalSection(CRITICAL_SECTION* cs) : cs_(cs) {
+  explicit AutoCriticalSection(CRITICAL_SECTION* cs) : cs_(cs), taken_(false) {
     assert(cs_);
-    EnterCriticalSection(cs_);
+    Acquire();
   }
 
   // Destructor: leaves the critical section.
   ~AutoCriticalSection() {
+    if (taken_) {
+      Release();
+    }
+  }
+
+  // Enters the critical section. Recursive Acquire() calls are not allowed.
+  void Acquire() {
+    assert(!taken_);
+    EnterCriticalSection(cs_);
+    taken_ = true;
+  }
+
+  // Leaves the critical section. The caller should not call Release() unless
+  // the critical section has been entered already.
+  void Release() {
+    assert(taken_);
     LeaveCriticalSection(cs_);
+    taken_ = false;
   }
 
  private:
@@ -56,6 +73,7 @@ class AutoCriticalSection {
   AutoCriticalSection& operator=(const AutoCriticalSection&);
 
   CRITICAL_SECTION* cs_;
+  bool taken_;
 };
 
 }  // namespace google_breakpad
diff --git a/src/client/windows/crash_generation/crash_generation_server.cc b/src/client/windows/crash_generation/crash_generation_server.cc
index 794742db..477973c1 100644
--- a/src/client/windows/crash_generation/crash_generation_server.cc
+++ b/src/client/windows/crash_generation/crash_generation_server.cc
@@ -125,7 +125,7 @@ CrashGenerationServer::CrashGenerationServer(
       overlapped_(),
       client_info_(NULL),
       cleanup_item_count_(0) {
-  InitializeCriticalSection(&clients_sync_);
+  InitializeCriticalSection(&sync_);
 
   if (dump_path) {
     dump_generator_.reset(new MinidumpGenerator(*dump_path));
@@ -133,38 +133,41 @@ CrashGenerationServer::CrashGenerationServer(
 }
 
 CrashGenerationServer::~CrashGenerationServer() {
-  // Indicate to existing threads that server is shutting down.
-  shutting_down_ = true;
-
-  // Even if there are no current worker threads running, it is possible that
-  // an I/O request is pending on the pipe right now but not yet done. In fact,
-  // it's very likely this is the case unless we are in an ERROR state. If we
-  // don't wait for the pending I/O to be done, then when the I/O completes,
-  // it may write to invalid memory. AppVerifier will flag this problem too.
-  // So we disconnect from the pipe and then wait for the server to get into
-  // error state so that the pending I/O will fail and get cleared.
-  DisconnectNamedPipe(pipe_);
-  int num_tries = 100;
-  while (num_tries-- && server_state_ != IPC_SERVER_STATE_ERROR) {
-    Sleep(10);
-  }
-
-  // Unregister wait on the pipe.
-  if (pipe_wait_handle_) {
-    // Wait for already executing callbacks to finish.
-    UnregisterWaitEx(pipe_wait_handle_, INVALID_HANDLE_VALUE);
-  }
-
-  // Close the pipe to avoid further client connections.
-  if (pipe_) {
-    CloseHandle(pipe_);
-  }
-
-  // Request all ClientInfo objects to unregister all waits.
-  // New scope to hold the lock for the shortest time.
+  // New scope to release the lock automatically.
   {
-    AutoCriticalSection lock(&clients_sync_);
+    AutoCriticalSection lock(&sync_);
+    // Indicate to existing threads that server is shutting down.
+    shutting_down_ = true;
+
+    // Even if there are no current worker threads running, it is possible that
+    // an I/O request is pending on the pipe right now but not yet done.
+    // In fact, it's very likely this is the case unless we are in an ERROR
+    // state. If we don't wait for the pending I/O to be done, then when the I/O
+    // completes, it may write to invalid memory. AppVerifier will flag this
+    // problem too. So we disconnect from the pipe and then wait for the server
+    // to get into error state so that the pending I/O will fail and get
+    // cleared.
+    DisconnectNamedPipe(pipe_);
+    int num_tries = 100;
+    while (num_tries-- && server_state_ != IPC_SERVER_STATE_ERROR) {
+      lock.Release();
+      Sleep(10);
+      lock.Acquire();
+    }
+
+    // Unregister wait on the pipe.
+    if (pipe_wait_handle_) {
+      // Wait for already executing callbacks to finish.
+      UnregisterWaitEx(pipe_wait_handle_, INVALID_HANDLE_VALUE);
+    }
+
+    // Close the pipe to avoid further client connections.
+    if (pipe_) {
+      CloseHandle(pipe_);
+    }
+
+    // Request all ClientInfo objects to unregister all waits.
     std::list<ClientInfo*>::iterator iter;
     for (iter = clients_.begin(); iter != clients_.end(); ++iter) {
       ClientInfo* client_info = *iter;
@@ -185,33 +188,35 @@ CrashGenerationServer::~CrashGenerationServer() {
     }
   }
 
-  // Clean up all the ClientInfo objects.
   // New scope to hold the lock for the shortest time.
   {
-    AutoCriticalSection lock(&clients_sync_);
+    AutoCriticalSection lock(&sync_);
+    // Clean up all the ClientInfo objects.
     std::list<ClientInfo*>::iterator iter;
     for (iter = clients_.begin(); iter != clients_.end(); ++iter) {
       ClientInfo* client_info = *iter;
       delete client_info;
     }
+
+    if (server_alive_handle_) {
+      // Release the mutex before closing the handle so that clients requesting
+      // dumps wait for a long time for the server to generate a dump.
+      ReleaseMutex(server_alive_handle_);
+      CloseHandle(server_alive_handle_);
+    }
+
+    if (overlapped_.hEvent) {
+      CloseHandle(overlapped_.hEvent);
+    }
   }
 
-  if (server_alive_handle_) {
-    // Release the mutex before closing the handle so that clients requesting
-    // dumps wait for a long time for the server to generate a dump.
-    ReleaseMutex(server_alive_handle_);
-    CloseHandle(server_alive_handle_);
-  }
-
-  if (overlapped_.hEvent) {
-    CloseHandle(overlapped_.hEvent);
-  }
-
-  DeleteCriticalSection(&clients_sync_);
+  DeleteCriticalSection(&sync_);
 }
 
 bool CrashGenerationServer::Start() {
+  AutoCriticalSection lock(&sync_);
+
   if (server_state_ != IPC_SERVER_STATE_UNINITIALIZED) {
     return false;
   }
 
@@ -682,6 +687,8 @@ bool CrashGenerationServer::RespondToClient(ClientInfo* client_info) {
 // implements the state machine described in ReadMe.txt along with the
 // helper methods HandleXXXState.
 void CrashGenerationServer::HandleConnectionRequest() {
+  AutoCriticalSection lock(&sync_);
+
   // If we are shutting doen then get into ERROR state, reset the event so more
   // workers don't run and return immediately.
   if (shutting_down_) {
@@ -768,7 +775,7 @@ bool CrashGenerationServer::AddClient(ClientInfo* client_info) {
 
   // New scope to hold the lock for the shortest time.
   {
-    AutoCriticalSection lock(&clients_sync_);
+    AutoCriticalSection lock(&sync_);
     clients_.push_back(client_info);
   }
 
@@ -836,7 +843,7 @@ void CrashGenerationServer::DoCleanup(ClientInfo* client_info) {
 
   // Start a new scope to release lock automatically.
   {
-    AutoCriticalSection lock(&clients_sync_);
+    AutoCriticalSection lock(&sync_);
     clients_.remove(client_info);
   }
 
diff --git a/src/client/windows/crash_generation/crash_generation_server.h b/src/client/windows/crash_generation/crash_generation_server.h
index 2f0a222c..ea3776fb 100644
--- a/src/client/windows/crash_generation/crash_generation_server.h
+++ b/src/client/windows/crash_generation/crash_generation_server.h
@@ -216,8 +216,9 @@ class CrashGenerationServer {
   // asynchronous IO operation.
   void EnterStateWhenSignaled(IPCServerState state);
 
-  // Sync object for thread-safe access to the shared list of clients.
-  CRITICAL_SECTION clients_sync_;
+  // Sync object for thread-safe access to the shared list of clients and
+  // the server's state.
+  CRITICAL_SECTION sync_;
 
   // List of clients.
   std::list<ClientInfo*> clients_;
@@ -271,10 +272,10 @@ class CrashGenerationServer {
   // Note that since we restrict the pipe to one instance, we
   // only need to keep one state of the server. Otherwise, server
   // would have one state per client it is talking to.
-  volatile IPCServerState server_state_;
+  IPCServerState server_state_;
 
   // Whether the server is shutting down.
-  volatile bool shutting_down_;
+  bool shutting_down_;
 
   // Overlapped instance for async I/O on the pipe.
   OVERLAPPED overlapped_;
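Note (not part of the patch): a minimal usage sketch of the extended AutoCriticalSection, showing the pattern the new destructor relies on: the guard temporarily drops the lock around Sleep() so other threads can enter the critical section and advance the shared state, and its destructor still releases the critical section automatically only if it is currently held. The Example class, its sync_ and error_ members, and WaitForError() are hypothetical names used purely for illustration; only AutoCriticalSection and its Acquire()/Release() methods come from the patch above.

// Hypothetical illustration only -- not part of Breakpad.
#include <windows.h>

#include "client/windows/common/auto_critical_section.h"

class Example {
 public:
  Example() : error_(false) {
    InitializeCriticalSection(&sync_);
  }

  ~Example() {
    DeleteCriticalSection(&sync_);
  }

  // Polls a flag that another thread sets while holding sync_.
  void WaitForError() {
    google_breakpad::AutoCriticalSection lock(&sync_);  // Enters sync_ immediately.
    int num_tries = 100;
    while (num_tries-- && !error_) {
      lock.Release();  // Drop the lock so other threads can take sync_ and set error_.
      Sleep(10);
      lock.Acquire();  // Re-enter before reading the shared flag again.
    }
    // lock's destructor calls Release() only if the critical section is still taken here.
  }

 private:
  CRITICAL_SECTION sync_;
  bool error_;
};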