forked from suyu/suyu
Merge pull request #3405 from lioncash/thread
address_arbiter: Minor cleanup to list querying
Commit 8b9a56033a
2 changed files with 26 additions and 29 deletions
src/core/hle/kernel/address_arbiter.cpp:

@@ -201,42 +201,39 @@ void AddressArbiter::HandleWakeupThread(std::shared_ptr<Thread> thread) {
 void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
     const VAddr arb_addr = thread->GetArbiterWaitAddress();
     std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
-    auto it = thread_list.begin();
-    while (it != thread_list.end()) {
-        const std::shared_ptr<Thread>& current_thread = *it;
-        if (current_thread->GetPriority() >= thread->GetPriority()) {
-            thread_list.insert(it, thread);
-            return;
-        }
-        ++it;
-    }
-    thread_list.push_back(std::move(thread));
+
+    const auto iter =
+        std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) {
+            return entry->GetPriority() >= thread->GetPriority();
+        });
+
+    if (iter == thread_list.cend()) {
+        thread_list.push_back(std::move(thread));
+    } else {
+        thread_list.insert(iter, std::move(thread));
+    }
 }
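Reviewer note, not part of the diff: the hand-rolled priority scan is replaced by std::find_if, which returns the first waiter whose priority compares >= the incoming thread's; inserting in front of that waiter keeps the list sorted by priority value. A minimal standalone sketch of the same pattern, with plain ints standing in for thread priorities and a hypothetical InsertSorted helper:

#include <algorithm>
#include <iostream>
#include <list>

// Hypothetical helper, for illustration only: keeps `list` sorted
// ascending, mirroring the commit's find_if predicate.
void InsertSorted(std::list<int>& list, int priority) {
    // Find the first entry that compares >= the newcomer; the newcomer is
    // placed in front of it (and thus in front of entries of equal
    // priority, which is what the >= in the commit's predicate implies).
    const auto iter = std::find_if(list.cbegin(), list.cend(),
                                   [priority](int entry) { return entry >= priority; });
    if (iter == list.cend()) {
        list.push_back(priority); // no such entry: newcomer goes last
    } else {
        list.insert(iter, priority);
    }
}

int main() {
    std::list<int> list;
    for (const int priority : {30, 10, 20}) {
        InsertSorted(list, priority);
    }
    for (const int priority : list) {
        std::cout << priority << ' '; // prints: 10 20 30
    }
    std::cout << '\n';
}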
 void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
     const VAddr arb_addr = thread->GetArbiterWaitAddress();
     std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
-    auto it = thread_list.begin();
-    while (it != thread_list.end()) {
-        const std::shared_ptr<Thread>& current_thread = *it;
-        if (current_thread.get() == thread.get()) {
-            thread_list.erase(it);
-            return;
-        }
-        ++it;
-    }
-    UNREACHABLE();
+
+    const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
+                                   [&thread](const auto& entry) { return thread == entry; });
+
+    ASSERT(iter != thread_list.cend());
+
+    thread_list.erase(iter);
 }
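Reviewer note, not part of the diff: comparing `thread == entry` is safe here because operator== on std::shared_ptr compares the stored raw pointers, not the pointed-to values, so it is equivalent to the old current_thread.get() == thread.get() check. The ASSERT also replaces UNREACHABLE() and fires before the erase rather than after the loop falls through. A small sketch of the equality semantics:

#include <iostream>
#include <memory>

int main() {
    const auto a = std::make_shared<int>(42);
    const auto b = a;                         // shares ownership with a
    const auto c = std::make_shared<int>(42); // equal value, distinct object

    std::cout << (a == b) << (a.get() == b.get()) << '\n'; // prints: 11
    std::cout << (a == c) << '\n';                         // prints: 0 (distinct objects)
}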
-std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(VAddr address) {
-    std::vector<std::shared_ptr<Thread>> result;
-    std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[address];
-    auto it = thread_list.begin();
-    while (it != thread_list.end()) {
-        std::shared_ptr<Thread> current_thread = *it;
-        result.push_back(std::move(current_thread));
-        ++it;
-    }
-    return result;
+std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
+    VAddr address) const {
+    const auto iter = arb_threads.find(address);
+    if (iter == arb_threads.cend()) {
+        return {};
+    }
+
+    const std::list<std::shared_ptr<Thread>>& thread_list = iter->second;
+    return {thread_list.cbegin(), thread_list.cend()};
 }
 
 } // namespace Kernel
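Reviewer note, not part of the diff: the accessor can become const because arb_threads.find() never mutates the map, whereas the old arb_threads[address] would default-construct an empty list for a previously unseen address (which is also why std::unordered_map has no const operator[]). A small sketch of that difference:

#include <iostream>
#include <string>
#include <unordered_map>

int main() {
    std::unordered_map<std::string, int> map;

    map["miss"]; // operator[] inserts a default-constructed int on a miss
    std::cout << map.size() << '\n'; // prints: 1

    const auto& const_map = map;
    // const_map["other"];  // would not compile: no const operator[]
    if (const_map.find("other") == const_map.cend()) {
        std::cout << "not found; size is still " << map.size() << '\n';
    }
}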
src/core/hle/kernel/address_arbiter.h:

@@ -86,7 +86,7 @@ private:
     void RemoveThread(std::shared_ptr<Thread> thread);
 
     // Gets the threads waiting on an address.
-    std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address);
+    std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;
 
     /// List of threads waiting for a address arbiter
     std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;