1
0
Fork 0
forked from suyu/suyu

Address Feedback.

This commit is contained in:
Fernando Sahmkow 2020-04-16 12:29:53 -04:00
parent ec2f3e48e1
commit f616dc0b59
13 changed files with 117 additions and 132 deletions

View file

@ -154,12 +154,9 @@ public:
std::lock_guard lock{mutex};
std::vector<MapInterval> objects = GetMapsInRange(addr, size);
for (auto& object : objects) {
if (object->IsModified() && object->IsRegistered()) {
return true;
}
}
return false;
return std::any_of(objects.begin(), objects.end(), [](const MapInterval& map) {
return map->IsModified() && map->IsRegistered();
});
}
/// Mark the specified region as being invalidated
@ -199,9 +196,9 @@ public:
}
void CommitAsyncFlushes() {
if (uncommited_flushes) {
if (uncommitted_flushes) {
auto commit_list = std::make_shared<std::list<MapInterval>>();
for (auto& map : *uncommited_flushes) {
for (auto& map : *uncommitted_flushes) {
if (map->IsRegistered() && map->IsModified()) {
// TODO(Blinkhawk): Implement backend asynchronous flushing
// AsyncFlushMap(map)
@ -209,41 +206,34 @@ public:
}
}
if (!commit_list->empty()) {
commited_flushes.push_back(commit_list);
committed_flushes.push_back(commit_list);
} else {
commited_flushes.emplace_back();
committed_flushes.emplace_back();
}
} else {
commited_flushes.emplace_back();
committed_flushes.emplace_back();
}
uncommited_flushes.reset();
uncommitted_flushes.reset();
}
bool ShouldWaitAsyncFlushes() {
if (commited_flushes.empty()) {
bool ShouldWaitAsyncFlushes() const {
if (committed_flushes.empty()) {
return false;
}
auto& flush_list = commited_flushes.front();
if (!flush_list) {
return false;
}
return true;
return committed_flushes.front() != nullptr;
}
bool HasUncommitedFlushes() {
if (uncommited_flushes) {
return true;
}
return false;
bool HasUncommittedFlushes() const {
return uncommitted_flushes != nullptr;
}
void PopAsyncFlushes() {
if (commited_flushes.empty()) {
if (committed_flushes.empty()) {
return;
}
auto& flush_list = commited_flushes.front();
auto& flush_list = committed_flushes.front();
if (!flush_list) {
commited_flushes.pop_front();
committed_flushes.pop_front();
return;
}
for (MapInterval& map : *flush_list) {
@ -252,7 +242,7 @@ public:
FlushMap(map);
}
}
commited_flushes.pop_front();
committed_flushes.pop_front();
}
virtual BufferType GetEmptyBuffer(std::size_t size) = 0;
@ -568,10 +558,10 @@ private:
}
void MarkForAsyncFlush(MapInterval& map) {
if (!uncommited_flushes) {
uncommited_flushes = std::make_shared<std::unordered_set<MapInterval>>();
if (!uncommitted_flushes) {
uncommitted_flushes = std::make_shared<std::unordered_set<MapInterval>>();
}
uncommited_flushes->insert(map);
uncommitted_flushes->insert(map);
}
VideoCore::RasterizerInterface& rasterizer;
@ -605,8 +595,8 @@ private:
std::vector<u8> staging_buffer;
std::list<MapInterval> marked_for_unregister;
std::shared_ptr<std::unordered_set<MapInterval>> uncommited_flushes{};
std::list<std::shared_ptr<std::list<MapInterval>>> commited_flushes;
std::shared_ptr<std::unordered_set<MapInterval>> uncommitted_flushes{};
std::list<std::shared_ptr<std::list<MapInterval>>> committed_flushes;
std::recursive_mutex mutex;
};

View file

@ -28,15 +28,15 @@ public:
/// Constructs a semaphore fence tracking a GPU address/payload pair;
/// is_semaphore is forced to true (the other overload covers sync points).
FenceBase(GPUVAddr address, u32 payload, bool is_stubbed)
: address{address}, payload{payload}, is_semaphore{true}, is_stubbed{is_stubbed} {}
constexpr GPUVAddr GetAddress() const {
GPUVAddr GetAddress() const {
return address;
}
constexpr u32 GetPayload() const {
u32 GetPayload() const {
return payload;
}
constexpr bool IsSemaphore() const {
bool IsSemaphore() const {
return is_semaphore;
}
@ -54,12 +54,8 @@ class FenceManager {
public:
void SignalSemaphore(GPUVAddr addr, u32 value) {
TryReleasePendingFences();
bool should_flush = texture_cache.HasUncommitedFlushes();
should_flush |= buffer_cache.HasUncommitedFlushes();
should_flush |= query_cache.HasUncommitedFlushes();
texture_cache.CommitAsyncFlushes();
buffer_cache.CommitAsyncFlushes();
query_cache.CommitAsyncFlushes();
bool should_flush = ShouldFlush();
CommitAsyncFlushes();
TFence new_fence = CreateFence(addr, value, !should_flush);
fences.push(new_fence);
QueueFence(new_fence);
@ -71,12 +67,8 @@ public:
void SignalSyncPoint(u32 value) {
TryReleasePendingFences();
bool should_flush = texture_cache.HasUncommitedFlushes();
should_flush |= buffer_cache.HasUncommitedFlushes();
should_flush |= query_cache.HasUncommitedFlushes();
texture_cache.CommitAsyncFlushes();
buffer_cache.CommitAsyncFlushes();
query_cache.CommitAsyncFlushes();
bool should_flush = ShouldFlush();
CommitAsyncFlushes();
TFence new_fence = CreateFence(value, !should_flush);
fences.push(new_fence);
QueueFence(new_fence);
@ -89,15 +81,10 @@ public:
void WaitPendingFences() {
while (!fences.empty()) {
TFence& current_fence = fences.front();
bool should_wait = texture_cache.ShouldWaitAsyncFlushes();
should_wait |= buffer_cache.ShouldWaitAsyncFlushes();
should_wait |= query_cache.ShouldWaitAsyncFlushes();
if (should_wait) {
if (ShouldWait()) {
WaitFence(current_fence);
}
texture_cache.PopAsyncFlushes();
buffer_cache.PopAsyncFlushes();
query_cache.PopAsyncFlushes();
PopAsyncFlushes();
auto& gpu{system.GPU()};
if (current_fence->IsSemaphore()) {
auto& memory_manager{gpu.MemoryManager()};
@ -116,10 +103,18 @@ protected:
: system{system}, rasterizer{rasterizer}, texture_cache{texture_cache},
buffer_cache{buffer_cache}, query_cache{query_cache} {}
virtual ~FenceManager() {}
/// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is
/// true
virtual TFence CreateFence(u32 value, bool is_stubbed) = 0;
/// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true
virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0;
/// Queues a fence into the backend if the fence isn't stubbed.
virtual void QueueFence(TFence& fence) = 0;
virtual bool IsFenceSignaled(TFence& fence) = 0;
/// Notifies that the backend fence has been signaled/reached in host GPU.
virtual bool IsFenceSignaled(TFence& fence) const = 0;
/// Waits until a fence has been signalled by the host GPU.
virtual void WaitFence(TFence& fence) = 0;
Core::System& system;
@ -132,15 +127,10 @@ private:
void TryReleasePendingFences() {
while (!fences.empty()) {
TFence& current_fence = fences.front();
bool should_wait = texture_cache.ShouldWaitAsyncFlushes();
should_wait |= buffer_cache.ShouldWaitAsyncFlushes();
should_wait |= query_cache.ShouldWaitAsyncFlushes();
if (should_wait && !IsFenceSignaled(current_fence)) {
if (ShouldWait() && !IsFenceSignaled(current_fence)) {
return;
}
texture_cache.PopAsyncFlushes();
buffer_cache.PopAsyncFlushes();
query_cache.PopAsyncFlushes();
PopAsyncFlushes();
auto& gpu{system.GPU()};
if (current_fence->IsSemaphore()) {
auto& memory_manager{gpu.MemoryManager()};
@ -152,6 +142,28 @@ private:
}
}
/// Returns true when any cache reports a committed flush list pending,
/// i.e. the caller must wait on the fence before continuing.
bool ShouldWait() const {
    if (texture_cache.ShouldWaitAsyncFlushes()) {
        return true;
    }
    if (buffer_cache.ShouldWaitAsyncFlushes()) {
        return true;
    }
    return query_cache.ShouldWaitAsyncFlushes();
}
/// Returns true when any cache has uncommitted flushes queued up,
/// meaning the next fence should not be stubbed.
bool ShouldFlush() const {
    if (texture_cache.HasUncommittedFlushes()) {
        return true;
    }
    if (buffer_cache.HasUncommittedFlushes()) {
        return true;
    }
    return query_cache.HasUncommittedFlushes();
}
/// Pops the front committed flush list from each cache, performing any
/// pending flushes it contains (see each cache's own PopAsyncFlushes).
/// NOTE(review): the texture -> buffer -> query order mirrors
/// CommitAsyncFlushes below; keep the two in sync.
void PopAsyncFlushes() {
texture_cache.PopAsyncFlushes();
buffer_cache.PopAsyncFlushes();
query_cache.PopAsyncFlushes();
}
/// Moves each cache's uncommitted flush set into its committed queue so a
/// later PopAsyncFlushes can drain it.
void CommitAsyncFlushes() {
texture_cache.CommitAsyncFlushes();
buffer_cache.CommitAsyncFlushes();
query_cache.CommitAsyncFlushes();
}
std::queue<TFence> fences;
};

View file

@ -125,7 +125,7 @@ bool GPU::CancelSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
return true;
}
u64 GPU::RequestFlush(CacheAddr addr, std::size_t size) {
u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
std::unique_lock lck{flush_request_mutex};
const u64 fence = ++last_flush_fence;
flush_requests.emplace_back(fence, addr, size);
@ -137,7 +137,7 @@ void GPU::TickWork() {
while (!flush_requests.empty()) {
auto& request = flush_requests.front();
const u64 fence = request.fence;
const CacheAddr addr = request.addr;
const VAddr addr = request.addr;
const std::size_t size = request.size;
flush_requests.pop_front();
flush_request_mutex.unlock();

View file

@ -155,16 +155,22 @@ public:
/// Calls a GPU method.
void CallMethod(const MethodCall& method_call);
/// Flush all current written commands into the host GPU for execution.
void FlushCommands();
/// Synchronizes CPU writes with Host GPU memory.
void SyncGuestHost();
/// Signal the ending of command list.
virtual void OnCommandListEnd();
u64 RequestFlush(CacheAddr addr, std::size_t size);
/// Request a host GPU memory flush from the CPU.
u64 RequestFlush(VAddr addr, std::size_t size);
/// Obtains the current flush request fence id via a relaxed atomic load
/// (only a monotonic counter snapshot is needed, no ordering with other data).
u64 CurrentFlushRequestFence() const {
return current_flush_fence.load(std::memory_order_relaxed);
}
/// Tick pending requests within the GPU.
void TickWork();
/// Returns a reference to the Maxwell3D GPU engine.
@ -336,10 +342,10 @@ private:
std::condition_variable sync_cv;
struct FlushRequest {
FlushRequest(u64 fence, CacheAddr addr, std::size_t size)
FlushRequest(u64 fence, VAddr addr, std::size_t size)
: fence{fence}, addr{addr}, size{size} {}
u64 fence;
CacheAddr addr;
VAddr addr;
std::size_t size;
};

View file

@ -176,41 +176,34 @@ public:
}
void CommitAsyncFlushes() {
commited_flushes.push_back(uncommited_flushes);
uncommited_flushes.reset();
committed_flushes.push_back(uncommitted_flushes);
uncommitted_flushes.reset();
}
bool HasUncommitedFlushes() {
if (uncommited_flushes) {
return true;
}
return false;
bool HasUncommittedFlushes() const {
return uncommitted_flushes != nullptr;
}
bool ShouldWaitAsyncFlushes() {
if (commited_flushes.empty()) {
bool ShouldWaitAsyncFlushes() const {
if (committed_flushes.empty()) {
return false;
}
auto& flush_list = commited_flushes.front();
if (!flush_list) {
return false;
}
return true;
return committed_flushes.front() != nullptr;
}
void PopAsyncFlushes() {
if (commited_flushes.empty()) {
if (committed_flushes.empty()) {
return;
}
auto& flush_list = commited_flushes.front();
auto& flush_list = committed_flushes.front();
if (!flush_list) {
commited_flushes.pop_front();
committed_flushes.pop_front();
return;
}
for (VAddr query_address : *flush_list) {
FlushAndRemoveRegion(query_address, 4);
}
commited_flushes.pop_front();
committed_flushes.pop_front();
}
protected:
@ -268,10 +261,10 @@ private:
}
void AsyncFlushQuery(VAddr addr) {
if (!uncommited_flushes) {
uncommited_flushes = std::make_shared<std::unordered_set<VAddr>>();
if (!uncommitted_flushes) {
uncommitted_flushes = std::make_shared<std::unordered_set<VAddr>>();
}
uncommited_flushes->insert(addr);
uncommitted_flushes->insert(addr);
}
static constexpr std::uintptr_t PAGE_SIZE = 4096;
@ -286,8 +279,8 @@ private:
std::array<CounterStream, VideoCore::NumQueryTypes> streams;
std::shared_ptr<std::unordered_set<VAddr>> uncommited_flushes{};
std::list<std::shared_ptr<std::unordered_set<VAddr>>> commited_flushes;
std::shared_ptr<std::unordered_set<VAddr>> uncommitted_flushes{};
std::list<std::shared_ptr<std::unordered_set<VAddr>>> committed_flushes;
};
template <class QueryCache, class HostCounter>

View file

@ -64,6 +64,7 @@ public:
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
virtual void FlushRegion(VAddr addr, u64 size) = 0;
/// Check if the the specified memory area requires flushing to CPU Memory.
virtual bool MustFlushRegion(VAddr addr, u64 size) = 0;
/// Notify rasterizer that any caches of the specified region should be invalidated

View file

@ -62,7 +62,7 @@ void FenceManagerOpenGL::QueueFence(Fence& fence) {
fence->Queue();
}
bool FenceManagerOpenGL::IsFenceSignaled(Fence& fence) {
bool FenceManagerOpenGL::IsFenceSignaled(Fence& fence) const {
return fence->IsSignaled();
}

View file

@ -46,7 +46,7 @@ protected:
Fence CreateFence(u32 value, bool is_stubbed) override;
Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
void QueueFence(Fence& fence) override;
bool IsFenceSignaled(Fence& fence) override;
bool IsFenceSignaled(Fence& fence) const override;
void WaitFence(Fence& fence) override;
};

View file

@ -653,9 +653,6 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
}
// Returns whether the region [addr, addr + size) holds modified cached data
// that must be flushed back to CPU memory.
bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size) {
// NOTE(review): below "extreme" GPU accuracy only the buffer cache is
// consulted, so modified textures are not reported — confirm intended.
if (!Settings::IsGPULevelExtreme()) {
return buffer_cache.MustFlushRegion(addr, size);
}
return texture_cache.MustFlushRegion(addr, size) || buffer_cache.MustFlushRegion(addr, size);
}
@ -672,7 +669,7 @@ void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
if (!addr || !size) {
if (addr == 0 || size == 0) {
return;
}
texture_cache.OnCPUWrite(addr, size);

View file

@ -90,7 +90,7 @@ void VKFenceManager::QueueFence(Fence& fence) {
fence->Queue();
}
bool VKFenceManager::IsFenceSignaled(Fence& fence) {
bool VKFenceManager::IsFenceSignaled(Fence& fence) const {
return fence->IsSignaled();
}

View file

@ -63,7 +63,7 @@ protected:
Fence CreateFence(u32 value, bool is_stubbed) override;
Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
void QueueFence(Fence& fence) override;
bool IsFenceSignaled(Fence& fence) override;
bool IsFenceSignaled(Fence& fence) const override;
void WaitFence(Fence& fence) override;
private:

View file

@ -533,7 +533,7 @@ void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) {
}
void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
if (!addr || !size) {
if (addr == 0 || size == 0) {
return;
}
texture_cache.OnCPUWrite(addr, size);

View file

@ -120,15 +120,8 @@ public:
std::lock_guard lock{mutex};
auto surfaces = GetSurfacesInRegion(addr, size);
if (surfaces.empty()) {
return false;
}
for (const auto& surface : surfaces) {
if (surface->IsModified()) {
return true;
}
}
return false;
return std::any_of(surfaces.begin(), surfaces.end(),
[](const TSurface& surface) { return surface->IsModified(); });
}
TView GetTextureSurface(const Tegra::Texture::TICEntry& tic,
@ -333,41 +326,34 @@ public:
}
void CommitAsyncFlushes() {
commited_flushes.push_back(uncommited_flushes);
uncommited_flushes.reset();
committed_flushes.push_back(uncommitted_flushes);
uncommitted_flushes.reset();
}
bool HasUncommitedFlushes() {
if (uncommited_flushes) {
return true;
}
return false;
bool HasUncommittedFlushes() const {
return uncommitted_flushes != nullptr;
}
bool ShouldWaitAsyncFlushes() {
if (commited_flushes.empty()) {
bool ShouldWaitAsyncFlushes() const {
if (committed_flushes.empty()) {
return false;
}
auto& flush_list = commited_flushes.front();
if (!flush_list) {
return false;
}
return true;
return committed_flushes.front() != nullptr;
}
void PopAsyncFlushes() {
if (commited_flushes.empty()) {
if (committed_flushes.empty()) {
return;
}
auto& flush_list = commited_flushes.front();
auto& flush_list = committed_flushes.front();
if (!flush_list) {
commited_flushes.pop_front();
committed_flushes.pop_front();
return;
}
for (TSurface& surface : *flush_list) {
FlushSurface(surface);
}
commited_flushes.pop_front();
committed_flushes.pop_front();
}
protected:
@ -1206,10 +1192,10 @@ private:
};
void AsyncFlushSurface(TSurface& surface) {
if (!uncommited_flushes) {
uncommited_flushes = std::make_shared<std::list<TSurface>>();
if (!uncommitted_flushes) {
uncommitted_flushes = std::make_shared<std::list<TSurface>>();
}
uncommited_flushes->push_back(surface);
uncommitted_flushes->push_back(surface);
}
VideoCore::RasterizerInterface& rasterizer;
@ -1258,8 +1244,8 @@ private:
std::list<TSurface> marked_for_unregister;
std::shared_ptr<std::list<TSurface>> uncommited_flushes{};
std::list<std::shared_ptr<std::list<TSurface>>> commited_flushes;
std::shared_ptr<std::list<TSurface>> uncommitted_flushes{};
std::list<std::shared_ptr<std::list<TSurface>>> committed_flushes;
StagingCache staging_cache;
std::recursive_mutex mutex;