From 0e4d4b4beba3521dbadfe489b54309ba33dc65f2 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Sun, 4 Jul 2021 18:08:49 +0200
Subject: [PATCH] Buffer Cache: Fix High Downloads and don't predownload on
 Extreme.

---
 src/tests/video_core/buffer_base.cpp       |   2 +-
 src/video_core/buffer_cache/buffer_base.h  |  14 +-
 src/video_core/buffer_cache/buffer_cache.h | 199 ++++++++++++---------
 src/video_core/texture_cache/types.h       |   4 +-
 4 files changed, 125 insertions(+), 94 deletions(-)

diff --git a/src/tests/video_core/buffer_base.cpp b/src/tests/video_core/buffer_base.cpp
index edced69bb2..cfcdc22536 100644
--- a/src/tests/video_core/buffer_base.cpp
+++ b/src/tests/video_core/buffer_base.cpp
@@ -536,7 +536,7 @@ TEST_CASE("BufferBase: Cached write downloads") {
     REQUIRE(rasterizer.Count() == 63);
     buffer.MarkRegionAsGpuModified(c + PAGE, PAGE);
     int num = 0;
-    buffer.ForEachDownloadRange(c, WORD, [&](u64 offset, u64 size) { ++num; });
+    buffer.ForEachDownloadRange(c, WORD, true, [&](u64 offset, u64 size) { ++num; });
     buffer.ForEachUploadRange(c, WORD, [&](u64 offset, u64 size) { ++num; });
     REQUIRE(num == 0);
     REQUIRE(!buffer.IsRegionCpuModified(c + PAGE, PAGE));
diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h
index b121d36a3c..a56b4c3a8d 100644
--- a/src/video_core/buffer_cache/buffer_base.h
+++ b/src/video_core/buffer_cache/buffer_base.h
@@ -226,19 +226,19 @@ public:
     /// Call 'func' for each CPU modified range and unmark those pages as CPU modified
     template <typename Func>
     void ForEachUploadRange(VAddr query_cpu_range, u64 size, Func&& func) {
-        ForEachModifiedRange<Type::CPU>(query_cpu_range, size, func);
+        ForEachModifiedRange<Type::CPU>(query_cpu_range, size, true, func);
     }
 
     /// Call 'func' for each GPU modified range and unmark those pages as GPU modified
     template <typename Func>
-    void ForEachDownloadRange(VAddr query_cpu_range, u64 size, Func&& func) {
-        ForEachModifiedRange<Type::GPU>(query_cpu_range, size, func);
+    void ForEachDownloadRange(VAddr query_cpu_range, u64 size, bool clear, Func&& func) {
+        ForEachModifiedRange<Type::GPU>(query_cpu_range, size, clear, func);
     }
 
     /// Call 'func' for each GPU modified range and unmark those pages as GPU modified
     template <typename Func>
     void ForEachDownloadRange(Func&& func) {
-        ForEachModifiedRange<Type::GPU>(cpu_addr, SizeBytes(), func);
+        ForEachModifiedRange<Type::GPU>(cpu_addr, SizeBytes(), true, func);
     }
 
     /// Mark buffer as picked
@@ -415,7 +415,7 @@ private:
      * @param func Function to call for each turned off region
      */
     template <Type type, typename Func>
-    void ForEachModifiedRange(VAddr query_cpu_range, s64 size, Func&& func) {
+    void ForEachModifiedRange(VAddr query_cpu_range, s64 size, bool clear, Func&& func) {
         static_assert(type != Type::Untracked);
 
         const s64 difference = query_cpu_range - cpu_addr;
@@ -467,7 +467,9 @@ private:
             bits = (bits << left_offset) >> left_offset;
 
             const u64 current_word = state_words[word_index] & bits;
-            state_words[word_index] &= ~bits;
+            if (clear) {
+                state_words[word_index] &= ~bits;
+            }
 
             if constexpr (type == Type::CPU) {
                 const u64 current_bits = untracked_words[word_index] & bits;
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index cad7f902dc..d28930e807 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -14,6 +14,7 @@
 #include <vector>
 
 #include <boost/container/small_vector.hpp>
+#include <boost/icl/interval_set.hpp>
 #include <boost/intrusive/set.hpp>
 
 #include "common/common_types.h"
@@ -77,6 +78,9 @@ class BufferCache {
     using Runtime = typename P::Runtime;
     using Buffer = typename P::Buffer;
 
+    using IntervalSet = boost::icl::interval_set<VAddr>;
+    using IntervalType = typename IntervalSet::interval_type;
+
     struct Empty {};
 
     struct OverlapResult {
@@ -153,6 +157,7 @@ public:
 
     /// Commit asynchronous downloads
     void CommitAsyncFlushes();
+    void CommitAsyncFlushesHigh();
 
     /// Pop asynchronous downloads
    void PopAsyncFlushes();
@@ -160,6 +165,9 @@ public:
     /// Return true when a CPU region is modified from the GPU
     [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size);
 
+    /// Return true when a CPU region is modified from the GPU
+    [[nodiscard]] bool IsRegionCpuModified(VAddr addr, size_t size);
+
     std::mutex mutex;
 
 private:
@@ -272,8 +280,6 @@ private:
 
     void DeleteBuffer(BufferId buffer_id);
 
-    void ReplaceBufferDownloads(BufferId old_buffer_id, BufferId new_buffer_id);
-
     void NotifyBufferDeletion();
 
     [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr) const;
@@ -328,8 +334,9 @@ private:
     std::vector<BufferId> cached_write_buffer_ids;
 
     // TODO: This data structure is not optimal and it should be reworked
-    std::vector<BufferId> uncommitted_downloads;
-    std::deque<std::vector<BufferId>> committed_downloads;
+    IntervalSet uncommitted_ranges;
+    std::deque<IntervalSet> committed_ranges;
+    std::deque> pending_downloads;
 
     size_t immediate_buffer_capacity = 0;
     std::unique_ptr<u8[]> immediate_buffer_alloc;
@@ -547,79 +554,98 @@ void BufferCache<P>::FlushCachedWrites() {
 
 template <class P>
 bool BufferCache<P>::HasUncommittedFlushes() const noexcept {
-    return !uncommitted_downloads.empty();
+    return !uncommitted_ranges.empty();
 }
 
 template <class P>
 bool BufferCache<P>::ShouldWaitAsyncFlushes() const noexcept {
-    return !committed_downloads.empty() && !committed_downloads.front().empty();
+    return false;
+}
+
+template <class P>
+void BufferCache<P>::CommitAsyncFlushesHigh() {
+    const IntervalSet& intervals = uncommitted_ranges;
+    if (intervals.empty()) {
+        return;
+    }
+    MICROPROFILE_SCOPE(GPU_DownloadMemory);
+
+    boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads;
+    u64 total_size_bytes = 0;
+    u64 largest_copy = 0;
+    for (auto& interval : intervals) {
+        const std::size_t size = interval.upper() - interval.lower();
+        const VAddr cpu_addr = interval.lower();
+        const VAddr cpu_addr_end = interval.upper();
+        ForEachBufferInRange(cpu_addr, size, [&](BufferId buffer_id, Buffer& buffer) {
+            boost::container::small_vector<BufferCopy, 1> copies;
+            buffer.ForEachDownloadRange(cpu_addr, size, false, [&](u64 range_offset, u64 range_size) {
+                VAddr cpu_addr_base = buffer.CpuAddr() + range_offset;
+                VAddr cpu_addr_end2 = cpu_addr_base + range_size;
+                const s64 difference = s64(cpu_addr_end2 - cpu_addr_end);
+                cpu_addr_end2 -= u64(std::max<s64>(difference, 0));
+                const s64 difference2 = s64(cpu_addr - cpu_addr_base);
+                cpu_addr_base += u64(std::max<s64>(difference2, 0));
+                const u64 new_size = cpu_addr_end2 - cpu_addr_base;
+                const u64 new_offset = cpu_addr_base - buffer.CpuAddr();
+                ASSERT(!IsRegionCpuModified(cpu_addr_base, new_size));
+                downloads.push_back({
+                    BufferCopy{
+                        .src_offset = new_offset,
+                        .dst_offset = total_size_bytes,
+                        .size = new_size,
+                    },
+                    buffer_id,
+                });
+                total_size_bytes += new_size;
+                buffer.UnmarkRegionAsGpuModified(cpu_addr_base, new_size);
+                largest_copy = std::max(largest_copy, new_size);
+            });
+        });
+    }
+    if (downloads.empty()) {
+        return;
+    }
+    if constexpr (USE_MEMORY_MAPS) {
+        auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
+        for (auto& [copy, buffer_id] : downloads) {
+            // Have in mind the staging buffer offset for the copy
+            copy.dst_offset += download_staging.offset;
+            const std::array copies{copy};
+            runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies);
+        }
+        runtime.Finish();
+        for (const auto& [copy, buffer_id] : downloads) {
+            const Buffer& buffer = slot_buffers[buffer_id];
+            const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+            // Undo the modified offset
+            const u64 dst_offset = copy.dst_offset - download_staging.offset;
+            const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
+            cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
+        }
+    } else {
+        const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
+        for (const auto& [copy, buffer_id] : downloads) {
+            Buffer& buffer = slot_buffers[buffer_id];
+            buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size));
+            const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+            cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+        }
+    }
 }
 
 template <class P>
 void BufferCache<P>::CommitAsyncFlushes() {
-    // This is intentionally passing the value by copy
-    committed_downloads.push_front(uncommitted_downloads);
-    uncommitted_downloads.clear();
+    if (Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::High) {
+        CommitAsyncFlushesHigh();
+    } else {
+        uncommitted_ranges.clear();
+    }
 }
 
 template <class P>
 void BufferCache<P>::PopAsyncFlushes() {
-    if (committed_downloads.empty()) {
-        return;
-    }
-    auto scope_exit_pop_download = detail::ScopeExit([this] { committed_downloads.pop_back(); });
-    const std::span<const BufferId> download_ids = committed_downloads.back();
-    if (download_ids.empty()) {
-        return;
-    }
-    MICROPROFILE_SCOPE(GPU_DownloadMemory);
-    boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads;
-    u64 total_size_bytes = 0;
-    u64 largest_copy = 0;
-    for (const BufferId buffer_id : download_ids) {
-        slot_buffers[buffer_id].ForEachDownloadRange([&](u64 range_offset, u64 range_size) {
-            downloads.push_back({
-                BufferCopy{
-                    .src_offset = range_offset,
-                    .dst_offset = total_size_bytes,
-                    .size = range_size,
-                },
-                buffer_id,
-            });
-            total_size_bytes += range_size;
-            largest_copy = std::max(largest_copy, range_size);
-        });
-    }
-    if (downloads.empty()) {
-        return;
-    }
-    if constexpr (USE_MEMORY_MAPS) {
-        auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
-        for (auto& [copy, buffer_id] : downloads) {
-            // Have in mind the staging buffer offset for the copy
-            copy.dst_offset += download_staging.offset;
-            const std::array copies{copy};
-            runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies);
-        }
-        runtime.Finish();
-        for (const auto& [copy, buffer_id] : downloads) {
-            const Buffer& buffer = slot_buffers[buffer_id];
-            const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
-            // Undo the modified offset
-            const u64 dst_offset = copy.dst_offset - download_staging.offset;
-            const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
-            cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
-        }
-    } else {
-        const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
-        for (const auto& [copy, buffer_id] : downloads) {
-            Buffer& buffer = slot_buffers[buffer_id];
-            buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size));
-            const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
-            cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
-        }
-    }
 }
 
 template <class P>
@@ -641,6 +667,25 @@ bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
     return false;
 }
 
+template <class P>
+bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
+    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
+    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
+        const BufferId image_id = page_table[page];
+        if (!image_id) {
+            ++page;
+            continue;
+        }
+        Buffer& buffer = slot_buffers[image_id];
+        if (buffer.IsRegionCpuModified(addr, size)) {
+            return true;
+        }
+        const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
+        page = Common::DivCeil(end_addr, PAGE_SIZE);
+    }
+    return false;
+}
+
 template <class P>
 void BufferCache<P>::BindHostIndexBuffer() {
     Buffer& buffer = slot_buffers[index_buffer.buffer_id];
@@ -1010,16 +1055,13 @@ void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 s
     Buffer& buffer = slot_buffers[buffer_id];
     buffer.MarkRegionAsGpuModified(cpu_addr, size);
 
-    const bool is_accuracy_high = Settings::IsGPULevelHigh();
+    const bool is_accuracy_high = Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::High;
     const bool is_async = Settings::values.use_asynchronous_gpu_emulation.GetValue();
-    if (!is_accuracy_high || !is_async) {
+    if (!is_async && !is_accuracy_high) {
         return;
     }
-    if (std::ranges::find(uncommitted_downloads, buffer_id) != uncommitted_downloads.end()) {
-        // Already inserted
-        return;
-    }
-    uncommitted_downloads.push_back(buffer_id);
+    const IntervalType base_interval{cpu_addr, cpu_addr + size};
+    uncommitted_ranges.add(base_interval);
 }
 
 template <class P>
@@ -1103,7 +1145,6 @@ void BufferCache<P>::JoinOverlap(BufferId new_buffer_id, BufferId overlap_id,
     if (!copies.empty()) {
         runtime.CopyBuffer(slot_buffers[new_buffer_id], overlap, copies);
     }
-    ReplaceBufferDownloads(overlap_id, new_buffer_id);
     DeleteBuffer(overlap_id);
 }
 
@@ -1244,7 +1285,7 @@ void BufferCache<P>::DownloadBufferMemory(Buffer& buffer, VAddr cpu_addr, u64 si
     boost::container::small_vector<BufferCopy, 1> copies;
     u64 total_size_bytes = 0;
     u64 largest_copy = 0;
-    buffer.ForEachDownloadRange(cpu_addr, size, [&](u64 range_offset, u64 range_size) {
+    buffer.ForEachDownloadRange(cpu_addr, size, true, [&](u64 range_offset, u64 range_size) {
         copies.push_back(BufferCopy{
             .src_offset = range_offset,
             .dst_offset = total_size_bytes,
@@ -1315,18 +1356,6 @@ void BufferCache<P>::DeleteBuffer(BufferId buffer_id) {
     NotifyBufferDeletion();
 }
 
-template <class P>
-void BufferCache<P>::ReplaceBufferDownloads(BufferId old_buffer_id, BufferId new_buffer_id) {
-    const auto replace = [old_buffer_id, new_buffer_id](std::vector<BufferId>& buffers) {
-        std::ranges::replace(buffers, old_buffer_id, new_buffer_id);
-        if (auto it = std::ranges::find(buffers, new_buffer_id); it != buffers.end()) {
-            buffers.erase(std::remove(it + 1, buffers.end(), new_buffer_id), buffers.end());
-        }
-    };
-    replace(uncommitted_downloads);
-    std::ranges::for_each(committed_downloads, replace);
-}
-
 template <class P>
 void BufferCache<P>::NotifyBufferDeletion() {
     if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
diff --git a/src/video_core/texture_cache/types.h b/src/video_core/texture_cache/types.h
index 9fbdc1ac6c..47a11cb2f8 100644
--- a/src/video_core/texture_cache/types.h
+++ b/src/video_core/texture_cache/types.h
@@ -133,8 +133,8 @@ struct BufferImageCopy {
 };
 
 struct BufferCopy {
-    size_t src_offset;
-    size_t dst_offset;
+    u64 src_offset;
+    u64 dst_offset;
     size_t size;
 };