gl_rasterizer_cache: Update to use RasterizerCache base class.
This commit is contained in:
parent 382852418b
commit a0e1566dc5

3 changed files with 20 additions and 132 deletions
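For context, a rough sketch of the kind of interface this commit migrates RasterizerCacheOpenGL onto. The real base class lives in video_core/rasterizer_cache.h and is not part of this diff; the code below is a hypothetical stand-in that only illustrates the Register/Unregister/TryGet/GetCache/InvalidateRegion calls the hunks below switch to.

    // Hypothetical sketch of a RasterizerCache<T>-style base class (not the real yuzu code).
    // T is a shared_ptr-like handle exposing GetAddr() and GetSizeInBytes().
    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    template <class T>
    class RasterizerCacheSketch {
    public:
        // Evict every cached object that overlaps the invalidated region.
        void InvalidateRegion(std::uint64_t addr, std::size_t size) {
            for (auto it = cache.begin(); it != cache.end();) {
                T object = it->second;
                ++it; // advance before a possible erase
                const std::uint64_t start = object->GetAddr();
                const std::uint64_t end = start + object->GetSizeInBytes();
                if (start < addr + size && addr < end) {
                    Unregister(object);
                }
            }
        }

    protected:
        // Look up an object by address; returns an empty handle when absent.
        T TryGet(std::uint64_t addr) const {
            const auto it = cache.find(addr);
            return it != cache.end() ? it->second : T{};
        }

        // Start/stop tracking an object, keyed by its address.
        void Register(const T& object) {
            cache[object->GetAddr()] = object;
        }
        void Unregister(const T& object) {
            cache.erase(object->GetAddr());
        }

        // Let derived caches iterate over everything currently cached.
        const std::unordered_map<std::uint64_t, T>& GetCache() const {
            return cache;
        }

    private:
        std::unordered_map<std::uint64_t, T> cache;
    };

In the hunks below, RasterizerCacheOpenGL derives from RasterizerCache<Surface>, and its RegisterSurface/UnregisterSurface helpers, the surface_cache map, and the page-count bookkeeping give way to the base class's Register/Unregister/TryGet/GetCache.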
src/video_core/renderer_opengl/gl_rasterizer.cpp

@@ -571,12 +571,10 @@ void RasterizerOpenGL::NotifyMaxwellRegisterChanged(u32 method) {}

 void RasterizerOpenGL::FlushAll() {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
-    res_cache.FlushRegion(0, Kernel::VMManager::MAX_ADDRESS);
 }

 void RasterizerOpenGL::FlushRegion(Tegra::GPUVAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
-    res_cache.FlushRegion(addr, size);
 }

 void RasterizerOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
@@ -586,8 +584,7 @@ void RasterizerOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, u64 size) {

 void RasterizerOpenGL::FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
-    res_cache.FlushRegion(addr, size);
-    res_cache.InvalidateRegion(addr, size);
+    InvalidateRegion(addr, size);
 }

 bool RasterizerOpenGL::AccelerateDisplayTransfer(const void* config) {
src/video_core/renderer_opengl/gl_rasterizer_cache.cpp

@@ -677,12 +677,6 @@ RasterizerCacheOpenGL::RasterizerCacheOpenGL() {
     draw_framebuffer.Create();
 }

-RasterizerCacheOpenGL::~RasterizerCacheOpenGL() {
-    while (!surface_cache.empty()) {
-        UnregisterSurface(surface_cache.begin()->second);
-    }
-}
-
 Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextureInfo& config) {
     return GetSurface(SurfaceParams::CreateForTexture(config));
 }
@@ -766,27 +760,25 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool pres
         return {};

     // Look up surface in the cache based on address
-    const auto& search{surface_cache.find(params.addr)};
-    Surface surface;
-    if (search != surface_cache.end()) {
-        surface = search->second;
+    Surface surface{TryGet(params.addr)};
+    if (surface) {
         if (Settings::values.use_accurate_framebuffers) {
             // If use_accurate_framebuffers is enabled, always load from memory
             FlushSurface(surface);
-            UnregisterSurface(surface);
+            Unregister(surface);
         } else if (surface->GetSurfaceParams().IsCompatibleSurface(params)) {
             // Use the cached surface as-is
             return surface;
         } else if (preserve_contents) {
             // If surface parameters changed and we care about keeping the previous data, recreate
             // the surface from the old one
-            UnregisterSurface(surface);
+            Unregister(surface);
             Surface new_surface{RecreateSurface(surface, params)};
-            RegisterSurface(new_surface);
+            Register(new_surface);
             return new_surface;
         } else {
             // Delete the old surface before creating a new one to prevent collisions.
-            UnregisterSurface(surface);
+            Unregister(surface);
         }
     }

@@ -797,7 +789,7 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool pres
     if (!surface) {
         surface = std::make_shared<CachedSurface>(params);
         ReserveSurface(surface);
-        RegisterSurface(surface);
+        Register(surface);
     }

     // Only load surface from memory if we care about the contents
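A note on the hunks above: surface lookup now goes through the base class. Condensed, and using only names that appear in this diff, the new GetSurface flow is roughly the following (an illustration, not a verbatim excerpt):

    Surface surface{TryGet(params.addr)};                  // base-class lookup by GPU address
    if (surface && surface->GetSurfaceParams().IsCompatibleSurface(params)) {
        return surface;                                     // reuse the cached surface as-is
    }
    if (surface) {
        Unregister(surface);                                // parameters changed: evict the old entry first
    }
    Surface new_surface{std::make_shared<CachedSurface>(params)};
    Register(new_surface);                                  // track the replacement in the base cache
    return new_surface;

The real code additionally handles the use_accurate_framebuffers flush path and the preserve_contents recreation path shown in the hunk.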
@@ -894,7 +886,7 @@ Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(VAddr cpu_addr) const {
     // framebuffer overlaps surfaces.

     std::vector<Surface> surfaces;
-    for (const auto& surface : surface_cache) {
+    for (const auto& surface : GetCache()) {
         const auto& params = surface.second->GetSurfaceParams();
         const VAddr surface_cpu_addr = params.GetCpuAddr();
         if (cpu_addr >= surface_cpu_addr && cpu_addr < (surface_cpu_addr + params.size_in_bytes)) {
@@ -912,51 +904,6 @@ Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(VAddr cpu_addr) const {
     return surfaces[0];
 }

-void RasterizerCacheOpenGL::FlushRegion(Tegra::GPUVAddr /*addr*/, size_t /*size*/) {
-    // TODO(bunnei): This is unused in the current implementation of the rasterizer cache. We should
-    // probably implement this in the future, but for now, the `use_accurate_framebufers` setting
-    // can be used to always flush.
-}
-
-void RasterizerCacheOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, size_t size) {
-    for (auto iter = surface_cache.cbegin(); iter != surface_cache.cend();) {
-        const auto& surface{iter->second};
-        const auto& params{surface->GetSurfaceParams()};
-
-        ++iter;
-
-        if (params.IsOverlappingRegion(addr, size)) {
-            UnregisterSurface(surface);
-        }
-    }
-}
-
-void RasterizerCacheOpenGL::RegisterSurface(const Surface& surface) {
-    const auto& params{surface->GetSurfaceParams()};
-    const auto& search{surface_cache.find(params.addr)};
-
-    if (search != surface_cache.end()) {
-        // Registered already
-        return;
-    }
-
-    surface_cache[params.addr] = surface;
-    UpdatePagesCachedCount(params.addr, params.size_in_bytes, 1);
-}
-
-void RasterizerCacheOpenGL::UnregisterSurface(const Surface& surface) {
-    const auto& params{surface->GetSurfaceParams()};
-    const auto& search{surface_cache.find(params.addr)};
-
-    if (search == surface_cache.end()) {
-        // Unregistered already
-        return;
-    }
-
-    UpdatePagesCachedCount(params.addr, params.size_in_bytes, -1);
-    surface_cache.erase(search);
-}
-
 void RasterizerCacheOpenGL::ReserveSurface(const Surface& surface) {
     const auto& surface_reserve_key{SurfaceReserveKey::Create(surface->GetSurfaceParams())};
     surface_reserve[surface_reserve_key] = surface;
@@ -966,49 +913,10 @@ Surface RasterizerCacheOpenGL::TryGetReservedSurface(const SurfaceParams& params
     const auto& surface_reserve_key{SurfaceReserveKey::Create(params)};
     auto search{surface_reserve.find(surface_reserve_key)};
     if (search != surface_reserve.end()) {
-        RegisterSurface(search->second);
+        Register(search->second);
         return search->second;
     }
     return {};
 }

-template <typename Map, typename Interval>
-constexpr auto RangeFromInterval(Map& map, const Interval& interval) {
-    return boost::make_iterator_range(map.equal_range(interval));
-}
-
-void RasterizerCacheOpenGL::UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) {
-    const u64 num_pages = ((addr + size - 1) >> Tegra::MemoryManager::PAGE_BITS) -
-                          (addr >> Tegra::MemoryManager::PAGE_BITS) + 1;
-    const u64 page_start = addr >> Tegra::MemoryManager::PAGE_BITS;
-    const u64 page_end = page_start + num_pages;
-
-    // Interval maps will erase segments if count reaches 0, so if delta is negative we have to
-    // subtract after iterating
-    const auto pages_interval = PageMap::interval_type::right_open(page_start, page_end);
-    if (delta > 0)
-        cached_pages.add({pages_interval, delta});
-
-    for (const auto& pair : RangeFromInterval(cached_pages, pages_interval)) {
-        const auto interval = pair.first & pages_interval;
-        const int count = pair.second;
-
-        const Tegra::GPUVAddr interval_start_addr = boost::icl::first(interval)
-                                                    << Tegra::MemoryManager::PAGE_BITS;
-        const Tegra::GPUVAddr interval_end_addr = boost::icl::last_next(interval)
-                                                  << Tegra::MemoryManager::PAGE_BITS;
-        const u64 interval_size = interval_end_addr - interval_start_addr;
-
-        if (delta > 0 && count == delta)
-            Memory::RasterizerMarkRegionCached(interval_start_addr, interval_size, true);
-        else if (delta < 0 && count == -delta)
-            Memory::RasterizerMarkRegionCached(interval_start_addr, interval_size, false);
-        else
-            ASSERT(count >= 0);
-    }
-
-    if (delta < 0)
-        cached_pages.add({pages_interval, delta});
-}
-
 } // namespace OpenGL
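The page reference counting removed in the hunk above (UpdatePagesCachedCount and its RangeFromInterval helper) presumably now lives in the RasterizerCache base class. Its one subtlety is spelled out in the removed comment: boost::icl::interval_map erases segments whose aggregated count reaches zero, so a negative delta must only be applied after iterating over the affected range. A minimal self-contained illustration of that behaviour:

    // Illustrates why UpdatePagesCachedCount applied a negative delta only after iterating:
    // boost::icl::interval_map drops segments whose aggregated value becomes zero.
    #include <boost/icl/interval_map.hpp>
    #include <cassert>
    #include <cstdint>

    int main() {
        using PageMap = boost::icl::interval_map<std::uint64_t, int>;
        PageMap cached_pages;

        const auto a = PageMap::interval_type::right_open(0x10, 0x20);
        const auto b = PageMap::interval_type::right_open(0x18, 0x28);

        cached_pages.add({a, 1});  // pages 0x10..0x1f now have count 1
        cached_pages.add({b, 1});  // pages 0x18..0x1f -> 2, pages 0x20..0x27 -> 1

        cached_pages.add({a, -1}); // counts that reach 0 are erased from the map
        assert(cached_pages.iterative_size() == 1); // only [0x18, 0x28) with count 1 remains

        // Hence the removed code added a positive delta *before* walking the affected
        // interval, but applied a negative delta only *after* the walk, so the segments
        // it still needed to inspect were not erased out from under it.
        return 0;
    }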
src/video_core/renderer_opengl/gl_rasterizer_cache.h

@@ -8,12 +8,12 @@
 #include <map>
 #include <memory>
 #include <vector>
-#include <boost/icl/interval_map.hpp>

 #include "common/common_types.h"
 #include "common/hash.h"
 #include "common/math_util.h"
 #include "video_core/engines/maxwell_3d.h"
+#include "video_core/rasterizer_cache.h"
 #include "video_core/renderer_opengl/gl_resource_manager.h"
 #include "video_core/textures/texture.h"
@@ -22,7 +22,6 @@ namespace OpenGL {
 class CachedSurface;
 using Surface = std::shared_ptr<CachedSurface>;
 using SurfaceSurfaceRect_Tuple = std::tuple<Surface, Surface, MathUtil::Rectangle<u32>>;
-using PageMap = boost::icl::interval_map<u64, int>;

 struct SurfaceParams {
     enum class PixelFormat {
@@ -632,11 +631,6 @@ struct SurfaceParams {
     /// Returns the CPU virtual address for this surface
     VAddr GetCpuAddr() const;

-    /// Returns true if the specified region overlaps with this surface's region in Switch memory
-    bool IsOverlappingRegion(Tegra::GPUVAddr region_addr, size_t region_size) const {
-        return addr <= (region_addr + region_size) && region_addr <= (addr + size_in_bytes);
-    }
-
     /// Creates SurfaceParams from a texture configuration
     static SurfaceParams CreateForTexture(const Tegra::Texture::FullTextureInfo& config);
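Side note on the IsOverlappingRegion helper removed above: the overlap test presumably moves with the rest of the region tracking into the base class. The predicate is the usual interval-overlap check; as written it also counts ranges that merely touch at an endpoint. A small self-contained restatement (hypothetical free function, not yuzu API):

    #include <cstddef>
    #include <cstdint>

    // True when [addr, addr + size] and [region_addr, region_addr + region_size] touch or overlap.
    // Note the <= comparisons: touching ranges also count, which is slightly conservative
    // compared to a strict half-open interval test (<).
    constexpr bool IsOverlapping(std::uint64_t addr, std::size_t size,
                                 std::uint64_t region_addr, std::size_t region_size) {
        return addr <= region_addr + region_size && region_addr <= addr + size;
    }

    static_assert(IsOverlapping(0x100, 0x10, 0x110, 0x10), "touching ranges count as overlapping");
    static_assert(!IsOverlapping(0x100, 0x10, 0x200, 0x10), "disjoint ranges do not overlap");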
@@ -708,6 +702,14 @@ class CachedSurface final {
 public:
     CachedSurface(const SurfaceParams& params);

+    Tegra::GPUVAddr GetAddr() const {
+        return params.addr;
+    }
+
+    size_t GetSizeInBytes() const {
+        return params.size_in_bytes;
+    }
+
     const OGLTexture& Texture() const {
         return texture;
     }
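A note on the accessors added above: GetAddr() and GetSizeInBytes() presumably exist so that CachedSurface satisfies whatever interface RasterizerCache<Surface> expects of its cached objects, along the lines of the sketch near the top of this page, e.g. (hypothetical, not the real yuzu code):

    // Inside a RasterizerCache<T>-style base class:
    void Register(const T& object) {
        cache[object->GetAddr()] = object;
        // a real implementation would also mark the pages covered by
        // [GetAddr(), GetAddr() + GetSizeInBytes()) as rasterizer-cached
    }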
@@ -737,10 +739,9 @@ private:
     SurfaceParams params;
 };

-class RasterizerCacheOpenGL final : NonCopyable {
+class RasterizerCacheOpenGL final : public RasterizerCache<Surface> {
 public:
     RasterizerCacheOpenGL();
-    ~RasterizerCacheOpenGL();

     /// Get a surface based on the texture configuration
     Surface GetTextureSurface(const Tegra::Texture::FullTextureInfo& config);
@@ -755,12 +756,6 @@ public:
     /// Tries to find a framebuffer GPU address based on the provided CPU address
     Surface TryFindFramebufferSurface(VAddr cpu_addr) const;

-    /// Write any cached resources overlapping the region back to memory (if dirty)
-    void FlushRegion(Tegra::GPUVAddr addr, size_t size);
-
-    /// Mark the specified region as being invalidated
-    void InvalidateRegion(Tegra::GPUVAddr addr, size_t size);
-
 private:
     void LoadSurface(const Surface& surface);
     Surface GetSurface(const SurfaceParams& params, bool preserve_contents = true);
@@ -768,24 +763,12 @@ private:
     /// Recreates a surface with new parameters
     Surface RecreateSurface(const Surface& surface, const SurfaceParams& new_params);

-    /// Register surface into the cache
-    void RegisterSurface(const Surface& surface);
-
-    /// Remove surface from the cache
-    void UnregisterSurface(const Surface& surface);
-
     /// Reserves a unique surface that can be reused later
     void ReserveSurface(const Surface& surface);

     /// Tries to get a reserved surface for the specified parameters
     Surface TryGetReservedSurface(const SurfaceParams& params);

-    /// Increase/decrease the number of surface in pages touching the specified region
-    void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta);
-
-    std::unordered_map<Tegra::GPUVAddr, Surface> surface_cache;
-    PageMap cached_pages;
-
     /// The surface reserve is a "backup" cache, this is where we put unique surfaces that have
     /// previously been used. This is to prevent surfaces from being constantly created and
     /// destroyed when used with different surface parameters.