gpu: Rewrite virtual memory manager using PageTable.

commit 22d3dfbcd4 (parent 241563d15c)
13 changed files with 497 additions and 228 deletions
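
Summary: this change replaces the GPU MemoryManager's hand-rolled sparse page table (the PageSlot/PageStatus machinery plus a flat mapped_regions vector) with the shared Common::PageTable structure and a map of virtual memory areas (VMAs) that can be carved, split, and merged, much like the CPU-side VMManager. The sized accessors Read8/16/32/64 and Write8/16/32/64 are folded into templated Read<T>/Write<T>, and call sites in the nvhost_as_gpu service, the KeplerMemory and Maxwell3D engines, the GPU semaphore handlers, and the OpenGL caches are updated to match. The hunks below are reconstructed from the changed files in order.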

@@ -16,6 +16,7 @@ void PageTable::Resize(std::size_t address_space_width_in_bits) {
 
     pointers.resize(num_page_table_entries);
     attributes.resize(num_page_table_entries);
+    backing_addr.resize(num_page_table_entries);
 
     // The default is a 39-bit address space, which causes an initial 1GB allocation size. If the
     // vector size is subsequently decreased (via resize), the vector might not automatically

@@ -24,6 +25,7 @@ void PageTable::Resize(std::size_t address_space_width_in_bits) {
 
     pointers.shrink_to_fit();
    attributes.shrink_to_fit();
+    backing_addr.shrink_to_fit();
 }
 
 } // namespace Common

@@ -21,6 +21,8 @@ enum class PageType : u8 {
     RasterizerCachedMemory,
     /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
     Special,
+    /// Page is allocated for use.
+    Allocated,
 };
 
 struct SpecialRegion {

@@ -66,7 +68,7 @@ struct PageTable {
      * Contains MMIO handlers that back memory regions whose entries in the `attribute` vector is
      * of type `Special`.
      */
-    boost::icl::interval_map<VAddr, std::set<SpecialRegion>> special_regions;
+    boost::icl::interval_map<u64, std::set<SpecialRegion>> special_regions;
 
     /**
      * Vector of fine grained page attributes. If it is set to any value other than `Memory`, then

@@ -74,6 +76,8 @@ struct PageTable {
      */
     std::vector<PageType> attributes;
 
+    std::vector<u64> backing_addr;
+
     const std::size_t page_size_in_bits{};
 };
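
Note: the new backing_addr vector runs parallel to pointers and attributes, one entry per page, which is what lets GpuToCpuAddress below become a single array lookup. A minimal standalone sketch of the indexing arithmetic (the 64 KiB page size matches the page_bits constant introduced in memory_manager.h further down; the names here are illustrative, not yuzu's):

    #include <cstdint>

    using u64 = std::uint64_t;

    constexpr u64 page_bits = 16;                      // 64 KiB pages
    constexpr u64 page_mask = (u64(1) << page_bits) - 1;

    // Index of the page table entry (pointers/attributes/backing_addr) for an address.
    constexpr u64 PageIndex(u64 addr) {
        return addr >> page_bits;
    }

    // Byte offset of the address within its page.
    constexpr u64 PageOffset(u64 addr) {
        return addr & page_mask;
    }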

@@ -173,16 +173,8 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou
         return 0;
     }
 
-    auto& system_instance = Core::System::GetInstance();
-
-    // Remove this memory region from the rasterizer cache.
-    auto& gpu = system_instance.GPU();
-    auto cpu_addr = gpu.MemoryManager().GpuToCpuAddress(params.offset);
-    ASSERT(cpu_addr);
-    gpu.FlushAndInvalidateRegion(ToCacheAddr(Memory::GetPointer(*cpu_addr)), itr->second.size);
-
-    params.offset = gpu.MemoryManager().UnmapBuffer(params.offset, itr->second.size);
+    params.offset = Core::System::GetInstance().GPU().MemoryManager().UnmapBuffer(params.offset,
+                                                                                  itr->second.size);
 
     buffer_mappings.erase(itr->second.offset);
 
     std::memcpy(output.data(), &params, output.size());
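
Note that the rasterizer flush removed here is not lost: the new MemoryManager::UnmapBuffer (further down in this diff) performs the FlushAndInvalidateRegion itself before unmapping, so the service-side caller shrinks to a single call.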

@@ -9,7 +9,6 @@
 #include "common/bit_field.h"
 #include "common/common_types.h"
-#include "video_core/memory_manager.h"
 
 namespace Tegra {
 
@@ -46,7 +46,7 @@ void KeplerMemory::ProcessData(u32 data) {
     // contain a dirty surface that will have to be written back to memory.
     const GPUVAddr address{regs.dest.Address() + state.write_offset * sizeof(u32)};
     rasterizer.InvalidateRegion(ToCacheAddr(memory_manager.GetPointer(address)), sizeof(u32));
-    memory_manager.Write32(address, data);
+    memory_manager.Write<u32>(address, data);
 
     system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
 
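
The Write32 → Write<u32> change repeats across every engine below; the template keeps the access width explicit at the call site while sharing one implementation. The core of that shared implementation is memcpy-based type punning, as in the fast path added to memory_manager.cpp later in this diff; a self-contained sketch of the pattern (simplified, not the actual yuzu classes):

    #include <cstdint>
    #include <cstring>

    // Read a T from raw page memory without violating strict aliasing.
    template <typename T>
    T ReadRaw(const std::uint8_t* page, std::size_t offset) {
        T value;
        std::memcpy(&value, page + offset, sizeof(T));
        return value;
    }

    // Write a T into raw page memory the same way.
    template <typename T>
    void WriteRaw(std::uint8_t* page, std::size_t offset, T data) {
        std::memcpy(page + offset, &data, sizeof(T));
    }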

@@ -307,7 +307,7 @@ void Maxwell3D::ProcessQueryGet() {
         // Write the current query sequence to the sequence address.
         // TODO(Subv): Find out what happens if you use a long query type but mark it as a short
         // query.
-        memory_manager.Write32(sequence_address, sequence);
+        memory_manager.Write<u32>(sequence_address, sequence);
     } else {
         // Write the 128-bit result structure in long mode. Note: We emulate an infinitely fast
         // GPU, this command may actually take a while to complete in real hardware due to GPU

@@ -395,7 +395,7 @@ void Maxwell3D::ProcessCBData(u32 value) {
 
     u8* ptr{memory_manager.GetPointer(address)};
     rasterizer.InvalidateRegion(ToCacheAddr(ptr), sizeof(u32));
-    memory_manager.Write32(address, value);
+    memory_manager.Write<u32>(address, value);
 
     dirty_flags.OnMemoryWrite();
 
@@ -447,7 +447,7 @@ std::vector<Texture::FullTextureInfo> Maxwell3D::GetStageTextures(Regs::ShaderSt
     for (GPUVAddr current_texture = tex_info_buffer.address + TextureInfoOffset;
          current_texture < tex_info_buffer_end; current_texture += sizeof(Texture::TextureHandle)) {
 
-        const Texture::TextureHandle tex_handle{memory_manager.Read32(current_texture)};
+        const Texture::TextureHandle tex_handle{memory_manager.Read<u32>(current_texture)};
 
         Texture::FullTextureInfo tex_info{};
         // TODO(Subv): Use the shader to determine which textures are actually accessed.

@@ -482,7 +482,7 @@ Texture::FullTextureInfo Maxwell3D::GetStageTexture(Regs::ShaderStage stage,
 
     ASSERT(tex_info_address < tex_info_buffer.address + tex_info_buffer.size);
 
-    const Texture::TextureHandle tex_handle{memory_manager.Read32(tex_info_address)};
+    const Texture::TextureHandle tex_handle{memory_manager.Read<u32>(tex_info_address)};
 
     Texture::FullTextureInfo tex_info{};
     tex_info.index = static_cast<u32>(offset);

@@ -12,6 +12,7 @@
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/engines/maxwell_dma.h"
 #include "video_core/gpu.h"
+#include "video_core/memory_manager.h"
 #include "video_core/renderer_base.h"
 
 namespace Tegra {

@@ -287,7 +288,7 @@ void GPU::ProcessSemaphoreTriggerMethod() {
         block.timestamp = Core::System::GetInstance().CoreTiming().GetTicks();
         memory_manager->WriteBlock(regs.smaphore_address.SmaphoreAddress(), &block, sizeof(block));
     } else {
-        const u32 word{memory_manager->Read32(regs.smaphore_address.SmaphoreAddress())};
+        const u32 word{memory_manager->Read<u32>(regs.smaphore_address.SmaphoreAddress())};
         if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) ||
             (op == GpuSemaphoreOperation::AcquireGequal &&
              static_cast<s32>(word - regs.semaphore_sequence) > 0) ||

@@ -314,11 +315,11 @@ void GPU::ProcessSemaphoreTriggerMethod() {
 }
 
 void GPU::ProcessSemaphoreRelease() {
-    memory_manager->Write32(regs.smaphore_address.SmaphoreAddress(), regs.semaphore_release);
+    memory_manager->Write<u32>(regs.smaphore_address.SmaphoreAddress(), regs.semaphore_release);
 }
 
 void GPU::ProcessSemaphoreAcquire() {
-    const u32 word = memory_manager->Read32(regs.smaphore_address.SmaphoreAddress());
+    const u32 word = memory_manager->Read<u32>(regs.smaphore_address.SmaphoreAddress());
     const auto value = regs.semaphore_acquire;
     if (word != value) {
         regs.acquire_active = true;

@@ -9,7 +9,6 @@
 #include "common/common_types.h"
 #include "core/hle/service/nvflinger/buffer_queue.h"
 #include "video_core/dma_pusher.h"
-#include "video_core/memory_manager.h"
 
 using CacheAddr = std::uintptr_t;
 inline CacheAddr ToCacheAddr(const void* host_ptr) {

@@ -124,6 +123,8 @@ enum class EngineID {
     MAXWELL_DMA_COPY_A = 0xB0B5,
 };
 
+class MemoryManager;
+
 class GPU {
 public:
     explicit GPU(Core::System& system, VideoCore::RendererBase& renderer);

@@ -244,9 +245,8 @@ protected:
 private:
     std::unique_ptr<Tegra::MemoryManager> memory_manager;
 
-    /// Mapping of command subchannels to their bound engine ids.
+    /// Mapping of command subchannels to their bound engine ids
     std::array<EngineID, 8> bound_engines = {};
 
     /// 3D engine
     std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
     /// 2D engine

@@ -5,198 +5,164 @@
 #include "common/alignment.h"
 #include "common/assert.h"
 #include "common/logging/log.h"
+#include "core/core.h"
 #include "core/memory.h"
+#include "video_core/gpu.h"
 #include "video_core/memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+#include "video_core/renderer_base.h"
 
 namespace Tegra {
 
 MemoryManager::MemoryManager() {
-    // Mark the first page as reserved, so that 0 is not a valid GPUVAddr. Otherwise, games might
-    // try to use 0 as a valid address, which is also used to mean nullptr. This fixes a bug with
-    // Undertale using 0 for a render target.
-    PageSlot(0) = static_cast<u64>(PageStatus::Reserved);
+    std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
+    std::fill(page_table.attributes.begin(), page_table.attributes.end(),
+              Common::PageType::Unmapped);
+    page_table.Resize(address_space_width);
+
+    // Initialize the map with a single free region covering the entire managed space.
+    VirtualMemoryArea initial_vma;
+    initial_vma.size = address_space_end;
+    vma_map.emplace(initial_vma.base, initial_vma);
+
+    UpdatePageTableForVMA(initial_vma);
 }
 
 GPUVAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
-    const std::optional<GPUVAddr> gpu_addr{FindFreeBlock(0, size, align, PageStatus::Unmapped)};
-
-    ASSERT_MSG(gpu_addr, "unable to find available GPU memory");
-
-    for (u64 offset{}; offset < size; offset += PAGE_SIZE) {
-        VAddr& slot{PageSlot(*gpu_addr + offset)};
-
-        ASSERT(slot == static_cast<u64>(PageStatus::Unmapped));
-
-        slot = static_cast<u64>(PageStatus::Allocated);
-    }
-
-    return *gpu_addr;
-}
-
-GPUVAddr MemoryManager::AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align) {
-    for (u64 offset{}; offset < size; offset += PAGE_SIZE) {
-        VAddr& slot{PageSlot(gpu_addr + offset)};
-
-        ASSERT(slot == static_cast<u64>(PageStatus::Unmapped));
-
-        slot = static_cast<u64>(PageStatus::Allocated);
-    }
-
+    const GPUVAddr gpu_addr{
+        FindFreeRegion(address_space_base, size, align, VirtualMemoryArea::Type::Unmapped)};
+    AllocateMemory(gpu_addr, 0, size);
     return gpu_addr;
 }
 
-GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
-    const std::optional<GPUVAddr> gpu_addr{FindFreeBlock(0, size, PAGE_SIZE, PageStatus::Unmapped)};
-
-    ASSERT_MSG(gpu_addr, "unable to find available GPU memory");
-
-    for (u64 offset{}; offset < size; offset += PAGE_SIZE) {
-        VAddr& slot{PageSlot(*gpu_addr + offset)};
-
-        ASSERT(slot == static_cast<u64>(PageStatus::Unmapped));
-
-        slot = cpu_addr + offset;
-    }
-
-    const MappedRegion region{cpu_addr, *gpu_addr, size};
-    mapped_regions.push_back(region);
-
-    return *gpu_addr;
+GPUVAddr MemoryManager::AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align) {
+    AllocateMemory(gpu_addr, 0, size);
+    return gpu_addr;
 }
 
-GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) {
-    ASSERT((gpu_addr & PAGE_MASK) == 0);
-
-    if (PageSlot(gpu_addr) != static_cast<u64>(PageStatus::Allocated)) {
-        // Page has been already mapped. In this case, we must find a new area of memory to use that
-        // is different than the specified one. Super Mario Odyssey hits this scenario when changing
-        // areas, but we do not want to overwrite the old pages.
-        // TODO(bunnei): We need to write a hardware test to confirm this behavior.
-
-        LOG_ERROR(HW_GPU, "attempting to map addr 0x{:016X}, which is not available!", gpu_addr);
-
-        const std::optional<GPUVAddr> new_gpu_addr{
-            FindFreeBlock(gpu_addr, size, PAGE_SIZE, PageStatus::Allocated)};
-
-        ASSERT_MSG(new_gpu_addr, "unable to find available GPU memory");
-
-        gpu_addr = *new_gpu_addr;
-    }
-
-    for (u64 offset{}; offset < size; offset += PAGE_SIZE) {
-        VAddr& slot{PageSlot(gpu_addr + offset)};
-
-        ASSERT(slot == static_cast<u64>(PageStatus::Allocated));
-
-        slot = cpu_addr + offset;
-    }
-
-    const MappedRegion region{cpu_addr, gpu_addr, size};
-    mapped_regions.push_back(region);
-
+GPUVAddr MemoryManager::MapBufferEx(GPUVAddr cpu_addr, u64 size) {
+    const GPUVAddr gpu_addr{
+        FindFreeRegion(address_space_base, size, page_size, VirtualMemoryArea::Type::Unmapped)};
+    MapBackingMemory(gpu_addr, Memory::GetPointer(cpu_addr), ((size + page_mask) & ~page_mask),
+                     cpu_addr);
+    return gpu_addr;
+}
+
+GPUVAddr MemoryManager::MapBufferEx(GPUVAddr cpu_addr, GPUVAddr gpu_addr, u64 size) {
+    ASSERT((gpu_addr & page_mask) == 0);
+
+    MapBackingMemory(gpu_addr, Memory::GetPointer(cpu_addr), ((size + page_mask) & ~page_mask),
+                     cpu_addr);
+
     return gpu_addr;
 }
 
 GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {
-    ASSERT((gpu_addr & PAGE_MASK) == 0);
-
-    for (u64 offset{}; offset < size; offset += PAGE_SIZE) {
-        VAddr& slot{PageSlot(gpu_addr + offset)};
-
-        ASSERT(slot != static_cast<u64>(PageStatus::Allocated) &&
-               slot != static_cast<u64>(PageStatus::Unmapped));
-
-        slot = static_cast<u64>(PageStatus::Unmapped);
-    }
-
-    // Delete the region mappings that are contained within the unmapped region
-    mapped_regions.erase(std::remove_if(mapped_regions.begin(), mapped_regions.end(),
-                                        [&](const MappedRegion& region) {
-                                            return region.gpu_addr <= gpu_addr &&
-                                                   region.gpu_addr + region.size < gpu_addr + size;
-                                        }),
-                         mapped_regions.end());
+    ASSERT((gpu_addr & page_mask) == 0);
+
+    const CacheAddr cache_addr{ToCacheAddr(GetPointer(gpu_addr))};
+    Core::System::GetInstance().Renderer().Rasterizer().FlushAndInvalidateRegion(cache_addr, size);
+    UnmapRange(gpu_addr, ((size + page_mask) & ~page_mask));
+
     return gpu_addr;
 }
 
-GPUVAddr MemoryManager::GetRegionEnd(GPUVAddr region_start) const {
-    for (const auto& region : mapped_regions) {
-        const GPUVAddr region_end{region.gpu_addr + region.size};
-        if (region_start >= region.gpu_addr && region_start < region_end) {
-            return region_end;
-        }
-    }
-    return {};
-}
-
-std::optional<GPUVAddr> MemoryManager::FindFreeBlock(GPUVAddr region_start, u64 size, u64 align,
-                                                     PageStatus status) {
-    GPUVAddr gpu_addr{region_start};
-    u64 free_space{};
-    align = (align + PAGE_MASK) & ~PAGE_MASK;
-
-    while (gpu_addr + free_space < MAX_ADDRESS) {
-        if (PageSlot(gpu_addr + free_space) == static_cast<u64>(status)) {
-            free_space += PAGE_SIZE;
-            if (free_space >= size) {
-                return gpu_addr;
-            }
-        } else {
-            gpu_addr += free_space + PAGE_SIZE;
-            free_space = 0;
-            gpu_addr = Common::AlignUp(gpu_addr, align);
-        }
-    }
-
-    return {};
-}
-
-std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) {
-    const VAddr base_addr{PageSlot(gpu_addr)};
-
-    if (base_addr == static_cast<u64>(PageStatus::Allocated) ||
-        base_addr == static_cast<u64>(PageStatus::Unmapped) ||
-        base_addr == static_cast<u64>(PageStatus::Reserved)) {
-        return {};
-    }
-
-    return base_addr + (gpu_addr & PAGE_MASK);
-}
-
-u8 MemoryManager::Read8(GPUVAddr addr) {
-    return Memory::Read8(*GpuToCpuAddress(addr));
-}
-
-u16 MemoryManager::Read16(GPUVAddr addr) {
-    return Memory::Read16(*GpuToCpuAddress(addr));
-}
-
-u32 MemoryManager::Read32(GPUVAddr addr) {
-    return Memory::Read32(*GpuToCpuAddress(addr));
-}
-
-u64 MemoryManager::Read64(GPUVAddr addr) {
-    return Memory::Read64(*GpuToCpuAddress(addr));
-}
-
-void MemoryManager::Write8(GPUVAddr addr, u8 data) {
-    Memory::Write8(*GpuToCpuAddress(addr), data);
-}
-
-void MemoryManager::Write16(GPUVAddr addr, u16 data) {
-    Memory::Write16(*GpuToCpuAddress(addr), data);
-}
-
-void MemoryManager::Write32(GPUVAddr addr, u32 data) {
-    Memory::Write32(*GpuToCpuAddress(addr), data);
-}
-
-void MemoryManager::Write64(GPUVAddr addr, u64 data) {
-    Memory::Write64(*GpuToCpuAddress(addr), data);
-}
+GPUVAddr MemoryManager::FindFreeRegion(GPUVAddr region_start, u64 size, u64 align,
+                                       VirtualMemoryArea::Type vma_type) {
+    align = (align + page_mask) & ~page_mask;
+
+    // Find the first Free VMA.
+    const GPUVAddr base = region_start;
+    const VMAHandle vma_handle = std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {
+        if (vma.second.type != vma_type)
+            return false;
+
+        const VAddr vma_end = vma.second.base + vma.second.size;
+        return vma_end > base && vma_end >= base + size;
+    });
+
+    if (vma_handle == vma_map.end()) {
+        return {};
+    }
+
+    return std::max(base, vma_handle->second.base);
+}
+
+std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) {
+    VAddr cpu_addr = page_table.backing_addr[gpu_addr >> page_bits];
+    if (cpu_addr) {
+        return cpu_addr + (gpu_addr & page_mask);
+    }
+    return {};
+}
+
+template <typename T>
+T MemoryManager::Read(GPUVAddr vaddr) {
+    const u8* page_pointer = page_table.pointers[vaddr >> page_bits];
+    if (page_pointer) {
+        // NOTE: Avoid adding any extra logic to this fast-path block
+        T value;
+        std::memcpy(&value, &page_pointer[vaddr & page_mask], sizeof(T));
+        return value;
+    }
+
+    Common::PageType type = page_table.attributes[vaddr >> page_bits];
+    switch (type) {
+    case Common::PageType::Unmapped:
+        LOG_ERROR(HW_GPU, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
+        return 0;
+    case Common::PageType::Memory:
+        ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
+        break;
+    default:
+        UNREACHABLE();
+    }
+    return {};
+}
+
+template <typename T>
+void MemoryManager::Write(GPUVAddr vaddr, T data) {
+    u8* page_pointer = page_table.pointers[vaddr >> page_bits];
+    if (page_pointer) {
+        // NOTE: Avoid adding any extra logic to this fast-path block
+        std::memcpy(&page_pointer[vaddr & page_mask], &data, sizeof(T));
+        return;
+    }
+
+    Common::PageType type = page_table.attributes[vaddr >> page_bits];
+    switch (type) {
+    case Common::PageType::Unmapped:
+        LOG_ERROR(HW_GPU, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
+                  static_cast<u32>(data), vaddr);
+        return;
+    case Common::PageType::Memory:
+        ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
+        break;
+    default:
+        UNREACHABLE();
+    }
+}
+
+template u8 MemoryManager::Read<u8>(GPUVAddr addr);
+template u16 MemoryManager::Read<u16>(GPUVAddr addr);
+template u32 MemoryManager::Read<u32>(GPUVAddr addr);
+template u64 MemoryManager::Read<u64>(GPUVAddr addr);
+template void MemoryManager::Write<u8>(GPUVAddr addr, u8 data);
+template void MemoryManager::Write<u16>(GPUVAddr addr, u16 data);
+template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data);
+template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data);
 
 u8* MemoryManager::GetPointer(GPUVAddr addr) {
-    return Memory::GetPointer(*GpuToCpuAddress(addr));
+    u8* page_pointer = page_table.pointers[addr >> page_bits];
+    if (page_pointer) {
+        return page_pointer + (addr & page_mask);
+    }
+
+    LOG_ERROR(HW_GPU, "Unknown GetPointer @ 0x{:016X}", addr);
+    return {};
 }
 
 void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t size) {
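
The shape of the new Read<T>/Write<T> is worth calling out: a single vector index decides between the fast path (host pointer present, direct memcpy) and the attribute-driven slow path. A toy standalone version of that two-level structure, under the same 64 KiB page assumption (simplified types, illustrative only, not the yuzu classes):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    enum class PageType : std::uint8_t { Unmapped, Memory };

    struct ToyPageTable {
        static constexpr std::uint64_t page_bits = 16;
        static constexpr std::uint64_t page_mask = (std::uint64_t(1) << page_bits) - 1;

        std::vector<std::uint8_t*> pointers; // host pointer per page, or nullptr
        std::vector<PageType> attributes;    // what to do when there is no pointer

        template <typename T>
        T Read(std::uint64_t vaddr) const {
            if (const std::uint8_t* page = pointers[vaddr >> page_bits]) {
                // Fast path: one index, one memcpy.
                T value;
                std::memcpy(&value, page + (vaddr & page_mask), sizeof(T));
                return value;
            }
            // Slow path: consult the page attribute (log, assert, MMIO, etc.).
            return T{};
        }
    };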

@@ -210,13 +176,251 @@ void MemoryManager::CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t
     std::memcpy(GetPointer(dest_addr), GetPointer(src_addr), size);
 }
 
-VAddr& MemoryManager::PageSlot(GPUVAddr gpu_addr) {
-    auto& block{page_table[(gpu_addr >> (PAGE_BITS + PAGE_TABLE_BITS)) & PAGE_TABLE_MASK]};
-    if (!block) {
-        block = std::make_unique<PageBlock>();
-        block->fill(static_cast<VAddr>(PageStatus::Unmapped));
-    }
-    return (*block)[(gpu_addr >> PAGE_BITS) & PAGE_BLOCK_MASK];
+void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
+                             VAddr backing_addr) {
+    LOG_DEBUG(HW_GPU, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * page_size,
+              (base + size) * page_size);
+
+    VAddr end = base + size;
+    ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
+               base + page_table.pointers.size());
+
+    std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type);
+
+    if (memory == nullptr) {
+        std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, memory);
+        std::fill(page_table.backing_addr.begin() + base, page_table.backing_addr.begin() + end,
+                  backing_addr);
+    } else {
+        while (base != end) {
+            page_table.pointers[base] = memory;
+            page_table.backing_addr[base] = backing_addr;
+
+            base += 1;
+            memory += page_size;
+            backing_addr += page_size;
+        }
+    }
+}
+
+void MemoryManager::MapMemoryRegion(GPUVAddr base, u64 size, u8* target, VAddr backing_addr) {
+    ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: {:016X}", size);
+    ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: {:016X}", base);
+    MapPages(base / page_size, size / page_size, target, Common::PageType::Memory, backing_addr);
+}
+
+void MemoryManager::UnmapRegion(GPUVAddr base, u64 size) {
+    ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: {:016X}", size);
+    ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: {:016X}", base);
+    MapPages(base / page_size, size / page_size, nullptr, Common::PageType::Unmapped);
+}
+
+bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
+    ASSERT(base + size == next.base);
+    if (type != next.type) {
+        return {};
+    }
+    if (type == VirtualMemoryArea::Type::Allocated && (offset + size != next.offset)) {
+        return {};
+    }
+    if (type == VirtualMemoryArea::Type::Mapped && backing_memory + size != next.backing_memory) {
+        return {};
+    }
+    return true;
+}
+
+MemoryManager::VMAHandle MemoryManager::FindVMA(GPUVAddr target) const {
+    if (target >= address_space_end) {
+        return vma_map.end();
+    } else {
+        return std::prev(vma_map.upper_bound(target));
+    }
+}
+
+MemoryManager::VMAHandle MemoryManager::AllocateMemory(GPUVAddr target, std::size_t offset,
+                                                       u64 size) {
+
+    // This is the appropriately sized VMA that will turn into our allocation.
+    VMAIter vma_handle = CarveVMA(target, size);
+    VirtualMemoryArea& final_vma = vma_handle->second;
+    ASSERT(final_vma.size == size);
+
+    final_vma.type = VirtualMemoryArea::Type::Allocated;
+    final_vma.offset = offset;
+    UpdatePageTableForVMA(final_vma);
+
+    return MergeAdjacent(vma_handle);
+}
+
+MemoryManager::VMAHandle MemoryManager::MapBackingMemory(GPUVAddr target, u8* memory, u64 size,
+                                                         VAddr backing_addr) {
+    // This is the appropriately sized VMA that will turn into our allocation.
+    VMAIter vma_handle = CarveVMA(target, size);
+    VirtualMemoryArea& final_vma = vma_handle->second;
+    ASSERT(final_vma.size == size);
+
+    final_vma.type = VirtualMemoryArea::Type::Mapped;
+    final_vma.backing_memory = memory;
+    final_vma.backing_addr = backing_addr;
+    UpdatePageTableForVMA(final_vma);
+
+    return MergeAdjacent(vma_handle);
+}
+
+MemoryManager::VMAIter MemoryManager::Unmap(VMAIter vma_handle) {
+    VirtualMemoryArea& vma = vma_handle->second;
+    vma.type = VirtualMemoryArea::Type::Allocated;
+    vma.offset = 0;
+    vma.backing_memory = nullptr;
+
+    UpdatePageTableForVMA(vma);
+
+    return MergeAdjacent(vma_handle);
+}
+
+void MemoryManager::UnmapRange(GPUVAddr target, u64 size) {
+    VMAIter vma = CarveVMARange(target, size);
+    const VAddr target_end = target + size;
+
+    const VMAIter end = vma_map.end();
+    // The comparison against the end of the range must be done using addresses since VMAs can be
+    // merged during this process, causing invalidation of the iterators.
+    while (vma != end && vma->second.base < target_end) {
+        vma = std::next(Unmap(vma));
+    }
+
+    ASSERT(FindVMA(target)->second.size >= size);
+}
+
+MemoryManager::VMAIter MemoryManager::StripIterConstness(const VMAHandle& iter) {
+    // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
+    // non-const access to its container.
+    return vma_map.erase(iter, iter); // Erases an empty range of elements
+}
+
+MemoryManager::VMAIter MemoryManager::CarveVMA(GPUVAddr base, u64 size) {
+    ASSERT_MSG((size & Tegra::MemoryManager::page_mask) == 0, "non-page aligned size: 0x{:016X}",
+               size);
+    ASSERT_MSG((base & Tegra::MemoryManager::page_mask) == 0, "non-page aligned base: 0x{:016X}",
+               base);
+
+    VMAIter vma_handle = StripIterConstness(FindVMA(base));
+    if (vma_handle == vma_map.end()) {
+        // Target address is outside the range managed by the kernel
+        return {};
+    }
+
+    const VirtualMemoryArea& vma = vma_handle->second;
+    if (vma.type == VirtualMemoryArea::Type::Mapped) {
+        // Region is already allocated
+        return {};
+    }
+
+    const VAddr start_in_vma = base - vma.base;
+    const VAddr end_in_vma = start_in_vma + size;
+
+    if (end_in_vma < vma.size) {
+        // Split VMA at the end of the allocated region
+        SplitVMA(vma_handle, end_in_vma);
+    }
+    if (start_in_vma != 0) {
+        // Split VMA at the start of the allocated region
+        vma_handle = SplitVMA(vma_handle, start_in_vma);
+    }
+
+    return vma_handle;
+}
+
+MemoryManager::VMAIter MemoryManager::CarveVMARange(GPUVAddr target, u64 size) {
+    ASSERT_MSG((size & Tegra::MemoryManager::page_mask) == 0, "non-page aligned size: 0x{:016X}",
+               size);
+    ASSERT_MSG((target & Tegra::MemoryManager::page_mask) == 0, "non-page aligned base: 0x{:016X}",
+               target);
+
+    const VAddr target_end = target + size;
+    ASSERT(target_end >= target);
+    ASSERT(size > 0);
+
+    VMAIter begin_vma = StripIterConstness(FindVMA(target));
+    const VMAIter i_end = vma_map.lower_bound(target_end);
+    if (std::any_of(begin_vma, i_end, [](const auto& entry) {
+            return entry.second.type == VirtualMemoryArea::Type::Unmapped;
+        })) {
+        return {};
+    }
+
+    if (target != begin_vma->second.base) {
+        begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base);
+    }
+
+    VMAIter end_vma = StripIterConstness(FindVMA(target_end));
+    if (end_vma != vma_map.end() && target_end != end_vma->second.base) {
+        end_vma = SplitVMA(end_vma, target_end - end_vma->second.base);
+    }
+
+    return begin_vma;
+}
+
+MemoryManager::VMAIter MemoryManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
+    VirtualMemoryArea& old_vma = vma_handle->second;
+    VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA
+
+    // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
+    // a bug. This restriction might be removed later.
+    ASSERT(offset_in_vma < old_vma.size);
+    ASSERT(offset_in_vma > 0);
+
+    old_vma.size = offset_in_vma;
+    new_vma.base += offset_in_vma;
+    new_vma.size -= offset_in_vma;
+
+    switch (new_vma.type) {
+    case VirtualMemoryArea::Type::Unmapped:
+        break;
+    case VirtualMemoryArea::Type::Allocated:
+        new_vma.offset += offset_in_vma;
+        break;
+    case VirtualMemoryArea::Type::Mapped:
+        new_vma.backing_memory += offset_in_vma;
+        break;
+    }
+
+    ASSERT(old_vma.CanBeMergedWith(new_vma));
+
+    return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
+}
+
+MemoryManager::VMAIter MemoryManager::MergeAdjacent(VMAIter iter) {
+    const VMAIter next_vma = std::next(iter);
+    if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
+        iter->second.size += next_vma->second.size;
+        vma_map.erase(next_vma);
+    }
+
+    if (iter != vma_map.begin()) {
+        VMAIter prev_vma = std::prev(iter);
+        if (prev_vma->second.CanBeMergedWith(iter->second)) {
+            prev_vma->second.size += iter->second.size;
+            vma_map.erase(iter);
+            iter = prev_vma;
+        }
+    }
+
+    return iter;
+}
+
+void MemoryManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
+    switch (vma.type) {
+    case VirtualMemoryArea::Type::Unmapped:
+        UnmapRegion(vma.base, vma.size);
+        break;
+    case VirtualMemoryArea::Type::Allocated:
+        MapMemoryRegion(vma.base, vma.size, nullptr, vma.backing_addr);
+        break;
+    case VirtualMemoryArea::Type::Mapped:
+        MapMemoryRegion(vma.base, vma.size, vma.backing_memory, vma.backing_addr);
+        break;
+    }
 }
 
 } // namespace Tegra
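
CarveVMA, SplitVMA, and MergeAdjacent together maintain one invariant: vma_map always partitions the address space into non-overlapping, maximal runs of uniform attributes, keyed by base address. A toy illustration of the split half of that bookkeeping (a pared-down VMA with only base/size and a boolean standing in for the Unmapped/Allocated/Mapped enum; illustrative only):

    #include <cstdint>
    #include <map>

    struct ToyVMA {
        std::uint64_t base{};
        std::uint64_t size{};
        bool mapped{}; // stands in for the three-way type enum
    };

    using ToyVMAMap = std::map<std::uint64_t, ToyVMA>;

    // Split the VMA at `it` so a new VMA begins `offset` bytes into it,
    // mirroring SplitVMA: the original iterator keeps the left piece.
    ToyVMAMap::iterator Split(ToyVMAMap& map, ToyVMAMap::iterator it, std::uint64_t offset) {
        ToyVMA right = it->second; // copy, then shrink both halves
        it->second.size = offset;
        right.base += offset;
        right.size -= offset;
        return map.emplace_hint(std::next(it), right.base, right);
    }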

@@ -1,79 +1,147 @@
 // Copyright 2018 yuzu emulator team
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
 #pragma once
 
-#include <array>
-#include <memory>
+#include <map>
 #include <optional>
-#include <vector>
 
 #include "common/common_types.h"
+#include "common/page_table.h"
 
 namespace Tegra {
 
+/**
+ * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space
+ * with homogeneous attributes across its extents. In this particular implementation each VMA is
+ * also backed by a single host memory allocation.
+ */
+struct VirtualMemoryArea {
+    enum class Type : u8 {
+        Unmapped,
+        Allocated,
+        Mapped,
+    };
+
+    /// Virtual base address of the region.
+    GPUVAddr base{};
+    /// Size of the region.
+    u64 size{};
+    /// Memory area mapping type.
+    Type type{Type::Unmapped};
+    /// CPU memory mapped address corresponding to this memory area.
+    VAddr backing_addr{};
+    /// Offset into the backing_memory the mapping starts from.
+    std::size_t offset{};
+    /// Pointer backing this VMA.
+    u8* backing_memory{};
+
+    /// Tests if this area can be merged to the right with `next`.
+    bool CanBeMergedWith(const VirtualMemoryArea& next) const;
+};
+
 class MemoryManager final {
 public:
     MemoryManager();
 
     GPUVAddr AllocateSpace(u64 size, u64 align);
     GPUVAddr AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align);
-    GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size);
-    GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size);
+    GPUVAddr MapBufferEx(GPUVAddr cpu_addr, u64 size);
+    GPUVAddr MapBufferEx(GPUVAddr cpu_addr, GPUVAddr gpu_addr, u64 size);
     GPUVAddr UnmapBuffer(GPUVAddr gpu_addr, u64 size);
-    GPUVAddr GetRegionEnd(GPUVAddr region_start) const;
     std::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr);
 
-    static constexpr u64 PAGE_BITS = 16;
-    static constexpr u64 PAGE_SIZE = 1 << PAGE_BITS;
-    static constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
-
-    u8 Read8(GPUVAddr addr);
-    u16 Read16(GPUVAddr addr);
-    u32 Read32(GPUVAddr addr);
-    u64 Read64(GPUVAddr addr);
-
-    void Write8(GPUVAddr addr, u8 data);
-    void Write16(GPUVAddr addr, u16 data);
-    void Write32(GPUVAddr addr, u32 data);
-    void Write64(GPUVAddr addr, u64 data);
+    template <typename T>
+    T Read(GPUVAddr vaddr);
+
+    template <typename T>
+    void Write(GPUVAddr vaddr, T data);
 
     u8* GetPointer(GPUVAddr vaddr);
 
     void ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t size);
     void WriteBlock(GPUVAddr dest_addr, const void* src_buffer, std::size_t size);
-    void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size);
+    void CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size);
 
 private:
-    enum class PageStatus : u64 {
-        Unmapped = 0xFFFFFFFFFFFFFFFFULL,
-        Allocated = 0xFFFFFFFFFFFFFFFEULL,
-        Reserved = 0xFFFFFFFFFFFFFFFDULL,
-    };
-
-    std::optional<GPUVAddr> FindFreeBlock(GPUVAddr region_start, u64 size, u64 align,
-                                          PageStatus status);
-    VAddr& PageSlot(GPUVAddr gpu_addr);
-
-    static constexpr u64 MAX_ADDRESS{0x10000000000ULL};
-    static constexpr u64 PAGE_TABLE_BITS{10};
-    static constexpr u64 PAGE_TABLE_SIZE{1 << PAGE_TABLE_BITS};
-    static constexpr u64 PAGE_TABLE_MASK{PAGE_TABLE_SIZE - 1};
-    static constexpr u64 PAGE_BLOCK_BITS{14};
-    static constexpr u64 PAGE_BLOCK_SIZE{1 << PAGE_BLOCK_BITS};
-    static constexpr u64 PAGE_BLOCK_MASK{PAGE_BLOCK_SIZE - 1};
-
-    using PageBlock = std::array<VAddr, PAGE_BLOCK_SIZE>;
-    std::array<std::unique_ptr<PageBlock>, PAGE_TABLE_SIZE> page_table{};
-
-    struct MappedRegion {
-        VAddr cpu_addr;
-        GPUVAddr gpu_addr;
-        u64 size;
-    };
-
-    std::vector<MappedRegion> mapped_regions;
+    using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>;
+    using VMAHandle = VMAMap::const_iterator;
+    using VMAIter = VMAMap::iterator;
+
+    void MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
+                  VAddr backing_addr = 0);
+    void MapMemoryRegion(GPUVAddr base, u64 size, u8* target, VAddr backing_addr);
+    void UnmapRegion(GPUVAddr base, u64 size);
+
+    /// Finds the VMA in which the given address is included in, or `vma_map.end()`.
+    VMAHandle FindVMA(GPUVAddr target) const;
+
+    VMAHandle AllocateMemory(GPUVAddr target, std::size_t offset, u64 size);
+
+    /**
+     * Maps an unmanaged host memory pointer at a given address.
+     *
+     * @param target The guest address to start the mapping at.
+     * @param memory The memory to be mapped.
+     * @param size Size of the mapping.
+     * @param state MemoryState tag to attach to the VMA.
+     */
+    VMAHandle MapBackingMemory(GPUVAddr target, u8* memory, u64 size, VAddr backing_addr);
+
+    /// Unmaps a range of addresses, splitting VMAs as necessary.
+    void UnmapRange(GPUVAddr target, u64 size);
+
+    /// Converts a VMAHandle to a mutable VMAIter.
+    VMAIter StripIterConstness(const VMAHandle& iter);
+
+    /// Unmaps the given VMA.
+    VMAIter Unmap(VMAIter vma);
+
+    /**
+     * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
+     * the appropriate error checking.
+     */
+    VMAIter CarveVMA(GPUVAddr base, u64 size);
+
+    /**
+     * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each
+     * end of the range.
+     */
+    VMAIter CarveVMARange(GPUVAddr base, u64 size);
+
+    /**
+     * Splits a VMA in two, at the specified offset.
+     * @returns the right side of the split, with the original iterator becoming the left side.
+     */
+    VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma);
+
+    /**
+     * Checks for and merges the specified VMA with adjacent ones if possible.
+     * @returns the merged VMA or the original if no merging was possible.
+     */
+    VMAIter MergeAdjacent(VMAIter vma);
+
+    /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
+    void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
+
+    GPUVAddr FindFreeRegion(GPUVAddr region_start, u64 size, u64 align,
+                            VirtualMemoryArea::Type vma_type);
+
+private:
+    static constexpr u64 page_bits{16};
+    static constexpr u64 page_size{1 << page_bits};
+    static constexpr u64 page_mask{page_size - 1};
+
+    /// Address space in bits, this is fairly arbitrary but sufficiently large.
+    static constexpr u32 address_space_width = 39;
+    /// Start address for mapping, this is fairly arbitrary but must be non-zero.
+    static constexpr GPUVAddr address_space_base = 0x100000;
+    /// End of address space, based on address space in bits.
+    static constexpr GPUVAddr address_space_end = 1ULL << address_space_width;
+
+    Common::PageTable page_table{page_bits};
+    VMAMap vma_map;
 };
 
 } // namespace Tegra
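
A recurring idiom in both new files is rounding a size or alignment up to page granularity with (x + page_mask) & ~page_mask (see MapBufferEx, UnmapBuffer, and FindFreeRegion above). A quick standalone check of what that arithmetic does at the 64 KiB page size:

    #include <cassert>
    #include <cstdint>

    int main() {
        const std::uint64_t page_size = std::uint64_t(1) << 16; // 0x10000
        const std::uint64_t page_mask = page_size - 1;          // 0x0FFFF

        // Round up to the next page boundary; already-aligned values are unchanged.
        const auto align_up = [page_mask](std::uint64_t x) { return (x + page_mask) & ~page_mask; };

        assert(align_up(0x00001) == 0x10000); // partial page -> one full page
        assert(align_up(0x10000) == 0x10000); // aligned stays put
        assert(align_up(0x10001) == 0x20000); // one byte over -> two pages
        return 0;
    }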

@@ -9,7 +9,6 @@
 #include "common/common_types.h"
 #include "video_core/engines/fermi_2d.h"
 #include "video_core/gpu.h"
-#include "video_core/memory_manager.h"
 
 namespace VideoCore {
 
@@ -76,8 +76,8 @@ GlobalRegion GlobalRegionCacheOpenGL::GetGlobalRegion(
     const auto cbufs{gpu.Maxwell3D().state.shader_stages[static_cast<u64>(stage)]};
     const auto addr{cbufs.const_buffers[global_region.GetCbufIndex()].address +
                     global_region.GetCbufOffset()};
-    const auto actual_addr{memory_manager.Read64(addr)};
-    const auto size{memory_manager.Read32(addr + 8)};
+    const auto actual_addr{memory_manager.Read<u64>(addr)};
+    const auto size{memory_manager.Read<u32>(addr + 8)};
 
     // Look up global region in the cache based on address
     const auto& host_ptr{memory_manager.GetPointer(actual_addr)};

@@ -610,11 +610,11 @@ CachedSurface::CachedSurface(const SurfaceParams& params)
     // check is necessary to prevent flushing from overwriting unmapped memory.
 
     auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()};
-    const u64 max_size{memory_manager.GetRegionEnd(params.gpu_addr) - params.gpu_addr};
-    if (cached_size_in_bytes > max_size) {
-        LOG_ERROR(HW_GPU, "Surface size {} exceeds region size {}", params.size_in_bytes, max_size);
-        cached_size_in_bytes = max_size;
-    }
+    // const u64 max_size{memory_manager.GetRegionEnd(params.gpu_addr) - params.gpu_addr};
+    // if (cached_size_in_bytes > max_size) {
+    //     LOG_ERROR(HW_GPU, "Surface size {} exceeds region size {}", params.size_in_bytes,
+    //         max_size); cached_size_in_bytes = max_size;
+    //}
 
     cpu_addr = *memory_manager.GpuToCpuAddress(params.gpu_addr);
 }
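
The surface-size clamp here is commented out rather than ported because it depended on GetRegionEnd, which this commit removes from MemoryManager without a direct replacement; presumably oversized surfaces stay unclamped until an equivalent query over the VMA map exists.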