From 7e94e544f40ee8fc58fdea7c71fe48d8edd6fcb6 Mon Sep 17 00:00:00 2001
From: Markus Wick
Date: Sun, 12 Jan 2020 16:51:07 +0100
Subject: [PATCH 1/3] core/loaders: Simplify PhysicalMemory usage.

PhysicalMemory is currently a std::vector; however, we might want to
replace it with a fancier allocator later, so we can no longer rely on
the std::vector iterators. Use data() + std::memcpy instead.

---
 src/core/loader/elf.cpp |  3 ++-
 src/core/loader/kip.cpp |  5 +++--
 src/core/loader/nso.cpp | 12 +++++++-----
 3 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp
index f1795fdd62..8908e5328e 100644
--- a/src/core/loader/elf.cpp
+++ b/src/core/loader/elf.cpp
@@ -335,7 +335,8 @@ Kernel::CodeSet ElfReader::LoadInto(VAddr vaddr) {
             codeset_segment->addr = segment_addr;
             codeset_segment->size = aligned_size;
 
-            memcpy(&program_image[current_image_position], GetSegmentPtr(i), p->p_filesz);
+            std::memcpy(program_image.data() + current_image_position, GetSegmentPtr(i),
+                        p->p_filesz);
             current_image_position += aligned_size;
         }
     }
diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp
index 474b55cb13..092103abed 100644
--- a/src/core/loader/kip.cpp
+++ b/src/core/loader/kip.cpp
@@ -2,6 +2,7 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <cstring>
 #include "core/file_sys/kernel_executable.h"
 #include "core/file_sys/program_metadata.h"
 #include "core/gdbstub/gdbstub.h"
@@ -76,8 +77,8 @@ AppLoader::LoadResult AppLoader_KIP::Load(Kernel::Process& process) {
         segment.addr = offset;
         segment.offset = offset;
         segment.size = PageAlignSize(static_cast(data.size()));
-        program_image.resize(offset);
-        program_image.insert(program_image.end(), data.begin(), data.end());
+        program_image.resize(offset + data.size());
+        std::memcpy(program_image.data() + offset, data.data(), data.size());
     };
 
     load_segment(codeset.CodeSegment(), kip->GetTextSection(), kip->GetTextOffset());
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index f629892aea..515c5accbd 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -3,6 +3,7 @@
 // Refer to the license.txt file included.
 
 #include
+#include <cstring>
 #include
 
 #include "common/common_funcs.h"
@@ -96,8 +97,9 @@ std::optional AppLoader_NSO::LoadModule(Kernel::Process& process,
         if (nso_header.IsSegmentCompressed(i)) {
             data = DecompressSegment(data, nso_header.segments[i]);
         }
-        program_image.resize(nso_header.segments[i].location);
-        program_image.insert(program_image.end(), data.begin(), data.end());
+        program_image.resize(nso_header.segments[i].location + data.size());
+        std::memcpy(program_image.data() + nso_header.segments[i].location, data.data(),
+                    data.size());
         codeset.segments[i].addr = nso_header.segments[i].location;
         codeset.segments[i].offset = nso_header.segments[i].location;
         codeset.segments[i].size = PageAlignSize(static_cast(data.size()));
@@ -139,12 +141,12 @@ std::optional AppLoader_NSO::LoadModule(Kernel::Process& process,
         std::vector<u8> pi_header;
         pi_header.insert(pi_header.begin(), reinterpret_cast<const u8*>(&nso_header),
                          reinterpret_cast<const u8*>(&nso_header) + sizeof(NSOHeader));
-        pi_header.insert(pi_header.begin() + sizeof(NSOHeader), program_image.begin(),
-                         program_image.end());
+        pi_header.insert(pi_header.begin() + sizeof(NSOHeader), program_image.data(),
+                         program_image.data() + program_image.size());
 
         pi_header = pm->PatchNSO(pi_header, file.GetName());
 
-        std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), program_image.begin());
+        std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), program_image.data());
     }
 
     // Apply cheats if they exist and the program has a valid title ID
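
The pattern this patch adopts can be summarized with the short, self-contained sketch below. It is illustrative only, not yuzu code: instead of appending to the backing vector with iterator-based insert(), the buffer is grown with resize() and filled through data() + std::memcpy, so the backing type only needs to expose data(), size() and resize(). The PhysicalMemory alias and the AppendSegment helper are simplified stand-ins for Kernel::PhysicalMemory and the loaders' segment-loading code.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Simplified stand-in for Kernel::PhysicalMemory; only data()/size()/resize() are assumed.
using PhysicalMemory = std::vector<std::uint8_t>;

// Copy `data` into `image` at `offset`, growing the buffer if needed. The actual loaders
// simply resize to offset + data.size(), since segments are written in increasing order.
void AppendSegment(PhysicalMemory& image, std::size_t offset,
                   const std::vector<std::uint8_t>& data) {
    if (data.empty()) {
        return; // nothing to copy
    }
    if (image.size() < offset + data.size()) {
        image.resize(offset + data.size());
    }
    std::memcpy(image.data() + offset, data.data(), data.size());
}

int main() {
    PhysicalMemory image;
    AppendSegment(image, 0x1000, std::vector<std::uint8_t>(0x200, 0xAA));
    return image.size() == 0x1200 ? 0 : 1;
}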
From 55103da066ce7cde79d1872cb3b5058565621ef3 Mon Sep 17 00:00:00 2001
From: Markus Wick
Date: Sun, 12 Jan 2020 16:55:51 +0100
Subject: [PATCH 2/3] core/hle: Simplify PhysicalMemory usage in vm_manager.

---
 src/core/hle/kernel/vm_manager.cpp | 34 ++++++++++--------------------
 1 file changed, 11 insertions(+), 23 deletions(-)

diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index a9a20ef76d..d223b4eccc 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -3,6 +3,7 @@
 // Refer to the license.txt file included.
 
 #include
+#include <cstring>
 #include
 #include
 #include "common/alignment.h"
@@ -269,18 +270,9 @@ ResultVal VMManager::SetHeapSize(u64 size) {
     // If necessary, expand backing vector to cover new heap extents in
     // the case of allocating. Otherwise, shrink the backing memory,
     // if a smaller heap has been requested.
-    const u64 old_heap_size = GetCurrentHeapSize();
-    if (size > old_heap_size) {
-        const u64 alloc_size = size - old_heap_size;
-
-        heap_memory->insert(heap_memory->end(), alloc_size, 0);
-        RefreshMemoryBlockMappings(heap_memory.get());
-    } else if (size < old_heap_size) {
-        heap_memory->resize(size);
-        heap_memory->shrink_to_fit();
-
-        RefreshMemoryBlockMappings(heap_memory.get());
-    }
+    heap_memory->resize(size);
+    heap_memory->shrink_to_fit();
+    RefreshMemoryBlockMappings(heap_memory.get());
 
     heap_end = heap_region_base + size;
     ASSERT(GetCurrentHeapSize() == heap_memory->size());
@@ -752,24 +744,20 @@ void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryAre
     // Always merge allocated memory blocks, even when they don't share the same backing block.
     if (left.type == VMAType::AllocatedMemoryBlock &&
         (left.backing_block != right.backing_block || left.offset + left.size != right.offset)) {
-        const auto right_begin = right.backing_block->begin() + right.offset;
-        const auto right_end = right_begin + right.size;
 
         // Check if we can save work.
         if (left.offset == 0 && left.size == left.backing_block->size()) {
             // Fast case: left is an entire backing block.
-            left.backing_block->insert(left.backing_block->end(), right_begin, right_end);
+            left.backing_block->resize(left.size + right.size);
+            std::memcpy(left.backing_block->data() + left.size,
+                        right.backing_block->data() + right.offset, right.size);
         } else {
             // Slow case: make a new memory block for left and right.
-            const auto left_begin = left.backing_block->begin() + left.offset;
-            const auto left_end = left_begin + left.size;
-            const auto left_size = static_cast(std::distance(left_begin, left_end));
-            const auto right_size = static_cast(std::distance(right_begin, right_end));
-
             auto new_memory = std::make_shared<PhysicalMemory>();
-            new_memory->reserve(left_size + right_size);
-            new_memory->insert(new_memory->end(), left_begin, left_end);
-            new_memory->insert(new_memory->end(), right_begin, right_end);
+            new_memory->resize(left.size + right.size);
+            std::memcpy(new_memory->data(), left.backing_block->data() + left.offset, left.size);
+            std::memcpy(new_memory->data() + left.size, right.backing_block->data() + right.offset,
+                        right.size);
 
             left.backing_block = std::move(new_memory);
             left.offset = 0;

From 56672b8c9809d8f5585d208f5b85549bc3fe2a0e Mon Sep 17 00:00:00 2001
From: Markus Wick
Date: Sun, 12 Jan 2020 17:04:15 +0100
Subject: [PATCH 3/3] core/memory: Create a special MapMemoryRegion for physical memory.

This allows us to create a fastmem arena within the memory.cpp helpers.

---
 src/core/hle/kernel/physical_memory.h |  5 ++++-
 src/core/hle/kernel/vm_manager.cpp    |  3 +--
 src/core/memory.cpp                   | 11 +++++++++++
 src/core/memory.h                     | 16 +++++++++++++++-
 4 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/src/core/hle/kernel/physical_memory.h b/src/core/hle/kernel/physical_memory.h
index 0905653102..b689e8e8b1 100644
--- a/src/core/hle/kernel/physical_memory.h
+++ b/src/core/hle/kernel/physical_memory.h
@@ -14,6 +14,9 @@ namespace Kernel {
 // - Second to ensure all host backing memory used is aligned to 256 bytes due
 //   to strict alignment restrictions on GPU memory.
 
-using PhysicalMemory = std::vector>;
+using PhysicalMemoryVector = std::vector>;
+class PhysicalMemory final : public PhysicalMemoryVector {
+    using PhysicalMemoryVector::PhysicalMemoryVector;
+};
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index d223b4eccc..0b3500fce5 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -780,8 +780,7 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
         memory.UnmapRegion(page_table, vma.base, vma.size);
         break;
     case VMAType::AllocatedMemoryBlock:
-        memory.MapMemoryRegion(page_table, vma.base, vma.size,
-                               vma.backing_block->data() + vma.offset);
+        memory.MapMemoryRegion(page_table, vma.base, vma.size, *vma.backing_block, vma.offset);
         break;
     case VMAType::BackingMemory:
         memory.MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 3c2a29d9b8..f0888327f2 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -14,6 +14,7 @@
 #include "common/swap.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
+#include "core/hle/kernel/physical_memory.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/vm_manager.h"
 #include "core/memory.h"
@@ -38,6 +39,11 @@ struct Memory::Impl {
         system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
     }
 
+    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size,
+                         Kernel::PhysicalMemory& memory, VAddr offset) {
+        MapMemoryRegion(page_table, base, size, memory.data() + offset);
+    }
+
     void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
         ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
@@ -601,6 +607,11 @@ void Memory::SetCurrentPageTable(Kernel::Process& process) {
     impl->SetCurrentPageTable(process);
 }
 
+void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size,
+                             Kernel::PhysicalMemory& memory, VAddr offset) {
+    impl->MapMemoryRegion(page_table, base, size, memory, offset);
+}
+
 void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
     impl->MapMemoryRegion(page_table, base, size, target);
 }
diff --git a/src/core/memory.h b/src/core/memory.h
index 1428a6d609..8913a9da42 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -19,8 +19,9 @@ class System;
 }
 
 namespace Kernel {
+class PhysicalMemory;
 class Process;
-}
+} // namespace Kernel
 
 namespace Memory {
 
@@ -65,6 +66,19 @@ public:
      */
     void SetCurrentPageTable(Kernel::Process& process);
 
+    /**
+     * Maps a physical buffer onto a region of the emulated process address space.
+     *
+     * @param page_table The page table of the emulated process.
+     * @param base The address to start mapping at. Must be page-aligned.
+     * @param size The amount of bytes to map. Must be page-aligned.
+     * @param memory Physical buffer with the memory backing the mapping. Must be of length
+     *               at least `size + offset`.
+     * @param offset The offset within the physical memory. Must be page-aligned.
+     */
+    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size,
+                         Kernel::PhysicalMemory& memory, VAddr offset);
+
     /**
      * Maps an allocated buffer onto a region of the emulated process address space.
      *
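
To make the intent of the new overload concrete, the following self-contained sketch (illustrative only, not yuzu code) shows the indirection patch 3 introduces: the mapping helper receives the whole PhysicalMemory object plus an offset instead of a raw pointer into it, so the helper can see the full backing allocation. That is the hook point where a fastmem arena could later be created before delegating to the raw-pointer path. PageTable, MemoryImpl and the parameter types below are simplified stand-ins for Common::PageTable, Memory::Impl and the real signatures.

#include <cstdint>
#include <vector>

using PhysicalMemory = std::vector<std::uint8_t>; // stand-in for Kernel::PhysicalMemory
struct PageTable {};                               // stand-in for Common::PageTable

struct MemoryImpl {
    // Raw-pointer overload: only sees a pointer into the middle of some host allocation.
    void MapMemoryRegion(PageTable& table, std::uint64_t base, std::uint64_t size,
                         std::uint8_t* target) {
        // ... update page table entries for [base, base + size) to point at `target` ...
        (void)table;
        (void)base;
        (void)size;
        (void)target;
    }

    // PhysicalMemory overload: sees the whole backing block, so a fastmem arena could be
    // set up here before delegating to the raw-pointer path.
    void MapMemoryRegion(PageTable& table, std::uint64_t base, std::uint64_t size,
                         PhysicalMemory& memory, std::uint64_t offset) {
        MapMemoryRegion(table, base, size, memory.data() + offset);
    }
};

int main() {
    PageTable table;
    PhysicalMemory heap(0x10000);
    MemoryImpl memory;
    memory.MapMemoryRegion(table, 0x8000000, 0x4000, heap, 0x1000);
    return 0;
}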