
Kernel: Add VMManager to manage process address spaces

This enables more dynamic management of the process address space,
compared to just directly configuring the page table for major areas.

This will serve as the foundation upon which the rest of the Kernel
memory management functions will be built.
Yuri Kunde Schlesner 2015-05-21 00:37:07 -03:00
parent ad883db7a9
commit 0a60aa75c2
6 changed files with 492 additions and 16 deletions

src/core/CMakeLists.txt

@@ -36,6 +36,7 @@ set(SRCS
hle/kernel/shared_memory.cpp
hle/kernel/thread.cpp
hle/kernel/timer.cpp
+hle/kernel/vm_manager.cpp
hle/service/ac_u.cpp
hle/service/act_u.cpp
hle/service/am_app.cpp
@@ -147,6 +148,7 @@ set(HEADERS
hle/kernel/shared_memory.h
hle/kernel/thread.h
hle/kernel/timer.h
+hle/kernel/vm_manager.h
hle/result.h
hle/service/ac_u.h
hle/service/act_u.h

src/core/hle/kernel/vm_manager.cpp

@@ -0,0 +1,245 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/assert.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory_setup.h"
namespace Kernel {
bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
ASSERT(base + size == next.base);
if (permissions != next.permissions ||
meminfo_state != next.meminfo_state ||
type != next.type) {
return false;
}
if (type == VMAType::AllocatedMemoryBlock &&
(backing_block != next.backing_block || offset + size != next.offset)) {
return false;
}
if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
return false;
}
if (type == VMAType::MMIO && paddr + size != next.paddr) {
return false;
}
return true;
}
VMManager::VMManager() {
Reset();
}
void VMManager::Reset() {
vma_map.clear();
// Initialize the map with a single free region covering the entire managed space.
VirtualMemoryArea initial_vma;
initial_vma.size = MAX_ADDRESS;
vma_map.emplace(initial_vma.base, initial_vma);
UpdatePageTableForVMA(initial_vma);
}
VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
return std::prev(vma_map.upper_bound(target));
}
ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
std::shared_ptr<std::vector<u8>> block, u32 offset, u32 size, MemoryState state) {
ASSERT(block != nullptr);
ASSERT(offset + size <= block->size());
// This is the appropriately sized VMA that will turn into our allocation.
CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
VirtualMemoryArea& final_vma = vma_handle->second;
ASSERT(final_vma.size == size);
final_vma.type = VMAType::AllocatedMemoryBlock;
final_vma.permissions = VMAPermission::ReadWrite;
final_vma.meminfo_state = state;
final_vma.backing_block = block;
final_vma.offset = offset;
UpdatePageTableForVMA(final_vma);
return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}
ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u32 size, MemoryState state) {
ASSERT(memory != nullptr);
// This is the appropriately sized VMA that will turn into our allocation.
CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
VirtualMemoryArea& final_vma = vma_handle->second;
ASSERT(final_vma.size == size);
final_vma.type = VMAType::BackingMemory;
final_vma.permissions = VMAPermission::ReadWrite;
final_vma.meminfo_state = state;
final_vma.backing_memory = memory;
UpdatePageTableForVMA(final_vma);
return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}
ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state) {
// This is the appropriately sized VMA that will turn into our allocation.
CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
VirtualMemoryArea& final_vma = vma_handle->second;
ASSERT(final_vma.size == size);
final_vma.type = VMAType::MMIO;
final_vma.permissions = VMAPermission::ReadWrite;
final_vma.meminfo_state = state;
final_vma.paddr = paddr;
UpdatePageTableForVMA(final_vma);
return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}
void VMManager::Unmap(VMAHandle vma_handle) {
VMAIter iter = StripIterConstness(vma_handle);
VirtualMemoryArea& vma = iter->second;
vma.type = VMAType::Free;
vma.permissions = VMAPermission::None;
vma.meminfo_state = MemoryState::Free;
vma.backing_block = nullptr;
vma.offset = 0;
vma.backing_memory = nullptr;
vma.paddr = 0;
UpdatePageTableForVMA(vma);
MergeAdjacent(iter);
}
void VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
VMAIter iter = StripIterConstness(vma_handle);
VirtualMemoryArea& vma = iter->second;
vma.permissions = new_perms;
UpdatePageTableForVMA(vma);
MergeAdjacent(iter);
}
VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
// This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
// non-const access to its container.
return vma_map.erase(iter, iter); // Erases an empty range of elements
}
ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: %08X", size);
ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: %08X", base);
VMAIter vma_handle = StripIterConstness(FindVMA(base));
if (vma_handle == vma_map.end()) {
// Target address is outside the range managed by the kernel
return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
ErrorSummary::InvalidArgument, ErrorLevel::Usage); // 0xE0E01BF5
}
VirtualMemoryArea& vma = vma_handle->second;
if (vma.type != VMAType::Free) {
// Region is already allocated
return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5
}
u32 start_in_vma = base - vma.base;
u32 end_in_vma = start_in_vma + size;
if (end_in_vma > vma.size) {
// Requested allocation doesn't fit inside VMA
return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5
}
if (end_in_vma != vma.size) {
// Split VMA at the end of the allocated region
SplitVMA(vma_handle, end_in_vma);
}
if (start_in_vma != 0) {
// Split VMA at the start of the allocated region
vma_handle = SplitVMA(vma_handle, start_in_vma);
}
return MakeResult<VMAIter>(vma_handle);
}
VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u32 offset_in_vma) {
VirtualMemoryArea& old_vma = vma_handle->second;
VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA
// For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
// a bug. This restriction might be removed later.
ASSERT(offset_in_vma < old_vma.size);
ASSERT(offset_in_vma > 0);
old_vma.size = offset_in_vma;
new_vma.base += offset_in_vma;
new_vma.size -= offset_in_vma;
switch (new_vma.type) {
case VMAType::Free:
break;
case VMAType::AllocatedMemoryBlock:
new_vma.offset += offset_in_vma;
break;
case VMAType::BackingMemory:
new_vma.backing_memory += offset_in_vma;
break;
case VMAType::MMIO:
new_vma.paddr += offset_in_vma;
break;
}
ASSERT(old_vma.CanBeMergedWith(new_vma));
return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
}
VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
VMAIter next_vma = std::next(iter);
if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
iter->second.size += next_vma->second.size;
vma_map.erase(next_vma);
}
if (iter != vma_map.begin()) {
VMAIter prev_vma = std::prev(iter);
if (prev_vma->second.CanBeMergedWith(iter->second)) {
prev_vma->second.size += iter->second.size;
vma_map.erase(iter);
iter = prev_vma;
}
}
return iter;
}
void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
switch (vma.type) {
case VMAType::Free:
Memory::UnmapRegion(vma.base, vma.size);
break;
case VMAType::AllocatedMemoryBlock:
Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_block->data() + vma.offset);
break;
case VMAType::BackingMemory:
Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_memory);
break;
case VMAType::MMIO:
// TODO(yuriks): Add support for MMIO handlers.
Memory::MapIoRegion(vma.base, vma.size);
break;
}
}
}
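
As an aside, the two std::map idioms this file leans on (FindVMA's std::prev(upper_bound(...)) lookup and StripIterConstness's empty-range erase) can be exercised in isolation. A minimal standalone sketch, not part of the commit, with arbitrary map contents:

// Standalone illustration of the std::map idioms used by FindVMA and
// StripIterConstness above. Plain C++11.
#include <cassert>
#include <iterator>
#include <map>

int main() {
    std::map<int, char> regions{{0, 'a'}, {10, 'b'}, {20, 'c'}};

    // FindVMA's lookup: upper_bound(t) is the first entry with key > t, so
    // its predecessor is the entry whose key is <= t.
    auto it = std::prev(regions.upper_bound(15));
    assert(it->first == 10); // "address" 15 falls in the region based at 10

    // StripIterConstness's trick: erase(first, last) over an empty range
    // removes nothing but returns a mutable iterator at the same position.
    std::map<int, char>::const_iterator handle = it;
    std::map<int, char>::iterator mut = regions.erase(handle, handle);
    mut->second = 'B'; // writable through the converted iterator
    assert(regions.at(10) == 'B');
    return 0;
}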

src/core/hle/kernel/vm_manager.h

@@ -0,0 +1,200 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "common/common_types.h"
#include "core/hle/result.h"
namespace Kernel {
enum class VMAType : u8 {
/// VMA represents an unmapped region of the address space.
Free,
/// VMA is backed by a ref-counted allocated memory block.
AllocatedMemoryBlock,
/// VMA is backed by a raw, unmanaged pointer.
BackingMemory,
/// VMA is mapped to MMIO registers at a fixed PAddr.
MMIO,
// TODO(yuriks): Implement MemoryAlias to support MAP/UNMAP
};
/// Permissions for mapped memory blocks
enum class VMAPermission : u8 {
None = 0,
Read = 1,
Write = 2,
Execute = 4,
ReadWrite = Read | Write,
ReadExecute = Read | Execute,
WriteExecute = Write | Execute,
ReadWriteExecute = Read | Write | Execute,
};
/// Set of values returned in MemoryInfo.state by svcQueryMemory.
enum class MemoryState : u8 {
Free = 0,
Reserved = 1,
IO = 2,
Static = 3,
Code = 4,
Private = 5,
Shared = 6,
Continuous = 7,
Aliased = 8,
Alias = 9,
AliasCode = 10,
Locked = 11,
};
/**
* Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space
* with homogeneous attributes across its extents. In this particular implementation each VMA is
* also backed by a single host memory allocation.
*/
struct VirtualMemoryArea {
/// Virtual base address of the region.
VAddr base = 0;
/// Size of the region.
u32 size = 0;
VMAType type = VMAType::Free;
VMAPermission permissions = VMAPermission::None;
/// Tag returned by svcQueryMemory. Not otherwise used.
MemoryState meminfo_state = MemoryState::Free;
// Settings for type = AllocatedMemoryBlock
/// Memory block backing this VMA.
std::shared_ptr<std::vector<u8>> backing_block = nullptr;
/// Offset into the backing block at which the mapping starts.
u32 offset = 0;
// Settings for type = BackingMemory
/// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
u8* backing_memory = nullptr;
// Settings for type = MMIO
/// Physical address of the register area this VMA maps to.
PAddr paddr = 0;
/// Tests if this area can be merged to the right with `next`.
bool CanBeMergedWith(const VirtualMemoryArea& next) const;
};
/**
* Manages a process' virtual addressing space. This class maintains a list of allocated and free
* regions in the address space, along with their attributes, and allows kernel clients to
* manipulate it, adjusting the page table to match.
*
* This is similar in idea and purpose to the VM manager present in operating system kernels, with
* the main difference being that it doesn't have to support swapping or memory mapping of files.
* The implementation is also simplified by not having to allocate page frames. See these articles
* about the Linux kernel for an explanation of the concept and implementation:
* - http://duartes.org/gustavo/blog/post/how-the-kernel-manages-your-memory/
* - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/
*/
class VMManager {
// TODO(yuriks): Make page tables switchable to support multiple VMManagers
public:
/**
* The maximum amount of address space managed by the kernel. Addresses above this are never used.
* @note This is the limit used by the New 3DS kernel. Old 3DS used 0x20000000.
*/
static const u32 MAX_ADDRESS = 0x40000000;
/**
* A map covering the entirety of the managed address space, keyed by the `base` field of each
* VMA. It must always be modified by splitting or merging VMAs, so that the invariant
* `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be
* merged when possible so that no two similar and adjacent regions exist that have not been
* merged.
*/
std::map<VAddr, VirtualMemoryArea> vma_map;
using VMAHandle = decltype(vma_map)::const_iterator;
VMManager();
/// Clears the address space map, re-initializing with a single free area.
void Reset();
/// Finds the VMA that contains the given address, or `vma_map.end()` if no VMA contains it.
VMAHandle FindVMA(VAddr target) const;
// TODO(yuriks): Should these functions actually return the handle?
/**
* Maps part of a ref-counted block of memory at a given address.
*
* @param target The guest address to start the mapping at.
* @param block The block to be mapped.
* @param offset Offset into `block` to map from.
* @param size Size of the mapping.
* @param state MemoryState tag to attach to the VMA.
*/
ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
u32 offset, u32 size, MemoryState state);
/**
* Maps an unmanaged host memory pointer at a given address.
*
* @param target The guest address to start the mapping at.
* @param memory The memory to be mapped.
* @param size Size of the mapping.
* @param state MemoryState tag to attach to the VMA.
*/
ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u32 size, MemoryState state);
/**
* Maps a memory-mapped IO region at a given address.
*
* @param target The guest address to start the mapping at.
* @param paddr The physical address where the registers are present.
* @param size Size of the mapping.
* @param state MemoryState tag to attach to the VMA.
*/
ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state);
/// Unmaps the given VMA.
void Unmap(VMAHandle vma);
/// Changes the permissions of the given VMA.
void Reprotect(VMAHandle vma, VMAPermission new_perms);
private:
using VMAIter = decltype(vma_map)::iterator;
/// Converts a VMAHandle to a mutable VMAIter.
VMAIter StripIterConstness(const VMAHandle& iter);
/**
* Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
* the appropriate error checking.
*/
ResultVal<VMAIter> CarveVMA(VAddr base, u32 size);
/**
* Splits a VMA in two, at the specified offset.
* @returns the right side of the split, with the original iterator becoming the left side.
*/
VMAIter SplitVMA(VMAIter vma, u32 offset_in_vma);
/**
* Checks for and merges the specified VMA with adjacent ones if possible.
* @returns the merged VMA or the original if no merging was possible.
*/
VMAIter MergeAdjacent(VMAIter vma);
/// Updates the pages corresponding to this VMA so they match the VMA's attributes.
void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
};
}
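
Taken together, the interface above supports a map/reprotect/unmap lifecycle, with vma_map kept gap-free throughout. A hypothetical usage sketch, not part of this commit, which assumes ResultVal's Succeeded() and operator* accessors from core/hle/result.h, and uses illustrative addresses:

// Hypothetical VMManager usage (illustrative, requires the Citra tree).
#include <cassert>
#include <memory>
#include <vector>
#include "core/hle/kernel/vm_manager.h"

void ExampleUsage() {
    using namespace Kernel;
    VMManager manager;

    // Map one ref-counted 4 KiB block as process-private memory.
    auto block = std::make_shared<std::vector<u8>>(0x1000);
    auto mapped = manager.MapMemoryBlock(0x08000000, block, 0, 0x1000, MemoryState::Private);
    if (mapped.Succeeded()) {
        // Mappings start out ReadWrite; tighten the permissions, e.g. for code.
        manager.Reprotect(*mapped, VMAPermission::ReadExecute);

        // The documented invariant: VMAs tile the managed space with no gaps.
        VAddr expected_base = 0;
        for (const auto& entry : manager.vma_map) {
            assert(entry.second.base == expected_base);
            expected_base = entry.second.base + entry.second.size;
        }
        assert(expected_base == VMManager::MAX_ADDRESS);

        // Unmap via a fresh lookup; merging can invalidate older handles.
        manager.Unmap(manager.FindVMA(0x08000000));
    }
}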

src/core/mem_map.cpp

@@ -8,6 +8,10 @@
#include "common/logging/log.h"
#include "core/hle/config_mem.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/result.h"
#include "core/hle/shared_page.h"
#include "core/mem_map.h"
#include "core/memory.h"
@@ -31,17 +35,18 @@ struct MemoryArea {
u8** ptr;
u32 base;
u32 size;
+const char* name;
};
// We don't declare the IO regions in here since they're handled by other means.
static MemoryArea memory_areas[] = {
{&g_exefs_code, PROCESS_IMAGE_VADDR, PROCESS_IMAGE_MAX_SIZE},
{&g_heap, HEAP_VADDR, HEAP_SIZE },
{&g_shared_mem, SHARED_MEMORY_VADDR, SHARED_MEMORY_SIZE },
{&g_heap_linear, LINEAR_HEAP_VADDR, LINEAR_HEAP_SIZE },
{&g_vram, VRAM_VADDR, VRAM_SIZE },
{&g_dsp_mem, DSP_RAM_VADDR, DSP_RAM_SIZE },
{&g_tls_mem, TLS_AREA_VADDR, TLS_AREA_SIZE },
{&g_exefs_code, PROCESS_IMAGE_VADDR, PROCESS_IMAGE_MAX_SIZE, "Process Image"},
{&g_heap, HEAP_VADDR, HEAP_SIZE, "Heap"},
{&g_shared_mem, SHARED_MEMORY_VADDR, SHARED_MEMORY_SIZE, "Shared Memory"},
{&g_heap_linear, LINEAR_HEAP_VADDR, LINEAR_HEAP_SIZE, "Linear Heap"},
{&g_vram, VRAM_VADDR, VRAM_SIZE, "VRAM"},
{&g_dsp_mem, DSP_RAM_VADDR, DSP_RAM_SIZE, "DSP RAM"},
{&g_tls_mem, TLS_AREA_VADDR, TLS_AREA_SIZE, "TLS Area"},
};
/// Represents a block of memory mapped by ControlMemory/MapMemoryBlock
@@ -135,15 +140,27 @@ VAddr PhysicalToVirtualAddress(const PAddr addr) {
return addr | 0x80000000;
}
+// TODO(yuriks): Move this into Process
+static Kernel::VMManager address_space;
void Init() {
+using namespace Kernel;
InitMemoryMap();
for (MemoryArea& area : memory_areas) {
-*area.ptr = new u8[area.size];
-MapMemoryRegion(area.base, area.size, *area.ptr);
+auto block = std::make_shared<std::vector<u8>>(area.size);
+*area.ptr = block->data(); // TODO(yuriks): Remove
+address_space.MapMemoryBlock(area.base, std::move(block), 0, area.size, MemoryState::Private).Unwrap();
}
-MapMemoryRegion(CONFIG_MEMORY_VADDR, CONFIG_MEMORY_SIZE, (u8*)&ConfigMem::config_mem);
-MapMemoryRegion(SHARED_PAGE_VADDR, SHARED_PAGE_SIZE, (u8*)&SharedPage::shared_page);
+auto cfg_mem_vma = address_space.MapBackingMemory(CONFIG_MEMORY_VADDR,
+(u8*)&ConfigMem::config_mem, CONFIG_MEMORY_SIZE, MemoryState::Shared).MoveFrom();
+address_space.Reprotect(cfg_mem_vma, VMAPermission::Read);
+auto shared_page_vma = address_space.MapBackingMemory(SHARED_PAGE_VADDR,
+(u8*)&SharedPage::shared_page, SHARED_PAGE_SIZE, MemoryState::Shared).MoveFrom();
+address_space.Reprotect(shared_page_vma, VMAPermission::Read);
LOG_DEBUG(HW_Memory, "initialized OK, RAM at %p", g_heap);
}
@@ -152,8 +169,9 @@ void Shutdown() {
heap_map.clear();
heap_linear_map.clear();
+address_space.Reset();
for (MemoryArea& area : memory_areas) {
-delete[] *area.ptr;
*area.ptr = nullptr;
}
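
Init() can call Unwrap() unconditionally because these boot-time mappings target regions known to be free; a caller that can legitimately fail would inspect the ResultVal instead. A hypothetical sketch, not part of this commit, assuming ResultVal's Failed() and Code() accessors from core/hle/result.h:

// Hypothetical error-checked wrapper around MapBackingMemory.
static ResultCode MapOrFail(Kernel::VMManager& vm, VAddr target, u8* host_ptr, u32 size) {
    auto mapped = vm.MapBackingMemory(target, host_ptr, size, Kernel::MemoryState::Shared);
    if (mapped.Failed()) {
        // CarveVMA reports errors such as 0xE0E01BF5 (invalid address) here.
        return mapped.Code();
    }
    vm.Reprotect(*mapped, Kernel::VMAPermission::Read);
    return RESULT_SUCCESS;
}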

src/core/memory.cpp

@@ -14,12 +14,10 @@
#include "core/hw/hw.h"
#include "core/mem_map.h"
#include "core/memory.h"
#include "core/memory_setup.h"
namespace Memory {
-const u32 PAGE_MASK = PAGE_SIZE - 1;
-const int PAGE_BITS = 12;
enum class PageType {
/// Page is unmapped and should cause an access error.
Unmapped,
@@ -64,7 +62,7 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
while (base != end) {
ASSERT_MSG(base < PageTable::NUM_ENTRIES, "out of range mapping at %08X", base);
-if (current_page_table->attributes[base] != PageType::Unmapped) {
+if (current_page_table->attributes[base] != PageType::Unmapped && type != PageType::Unmapped) {
LOG_ERROR(HW_Memory, "overlapping memory ranges at %08X", base * PAGE_SIZE);
}
current_page_table->attributes[base] = type;
@@ -92,6 +90,12 @@ void MapIoRegion(VAddr base, u32 size) {
MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
}
+void UnmapRegion(VAddr base, u32 size) {
+ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
+ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
+MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
+}
template <typename T>
T Read(const VAddr vaddr) {
const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
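
MapPages works in whole pages, so UnmapRegion's alignment asserts guarantee that the divisions by PAGE_SIZE are exact. A standalone check of the arithmetic with illustrative values, not part of the commit:

#include <cassert>
#include <cstdint>

int main() {
    const std::uint32_t PAGE_SIZE = 0x1000; // 4 KiB pages, as in memory.h
    const std::uint32_t PAGE_MASK = PAGE_SIZE - 1;

    std::uint32_t base = 0x08004000, size = 0x3000; // a page-aligned region
    assert((base & PAGE_MASK) == 0 && (size & PAGE_MASK) == 0);

    // MapPages receives page indices and page counts, not byte addresses:
    assert(base / PAGE_SIZE == 0x08004); // index of the first page
    assert(size / PAGE_SIZE == 3);       // number of pages in the region
    return 0;
}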

src/core/memory_setup.h

@@ -6,8 +6,13 @@
#include "common/common_types.h"
#include "core/memory.h"
namespace Memory {
+const u32 PAGE_MASK = PAGE_SIZE - 1;
+const int PAGE_BITS = 12;
void InitMemoryMap();
/**
@@ -26,4 +31,6 @@ void MapIoRegion(VAddr base, u32 size);
*/
void MapIoRegion(VAddr base, u32 size);
+void UnmapRegion(VAddr base, u32 size);
}