
hle: Fix QueryMemory response for MemoryInfo.

bunnei 2017-10-19 23:00:46 -04:00
parent 716e5cf070
commit dcd6bb82f7
7 changed files with 34 additions and 152 deletions

View file

@@ -133,25 +133,13 @@ void Wrap() {
Memory::Write64(PARAM(0), memory_info.base_address);
Memory::Write64(PARAM(0) + 8, memory_info.size);
Memory::Write64(PARAM(0) + 16, memory_info.permission);
Memory::Write64(PARAM(0) + 24, memory_info.state);
Memory::Write32(PARAM(0) + 16, memory_info.type);
Memory::Write32(PARAM(0) + 20, memory_info.attributes);
Memory::Write32(PARAM(0) + 24, memory_info.permission);
FuncReturn(retval);
}
template <ResultCode func(MemoryInfo*, PageInfo*, Kernel::Handle, u32)>
void Wrap() {
MemoryInfo memory_info = {};
PageInfo page_info = {};
u32 retval = func(&memory_info, &page_info, PARAM(2), PARAM(3)).raw;
Core::CPU().SetReg(1, memory_info.base_address);
Core::CPU().SetReg(2, memory_info.size);
Core::CPU().SetReg(3, memory_info.permission);
Core::CPU().SetReg(4, memory_info.state);
Core::CPU().SetReg(5, page_info.flags);
FuncReturn(retval);
}
template <ResultCode func(s32*, u32)>
void Wrap() {
s32 param_1 = 0;

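Side note (not part of the commit): the Write64/Write32 offsets above follow from natural alignment of the reworked MemoryInfo struct in the last file of this diff. A minimal standalone sketch, with a hypothetical mirror struct, checking that the offsets line up:

#include <cstddef>
#include <cstdint>

// Hypothetical mirror of the reworked MemoryInfo struct, used only to verify that
// natural alignment places each field at the offset the wrapper writes to.
struct MemoryInfoLayout {
    std::uint64_t base_address;
    std::uint64_t size;
    std::uint32_t type;
    std::uint32_t attributes;
    std::uint32_t permission;
};

static_assert(offsetof(MemoryInfoLayout, base_address) == 0, "Write64(PARAM(0))");
static_assert(offsetof(MemoryInfoLayout, size) == 8, "Write64(PARAM(0) + 8)");
static_assert(offsetof(MemoryInfoLayout, type) == 16, "Write32(PARAM(0) + 16)");
static_assert(offsetof(MemoryInfoLayout, attributes) == 20, "Write32(PARAM(0) + 20)");
static_assert(offsetof(MemoryInfoLayout, permission) == 24, "Write32(PARAM(0) + 24)");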
View file

@@ -96,75 +96,9 @@ MemoryRegionInfo* GetMemoryRegion(MemoryRegion region) {
}
void HandleSpecialMapping(VMManager& address_space, const AddressMapping& mapping) {
using namespace Memory;
struct MemoryArea {
VAddr vaddr_base;
PAddr paddr_base;
u32 size;
};
// The order of entries in this array is important. The VRAM and IO VAddr ranges overlap, and
// VRAM must be tried first.
static constexpr MemoryArea memory_areas[] = {
{VRAM_VADDR, VRAM_PADDR, VRAM_SIZE},
{IO_AREA_VADDR, IO_AREA_PADDR, IO_AREA_SIZE},
{DSP_RAM_VADDR, DSP_RAM_PADDR, DSP_RAM_SIZE},
{N3DS_EXTRA_RAM_VADDR, N3DS_EXTRA_RAM_PADDR, N3DS_EXTRA_RAM_SIZE - 0x20000},
};
VAddr mapping_limit = mapping.address + mapping.size;
if (mapping_limit < mapping.address) {
LOG_CRITICAL(Loader, "Mapping size overflowed: address=0x%08" PRIX32 " size=0x%" PRIX32,
mapping.address, mapping.size);
return;
}
auto area =
std::find_if(std::begin(memory_areas), std::end(memory_areas), [&](const auto& area) {
return mapping.address >= area.vaddr_base &&
mapping_limit <= area.vaddr_base + area.size;
});
if (area == std::end(memory_areas)) {
LOG_ERROR(Loader, "Unhandled special mapping: address=0x%08" PRIX32 " size=0x%" PRIX32
" read_only=%d unk_flag=%d",
mapping.address, mapping.size, mapping.read_only, mapping.unk_flag);
return;
}
u32 offset_into_region = mapping.address - area->vaddr_base;
if (area->paddr_base == IO_AREA_PADDR) {
LOG_ERROR(Loader, "MMIO mappings are not supported yet. phys_addr=0x%08" PRIX32,
area->paddr_base + offset_into_region);
return;
}
u8* target_pointer = Memory::GetPhysicalPointer(area->paddr_base + offset_into_region);
// TODO(yuriks): This flag seems to have some other effect, but it's unknown what
MemoryState memory_state = mapping.unk_flag ? MemoryState::Static : MemoryState::IO;
auto vma =
address_space.MapBackingMemory(mapping.address, target_pointer, mapping.size, memory_state)
.Unwrap();
address_space.Reprotect(vma,
mapping.read_only ? VMAPermission::Read : VMAPermission::ReadWrite);
}
void MapSharedPages(VMManager& address_space) {
auto cfg_mem_vma = address_space
.MapBackingMemory(Memory::CONFIG_MEMORY_VADDR,
reinterpret_cast<u8*>(&ConfigMem::config_mem),
Memory::CONFIG_MEMORY_SIZE, MemoryState::Shared)
.Unwrap();
address_space.Reprotect(cfg_mem_vma, VMAPermission::Read);
auto shared_page_vma = address_space
.MapBackingMemory(Memory::SHARED_PAGE_VADDR,
reinterpret_cast<u8*>(&SharedPage::shared_page),
Memory::SHARED_PAGE_SIZE, MemoryState::Shared)
.Unwrap();
address_space.Reprotect(shared_page_vma, VMAPermission::Read);
}
} // namespace Kernel

View file

@@ -117,7 +117,7 @@ void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) {
vm_manager
.MapMemoryBlock(Memory::HEAP_VADDR_END - stack_size,
std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size,
MemoryState::Locked)
MemoryState::Heap)
.Unwrap();
misc_memory_used += stack_size;
memory_region->used += stack_size;
@@ -148,7 +148,7 @@ void Process::LoadModule(SharedPtr<CodeSet> module_, VAddr base_addr) {
};
// Map CodeSet segments
MapSegment(module_->code, VMAPermission::ReadWrite, MemoryState::Private);
MapSegment(module_->code, VMAPermission::ReadExecute, MemoryState::Code);
MapSegment(module_->rodata, VMAPermission::Read, MemoryState::Static);
MapSegment(module_->data, VMAPermission::ReadWrite, MemoryState::Static);
}
@@ -193,7 +193,7 @@ ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission per
ASSERT(heap_end - heap_start == heap_memory->size());
CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start,
size, MemoryState::Private));
size, MemoryState::Heap));
vm_manager.Reprotect(vma, perms);
heap_used += size;
@@ -223,40 +223,8 @@ ResultCode Process::HeapFree(VAddr target, u32 size) {
}
ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission perms) {
auto& linheap_memory = memory_region->linear_heap_memory;
VAddr heap_end = GetLinearHeapBase() + (u32)linheap_memory->size();
// Games and homebrew only ever seem to pass 0 here (which lets the kernel decide the address),
// but explicit addresses are also accepted and respected.
if (target == 0) {
target = heap_end;
}
if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() || target > heap_end ||
target + size < target) {
return ERR_INVALID_ADDRESS;
}
// Expansion of the linear heap is only allowed if you do an allocation immediately at its
// end. It's possible to free gaps in the middle of the heap and then reallocate them later,
// but expansions are only allowed at the end.
if (target == heap_end) {
linheap_memory->insert(linheap_memory->end(), size, 0);
vm_manager.RefreshMemoryBlockMappings(linheap_memory.get());
}
// TODO(yuriks): As is, this lets processes map memory allocated by other processes from the
// same region. It is unknown if or how the 3DS kernel checks against this.
size_t offset = target - GetLinearHeapBase();
CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linheap_memory, offset, size,
MemoryState::Continuous));
vm_manager.Reprotect(vma, perms);
linear_heap_used += size;
memory_region->used += size;
return MakeResult<VAddr>(target);
UNIMPLEMENTED();
return {};
}
ResultCode Process::LinearFree(VAddr target, u32 size) {

View file

@@ -446,7 +446,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
// Map the page to the current process' address space.
// TODO(Subv): Find the correct MemoryState for this region.
vm_manager.MapMemoryBlock(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE,
linheap_memory, offset, Memory::PAGE_SIZE, MemoryState::Static);
linheap_memory, offset, Memory::PAGE_SIZE, MemoryState::ThreadLocalStorage);
}
// Mark the slot as used

View file

@@ -40,19 +40,16 @@ enum class VMAPermission : u8 {
};
/// Set of values returned in MemoryInfo.state by svcQueryMemory.
enum class MemoryState : u8 {
enum class MemoryState : u32 {
Free = 0,
Reserved = 1,
IO = 2,
Static = 3,
Code = 4,
Private = 5,
IO = 1,
Normal = 2,
Code = 3,
Static = 4,
Heap = 5,
Shared = 6,
Continuous = 7,
Aliased = 8,
Alias = 9,
AliasCode = 10,
Locked = 11,
Mapped = 6,
ThreadLocalStorage = 12,
};
/**

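Side note: these renumbered values are exactly what reaches the guest, since the svcQueryMemory handler in the next file casts the VMA's state straight into MemoryInfo's type field. A small self-contained sketch of that round trip, using an abridged copy of the enum rather than the real Kernel::MemoryState:

#include <cstdint>

// Abridged copy of the new enum, only to illustrate the numbering; the real
// definition is in the hunk above.
enum class MemoryState : std::uint32_t {
    Free = 0,
    IO = 1,
    Normal = 2,
    Code = 3,
    Static = 4,
    Heap = 5,
    Mapped = 6,
    ThreadLocalStorage = 12,
};

// QueryProcessMemory reports a VMA's state with a plain static_cast, so widening
// the enum to u32 keeps the guest-visible value identical to the enumerator.
constexpr std::uint32_t ToQueryType(MemoryState state) {
    return static_cast<std::uint32_t>(state);
}

static_assert(ToQueryType(MemoryState::Heap) == 5, "heap mappings now report type 5");
static_assert(ToQueryType(MemoryState::Free) == 0, "unmapped regions report type 0");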
View file

@@ -110,28 +110,22 @@ static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* /*page_i
Kernel::Handle process_handle, u64 addr) {
using Kernel::Process;
Kernel::SharedPtr<Process> process = Kernel::g_handle_table.Get<Process>(process_handle);
if (process == nullptr)
if (process == nullptr) {
return ERR_INVALID_HANDLE;
auto vma = process->vm_manager.FindVMA(addr);
if (vma == Kernel::g_current_process->vm_manager.vma_map.end())
{
//return Kernel::ERR_INVALID_ADDRESS;
memory_info->base_address = 0;
memory_info->permission = static_cast<u64>(Kernel::VMAPermission::None);
memory_info->size = 0;
memory_info->state = static_cast<u64>(Kernel::MemoryState::Free);
return RESULT_SUCCESS;
}
memory_info->base_address = vma->second.base;
memory_info->permission = static_cast<u64>(vma->second.permissions);
memory_info->size = vma->second.size;
memory_info->state = static_cast<u64>(vma->second.meminfo_state);
auto vma = process->vm_manager.FindVMA(addr);
memory_info->attributes = 0;
if (vma == Kernel::g_current_process->vm_manager.vma_map.end()) {
memory_info->base_address = 0;
memory_info->permission = static_cast<u32>(Kernel::VMAPermission::None);
memory_info->size = 0;
memory_info->type = static_cast<u32>(Kernel::MemoryState::Free);
} else {
memory_info->base_address = vma->second.base;
memory_info->permission = static_cast<u32>(vma->second.permissions);
memory_info->size = vma->second.size;
memory_info->type = static_cast<u32>(vma->second.meminfo_state);
}
LOG_TRACE(Kernel_SVC, "called process=0x%08X addr=%llx", process_handle, addr);
return RESULT_SUCCESS;
}
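Side note: with this change an unmapped address is no longer treated as an error; the handler succeeds and reports a zero-sized free block. A hypothetical expectation sketch, with a mirror struct standing in for the real MemoryInfo:

#include <cstdint>

// Hypothetical mirror of MemoryInfo, used only to spell out the unmapped-address
// response produced by the branch above.
struct QueryMemoryResponse {
    std::uint64_t base_address;
    std::uint64_t size;
    std::uint32_t type;        // Kernel::MemoryState as u32
    std::uint32_t attributes;  // always written as 0 for now
    std::uint32_t permission;  // Kernel::VMAPermission as u32
};

// Unmapped address: success, base 0, size 0, type Free (0), permission None (0).
constexpr QueryMemoryResponse UnmappedQueryResponse() {
    return {0, 0, 0, 0, 0};
}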

View file

@@ -12,8 +12,9 @@
struct MemoryInfo {
u64 base_address;
u64 size;
u64 permission;
u64 state;
u32 type;
u32 attributes;
u32 permission;
};
struct PageInfo {