2020-04-05 21:28:31 +02:00
|
|
|
// Copyright 2020 yuzu Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
|
|
|
#include <algorithm>
|
|
|
|
|
|
|
|
#include "common/alignment.h"
|
|
|
|
#include "common/assert.h"
|
|
|
|
#include "common/common_types.h"
|
|
|
|
#include "common/scope_exit.h"
|
2022-01-12 06:57:01 +01:00
|
|
|
#include "core/core.h"
|
|
|
|
#include "core/device_memory.h"
|
2021-02-13 02:38:40 +01:00
|
|
|
#include "core/hle/kernel/k_memory_manager.h"
|
2021-02-13 02:26:01 +01:00
|
|
|
#include "core/hle/kernel/k_page_linked_list.h"
|
2021-02-13 00:43:01 +01:00
|
|
|
#include "core/hle/kernel/svc_results.h"
|
2020-04-05 21:28:31 +02:00
|
|
|
|
2021-02-13 02:38:40 +01:00
|
|
|
namespace Kernel {
|
2020-04-05 21:28:31 +02:00
|
|
|
|
2022-01-12 06:57:01 +01:00
|
|
|
// Constructs the memory manager, retaining a reference to the owning system so
// allocations can later reach device memory (see Allocate's memset of new pages).
KMemoryManager::KMemoryManager(Core::System& system_) : system{system_} {}
|
|
|
|
|
2021-02-13 02:38:40 +01:00
|
|
|
// Initializes this manager to cover [start_address, end_address) for the given
// pool, seeds its KPageHeap with the whole range, and returns the total number
// of bytes of management metadata the region requires.
std::size_t KMemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
    const auto size{end_address - start_address};

    // Calculate metadata sizes:
    // - a u16 reference count per page,
    // - one bit per page for the optimize map, packed into u64 words.
    // Use Common::BitSize<u64>() instead of a magic 64, for consistency with
    // CalculateManagementOverheadSize, which computes the same quantity.
    const auto ref_count_size{(size / PageSize) * sizeof(u16)};
    const auto optimize_map_size{
        (Common::AlignUp((size / PageSize), Common::BitSize<u64>()) / Common::BitSize<u64>()) *
        sizeof(u64)};
    const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)};
    const auto page_heap_size{KPageHeap::CalculateManagementOverheadSize(size)};
    const auto total_metadata_size{manager_size + page_heap_size};
    ASSERT(manager_size <= total_metadata_size);
    ASSERT(Common::IsAligned(total_metadata_size, PageSize));

    // Setup region
    pool = new_pool;

    // Initialize the manager's KPageHeap
    heap.Initialize(start_address, size, page_heap_size);

    // Free the memory to the heap
    heap.Free(start_address, size / PageSize);

    // Update the heap's used size
    heap.UpdateUsedSize();

    return total_metadata_size;
}
|
|
|
|
|
2021-02-13 02:38:40 +01:00
|
|
|
// Initializes the backing manager for a single pool over the given physical range.
void KMemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
    // Each pool maps to exactly one manager slot; validate the index first.
    ASSERT(pool < Pool::Count);
    const auto manager_index = static_cast<std::size_t>(pool);
    managers[manager_index].Initialize(pool, start_address, end_address);
}
|
|
|
|
|
2021-02-13 02:38:40 +01:00
|
|
|
// Allocates a single physically-contiguous run of num_pages pages with the
// requested alignment, from the pool encoded in option. Returns the base
// address of the run, or a null address on failure.
VAddr KMemoryManager::AllocateAndOpenContinuous(std::size_t num_pages, std::size_t align_pages,
                                                u32 option) {
    // Nothing to do for an empty request.
    if (num_pages == 0) {
        return {};
    }

    // Decode the pool (and direction) from the option, then lock that pool.
    const auto [pool, dir] = DecodeOption(option);
    const auto which_pool{static_cast<std::size_t>(pool)};
    std::lock_guard lk{pool_locks[which_pool]};

    // Pick the smallest heap block index satisfying both size and alignment.
    const s32 block_index{KPageHeap::GetAlignedBlockIndex(num_pages, align_pages)};

    // TODO (bunnei): Support multiple managers
    Impl& manager{managers[which_pool]};

    // Try to carve a block out of the chosen manager's heap.
    const VAddr address{manager.AllocateBlock(block_index, false)};
    if (address == 0) {
        // Allocation failed.
        return {};
    }

    // The heap may hand back a larger block than requested; return the surplus
    // tail pages to the heap so only num_pages stay allocated.
    const auto pages_in_block{KPageHeap::GetBlockNumPages(block_index)};
    if (pages_in_block > num_pages) {
        manager.Free(address + num_pages * PageSize, pages_in_block - num_pages);
    }

    return address;
}
|
|
|
|
|
2021-02-13 02:38:40 +01:00
|
|
|
// Allocates num_pages of physical memory from the given pool, appending the
// resulting blocks to page_list, and fills the allocated pages with
// heap_fill_value. `dir` is currently unused (single-manager implementation).
// Returns ResultOutOfMemory if the request cannot be fully satisfied; on any
// failure, all partially-allocated pages are returned to the pool.
ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
                                    Direction dir, u32 heap_fill_value) {
    // Callers must pass an empty list; allocated blocks are appended to it.
    ASSERT(page_list.GetNumPages() == 0);

    // Early return if we're allocating no pages
    if (num_pages == 0) {
        return ResultSuccess;
    }

    // Lock the pool that we're allocating from
    const auto pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};

    // Choose a heap based on our page size request
    const s32 heap_index{KPageHeap::GetBlockIndex(num_pages)};
    if (heap_index < 0) {
        // Request exceeds the largest supported block size.
        return ResultOutOfMemory;
    }

    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};

    // Ensure that we don't leave anything un-freed: on any error path below,
    // this guard returns every block already placed in page_list to the manager.
    auto group_guard = detail::ScopeExit([&] {
        for (const auto& it : page_list.Nodes()) {
            // Clamp each node to the manager's range before freeing.
            const auto min_num_pages{std::min<size_t>(
                it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
            chosen_manager.Free(it.GetAddress(), min_num_pages);
        }
    });

    // Keep allocating until we've allocated all our pages, trying progressively
    // smaller block sizes as larger ones are exhausted.
    for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
        const auto pages_per_alloc{KPageHeap::GetBlockNumPages(index)};

        while (num_pages >= pages_per_alloc) {
            // Allocate a block
            VAddr allocated_block{chosen_manager.AllocateBlock(index, false)};
            if (!allocated_block) {
                // No blocks of this size remain; fall through to a smaller size.
                break;
            }

            // Safely add it to our group: if AddBlock fails, block_guard
            // returns this block to the manager before we propagate the error.
            {
                auto block_guard = detail::ScopeExit(
                    [&] { chosen_manager.Free(allocated_block, pages_per_alloc); });

                if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)};
                    result.IsError()) {
                    return result;
                }

                block_guard.Cancel();
            }

            num_pages -= pages_per_alloc;
        }
    }

    // Clear allocated memory.
    for (const auto& it : page_list.Nodes()) {
        std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value,
                    it.GetSize());
    }

    // Only succeed if we allocated as many pages as we wanted
    if (num_pages) {
        return ResultOutOfMemory;
    }

    // We succeeded! Disarm the cleanup guard so the pages stay allocated.
    group_guard.Cancel();

    return ResultSuccess;
}
|
|
|
|
|
2021-02-13 02:38:40 +01:00
|
|
|
// Returns the pages described by page_list to the given pool. `dir` and
// `heap_fill_value` are accepted for signature symmetry with Allocate but are
// not used here. Always succeeds.
ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
                                Direction dir, u32 heap_fill_value) {
    // An empty request is trivially successful.
    if (num_pages == 0) {
        return ResultSuccess;
    }

    // Serialize access to the pool being freed into.
    const auto index{static_cast<std::size_t>(pool)};
    std::lock_guard guard{pool_locks[index]};

    // TODO (bunnei): Support multiple managers
    Impl& manager{managers[index]};

    // Hand each node's pages back to the manager, clamping every node to the
    // manager's address range first.
    for (const auto& node : page_list.Nodes()) {
        const auto pages_to_end{(manager.GetEndAddress() - node.GetAddress()) / PageSize};
        const auto pages_to_free{std::min<size_t>(node.GetNumPages(), pages_to_end)};
        manager.Free(node.GetAddress(), pages_to_free);
    }

    return ResultSuccess;
}
|
|
|
|
|
2021-02-19 02:55:46 +01:00
|
|
|
// Computes how many bytes of management metadata a region of region_size bytes
// needs: a u16 reference count per page, a one-bit-per-page optimize map packed
// into u64 words (page-aligned together), plus the page heap's own overhead.
std::size_t KMemoryManager::Impl::CalculateManagementOverheadSize(std::size_t region_size) {
    // Number of pages the region spans.
    const std::size_t page_count = region_size / PageSize;
    // u16 reference counter per page.
    const std::size_t ref_count_size = page_count * sizeof(u16);
    // One bit per page, rounded up to whole u64 words.
    const std::size_t map_words =
        Common::AlignUp(page_count, Common::BitSize<u64>()) / Common::BitSize<u64>();
    const std::size_t optimize_map_size = map_words * sizeof(u64);
    // Manager metadata is page-aligned as a unit.
    const std::size_t manager_meta_size =
        Common::AlignUp(optimize_map_size + ref_count_size, PageSize);
    return manager_meta_size + KPageHeap::CalculateManagementOverheadSize(region_size);
}
|
|
|
|
|
2021-02-13 02:38:40 +01:00
|
|
|
} // namespace Kernel
|