// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <algorithm>
#include <array>
#include <memory>
#include <vector>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"

namespace Kernel::Memory {
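
// PageHeap manages a contiguous region of heap pages with a buddy-style scheme: the region is
// carved into power-of-two size classes (one Block per entry in MemoryBlockPageShifts below),
// and freed blocks are coalesced upward into larger classes whenever all of their buddies are
// free.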
class PageHeap final : NonCopyable {
public:
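    // Returns the index of the smallest size class that can hold num_pages pages at an
    // align_pages alignment, or -1 if the request exceeds the largest class. See the
    // static_asserts after the class for worked examples.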
    static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
        const auto target_pages{std::max(num_pages, align_pages)};
        for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
            if (target_pages <=
                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                return static_cast<s32>(i);
            }
        }
        return -1;
    }
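
    // Returns the index of the largest size class whose blocks are no bigger than num_pages
    // pages, or -1 if even the smallest class does not fit.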
    static constexpr s32 GetBlockIndex(std::size_t num_pages) {
        for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
            const auto shift_index = static_cast<std::size_t>(i);
            if (num_pages >=
                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[shift_index]) / PageSize) {
                return i;
            }
        }
        return -1;
    }

    static constexpr std::size_t GetBlockSize(std::size_t index) {
        return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
    }

    static constexpr std::size_t GetBlockNumPages(std::size_t index) {
        return GetBlockSize(index) / PageSize;
    }

private:
    static constexpr std::size_t NumMemoryBlockPageShifts{7};
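    // Size classes as log2 of the block byte size: 4KiB, 64KiB, 2MiB, 4MiB, 32MiB, 512MiB and
    // 1GiB.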
    static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
        0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
    };
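
    // A Block tracks every free block of one size class. When a larger size class exists
    // (next_block_shift != 0), PushBlock merges freed buddies upward into it.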
    class Block final : NonCopyable {
    private:
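        // Hierarchical bitmap over the block's slots, where a set bit marks a free block: the
        // deepest level holds one bit per block, and each level above holds one bit per 64-bit
        // word of the level below, so a free block can be located by reading at most MaxDepth
        // words (see FindFreeBlock).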
        class Bitmap final : NonCopyable {
        public:
            static constexpr std::size_t MaxDepth{4};

        private:
            std::array<u64*, MaxDepth> bit_storages{};
            std::size_t num_bits{};
            std::size_t used_depths{};

        public:
            constexpr Bitmap() = default;

            constexpr std::size_t GetNumBits() const {
                return num_bits;
            }
            constexpr s32 GetHighestDepthIndex() const {
                return static_cast<s32>(used_depths) - 1;
            }

            constexpr u64* Initialize(u64* storage, std::size_t size) {
                // Initially, everything is unset
                num_bits = 0;

                // Calculate the needed bitmap depth
                used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
                ASSERT(used_depths <= MaxDepth);

                // Set the bitmap pointers
                for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
                    bit_storages[static_cast<std::size_t>(depth)] = storage;
                    size = Common::AlignUp(size, 64) / 64;
                    storage += size;
                }

                return storage;
            }
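
            // Walks the bitmap from the top level down, following the lowest set bit of each
            // word, so the first free block is found in O(used_depths) word reads rather than
            // a linear scan. Returns -1 when no block is free.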
            s64 FindFreeBlock() const {
                uintptr_t offset{};
                s32 depth{};

                do {
                    const u64 v{bit_storages[static_cast<std::size_t>(depth)][offset]};
                    if (v == 0) {
                        // Non-zero depth indicates that a previous level had a free block
                        ASSERT(depth == 0);
                        return -1;
                    }
                    offset = offset * 64 + Common::CountTrailingZeroes64(v);
                    ++depth;
                } while (depth < static_cast<s32>(used_depths));

                return static_cast<s64>(offset);
            }

            constexpr void SetBit(std::size_t offset) {
                SetBit(GetHighestDepthIndex(), offset);
                num_bits++;
            }

            constexpr void ClearBit(std::size_t offset) {
                ClearBit(GetHighestDepthIndex(), offset);
                num_bits--;
            }
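
            // All-or-nothing: clears count bits starting at offset, but only if every bit in
            // the range is currently set; otherwise returns false and leaves the bitmap
            // untouched. Block::PushBlock uses this to claim a full run of buddies when
            // coalescing.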
            constexpr bool ClearRange(std::size_t offset, std::size_t count) {
                const s32 depth{GetHighestDepthIndex()};
                const auto bit_ind{offset / 64};
                u64* bits{bit_storages[static_cast<std::size_t>(depth)]};
                if (count < 64) {
                    const auto shift{offset % 64};
                    ASSERT(shift + count <= 64);
                    // Check that all the bits are set
                    const u64 mask{((1ULL << count) - 1) << shift};
                    u64 v{bits[bit_ind]};
                    if ((v & mask) != mask) {
                        return false;
                    }

                    // Clear the bits
                    v &= ~mask;
                    bits[bit_ind] = v;
                    if (v == 0) {
                        ClearBit(depth - 1, bit_ind);
                    }
                } else {
                    ASSERT(offset % 64 == 0);
                    ASSERT(count % 64 == 0);
                    // Check that all the bits are set
                    std::size_t remaining{count};
                    std::size_t i = 0;
                    do {
                        if (bits[bit_ind + i++] != ~u64(0)) {
                            return false;
                        }
                        remaining -= 64;
                    } while (remaining > 0);

                    // Clear the bits
                    remaining = count;
                    i = 0;
                    do {
                        bits[bit_ind + i] = 0;
                        ClearBit(depth - 1, bit_ind + i);
                        i++;
                        remaining -= 64;
                    } while (remaining > 0);
                }

                num_bits -= count;
                return true;
            }

        private:
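            // These per-depth helpers keep the summary levels consistent: setting a bit also
            // sets the parent bit when the word was previously empty, and clearing a bit
            // clears the parent bit when the word becomes empty.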
            constexpr void SetBit(s32 depth, std::size_t offset) {
                while (depth >= 0) {
                    const auto ind{offset / 64};
                    const auto which{offset % 64};
                    const u64 mask{1ULL << which};

                    u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
                    const u64 v{*bit};
                    ASSERT((v & mask) == 0);
                    *bit = v | mask;
                    if (v != 0) {
                        break;
                    }
                    offset = ind;
                    depth--;
                }
            }

            constexpr void ClearBit(s32 depth, std::size_t offset) {
                while (depth >= 0) {
                    const auto ind{offset / 64};
                    const auto which{offset % 64};
                    const u64 mask{1ULL << which};

                    u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
                    u64 v{*bit};
                    ASSERT((v & mask) != 0);
                    v &= ~mask;
                    *bit = v;
                    if (v != 0) {
                        break;
                    }
                    offset = ind;
                    depth--;
                }
            }

        private:
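            // Number of bitmap levels needed to cover region_size blocks: one level per
            // factor of 64 (e.g. 1-63 blocks -> 1 level, 64-4095 blocks -> 2 levels).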
            static constexpr s32 GetRequiredDepth(std::size_t region_size) {
                s32 depth = 0;
                while (true) {
                    region_size /= 64;
                    depth++;
                    if (region_size == 0) {
                        return depth;
                    }
                }
            }

        public:
            static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size) {
                std::size_t overhead_bits = 0;
                for (s32 depth{GetRequiredDepth(region_size) - 1}; depth >= 0; depth--) {
                    region_size = Common::AlignUp(region_size, 64) / 64;
                    overhead_bits += region_size;
                }
                return overhead_bits * sizeof(u64);
            }
        };

    private:
        Bitmap bitmap;
        VAddr heap_address{};
        uintptr_t end_offset{};
        std::size_t block_shift{};
        std::size_t next_block_shift{};

    public:
        constexpr Block() = default;

        constexpr std::size_t GetShift() const {
            return block_shift;
        }
        constexpr std::size_t GetNextShift() const {
            return next_block_shift;
        }
        constexpr std::size_t GetSize() const {
            return static_cast<std::size_t>(1) << GetShift();
        }
        constexpr std::size_t GetNumPages() const {
            return GetSize() / PageSize;
        }
        constexpr std::size_t GetNumFreeBlocks() const {
            return bitmap.GetNumBits();
        }
        constexpr std::size_t GetNumFreePages() const {
            return GetNumFreeBlocks() * GetNumPages();
        }
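
        // Maps this block over [addr, addr + size), aligned outward to the next size class so
        // that buddies pair up correctly, and carves this block's bitmap out of bit_storage.
        // Returns the first unused storage word, for the next block's bitmap.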
        constexpr u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
                                  u64* bit_storage) {
            // Set shifts
            block_shift = bs;
            next_block_shift = nbs;

            // Align up the address
            VAddr end{addr + size};
            const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
                                                     : (1ULL << block_shift)};
            addr = Common::AlignDown(addr, align);
            end = Common::AlignUp(end, align);

            heap_address = addr;
            end_offset = (end - addr) / (1ULL << block_shift);
            return bitmap.Initialize(bit_storage, end_offset);
        }
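
        // Frees one block at address. If a larger size class exists and all of this block's
        // buddies are now free, the whole run is claimed and its base address is returned so
        // the caller can push it into the next class; otherwise returns 0.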
        constexpr VAddr PushBlock(VAddr address) {
            // Set the bit for the free block
            std::size_t offset{(address - heap_address) >> GetShift()};
            bitmap.SetBit(offset);

            // If we have a next shift, try to clear the blocks below and return the address
            if (GetNextShift()) {
                const auto diff{1ULL << (GetNextShift() - GetShift())};
                offset = Common::AlignDown(offset, diff);
                if (bitmap.ClearRange(offset, diff)) {
                    return heap_address + (offset << GetShift());
                }
            }

            // We couldn't coalesce, or we're already as big as possible
            return 0;
        }
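
        // Allocates one free block from this size class, returning its address, or 0 if the
        // class is empty.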
        VAddr PopBlock() {
            // Find a free block
            const s64 soffset{bitmap.FindFreeBlock()};
            if (soffset < 0) {
                return 0;
            }
            const auto offset{static_cast<std::size_t>(soffset)};

            // Update our tracking and return it
            bitmap.ClearBit(offset);
            return heap_address + (offset << GetShift());
        }

    public:
        static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size,
                                                                   std::size_t cur_block_shift,
                                                                   std::size_t next_block_shift) {
            const auto cur_block_size{(1ULL << cur_block_shift)};
            const auto next_block_size{(1ULL << next_block_shift)};
            const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
            return Bitmap::CalculateMetadataOverheadSize(
                (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
        }
    };

public:
    PageHeap() = default;

    constexpr VAddr GetAddress() const {
        return heap_address;
    }
    constexpr std::size_t GetSize() const {
        return heap_size;
    }
    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
    }
    constexpr std::size_t GetPageOffset(VAddr block) const {
        return (block - GetAddress()) / PageSize;
    }
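
    // Heap management entry points; the definitions live in the corresponding .cpp file.
    // AllocateBlock takes a size-class index (see GetBlockIndex/GetAlignedBlockIndex), and
    // Free pushes pages back so they can re-coalesce into larger classes where possible.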
    void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
    VAddr AllocateBlock(s32 index);
    void Free(VAddr addr, std::size_t num_pages);

    void UpdateUsedSize() {
        used_size = heap_size - (GetNumFreePages() * PageSize);
    }

    static std::size_t CalculateMetadataOverheadSize(std::size_t region_size);

private:
    constexpr std::size_t GetNumFreePages() const {
        std::size_t num_free{};

        for (const auto& block : blocks) {
            num_free += block.GetNumFreePages();
        }

        return num_free;
    }

    void FreeBlock(VAddr block, s32 index);

    VAddr heap_address{};
    std::size_t heap_size{};
    std::size_t used_size{};
    std::array<Block, NumMemoryBlockPageShifts> blocks{};
    std::vector<u64> metadata;
};
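
// Illustrative sanity checks, assuming the 4KiB PageSize from memory_types.h: the smallest
// size class is a single page, a 16-page request that must also be 16-page aligned lands in
// the 64KiB class (index 1), and 17 free pages still only guarantee a 64KiB block.
static_assert(PageHeap::GetBlockSize(0) == 0x1000);
static_assert(PageHeap::GetBlockNumPages(1) == 16);
static_assert(PageHeap::GetAlignedBlockIndex(16, 16) == 1);
static_assert(PageHeap::GetBlockIndex(17) == 1);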

} // namespace Kernel::Memory