2019-02-19 04:46:06 +01:00
|
|
|
// Copyright 2018 yuzu Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
|
|
|
#include <algorithm>
|
|
|
|
#include <optional>
|
|
|
|
#include <tuple>
|
|
|
|
#include <vector>
|
2020-01-06 21:14:41 +01:00
|
|
|
|
2019-02-19 04:46:06 +01:00
|
|
|
#include "common/alignment.h"
|
|
|
|
#include "common/assert.h"
|
|
|
|
#include "common/common_types.h"
|
|
|
|
#include "common/logging/log.h"
|
|
|
|
#include "video_core/renderer_vulkan/vk_device.h"
|
|
|
|
#include "video_core/renderer_vulkan/vk_memory_manager.h"
|
2020-03-27 05:33:21 +01:00
|
|
|
#include "video_core/renderer_vulkan/wrapper.h"
|
2019-02-19 04:46:06 +01:00
|
|
|
|
|
|
|
namespace Vulkan {
|
|
|
|
|
2020-01-06 21:14:41 +01:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
u64 GetAllocationChunkSize(u64 required_size) {
|
|
|
|
static constexpr u64 sizes[] = {16ULL << 20, 32ULL << 20, 64ULL << 20, 128ULL << 20};
|
|
|
|
auto it = std::lower_bound(std::begin(sizes), std::end(sizes), required_size);
|
|
|
|
return it != std::end(sizes) ? *it : Common::AlignUp(required_size, 256ULL << 20);
|
|
|
|
}
|
|
|
|
|
|
|
|
} // Anonymous namespace
|
2019-02-19 04:46:06 +01:00
|
|
|
|
|
|
|
class VKMemoryAllocation final {
|
|
|
|
public:
|
|
|
|
explicit VKMemoryAllocation(const VKDevice& device, vk::DeviceMemory memory,
|
2020-03-27 05:33:21 +01:00
|
|
|
VkMemoryPropertyFlags properties, u64 allocation_size, u32 type)
|
|
|
|
: device{device}, memory{std::move(memory)}, properties{properties},
|
|
|
|
allocation_size{allocation_size}, shifted_type{ShiftType(type)} {}
|
2019-02-19 04:46:06 +01:00
|
|
|
|
2020-03-27 05:33:21 +01:00
|
|
|
VKMemoryCommit Commit(VkDeviceSize commit_size, VkDeviceSize alignment) {
|
2020-01-06 21:14:41 +01:00
|
|
|
auto found = TryFindFreeSection(free_iterator, allocation_size,
|
|
|
|
static_cast<u64>(commit_size), static_cast<u64>(alignment));
|
2019-02-19 04:46:06 +01:00
|
|
|
if (!found) {
|
|
|
|
found = TryFindFreeSection(0, free_iterator, static_cast<u64>(commit_size),
|
|
|
|
static_cast<u64>(alignment));
|
|
|
|
if (!found) {
|
|
|
|
// Signal out of memory, it'll try to do more allocations.
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
}
|
2020-01-06 21:14:41 +01:00
|
|
|
auto commit = std::make_unique<VKMemoryCommitImpl>(device, this, memory, *found,
|
2019-02-19 04:46:06 +01:00
|
|
|
*found + commit_size);
|
|
|
|
commits.push_back(commit.get());
|
|
|
|
|
|
|
|
// Last commit's address is highly probable to be free.
|
|
|
|
free_iterator = *found + commit_size;
|
|
|
|
|
|
|
|
return commit;
|
|
|
|
}
|
|
|
|
|
|
|
|
void Free(const VKMemoryCommitImpl* commit) {
|
|
|
|
ASSERT(commit);
|
2020-01-06 21:14:41 +01:00
|
|
|
|
|
|
|
const auto it = std::find(std::begin(commits), std::end(commits), commit);
|
2019-02-19 04:46:06 +01:00
|
|
|
if (it == commits.end()) {
|
2020-01-06 21:14:41 +01:00
|
|
|
UNREACHABLE_MSG("Freeing unallocated commit!");
|
2019-02-19 04:46:06 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
commits.erase(it);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns whether this allocation is compatible with the arguments.
|
2020-03-27 05:33:21 +01:00
|
|
|
bool IsCompatible(VkMemoryPropertyFlags wanted_properties, u32 type_mask) const {
|
|
|
|
return (wanted_properties & properties) && (type_mask & shifted_type) != 0;
|
2019-02-19 04:46:06 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
static constexpr u32 ShiftType(u32 type) {
|
|
|
|
return 1U << type;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// A memory allocator, it may return a free region between "start" and "end" with the solicited
|
2020-01-06 21:14:41 +01:00
|
|
|
/// requirements.
|
2019-02-19 04:46:06 +01:00
|
|
|
std::optional<u64> TryFindFreeSection(u64 start, u64 end, u64 size, u64 alignment) const {
|
2020-01-06 21:14:41 +01:00
|
|
|
u64 iterator = Common::AlignUp(start, alignment);
|
|
|
|
while (iterator + size <= end) {
|
|
|
|
const u64 try_left = iterator;
|
2019-02-19 04:46:06 +01:00
|
|
|
const u64 try_right = try_left + size;
|
|
|
|
|
|
|
|
bool overlap = false;
|
|
|
|
for (const auto& commit : commits) {
|
|
|
|
const auto [commit_left, commit_right] = commit->interval;
|
|
|
|
if (try_left < commit_right && commit_left < try_right) {
|
|
|
|
// There's an overlap, continue the search where the overlapping commit ends.
|
2020-01-06 21:14:41 +01:00
|
|
|
iterator = Common::AlignUp(commit_right, alignment);
|
2019-02-19 04:46:06 +01:00
|
|
|
overlap = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!overlap) {
|
|
|
|
// A free address has been found.
|
|
|
|
return try_left;
|
|
|
|
}
|
|
|
|
}
|
2020-01-06 21:14:41 +01:00
|
|
|
|
2019-02-19 04:46:06 +01:00
|
|
|
// No free regions where found, return an empty optional.
|
|
|
|
return std::nullopt;
|
|
|
|
}
|
|
|
|
|
2020-03-27 05:33:21 +01:00
|
|
|
const VKDevice& device; ///< Vulkan device.
|
|
|
|
const vk::DeviceMemory memory; ///< Vulkan memory allocation handler.
|
|
|
|
const VkMemoryPropertyFlags properties; ///< Vulkan properties.
|
|
|
|
const u64 allocation_size; ///< Size of this allocation.
|
|
|
|
const u32 shifted_type; ///< Stored Vulkan type of this allocation, shifted.
|
2019-02-19 04:46:06 +01:00
|
|
|
|
|
|
|
/// Hints where the next free region is likely going to be.
|
|
|
|
u64 free_iterator{};
|
|
|
|
|
|
|
|
/// Stores all commits done from this allocation.
|
|
|
|
std::vector<const VKMemoryCommitImpl*> commits;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Caches the physical device's memory properties once; whether memory is unified cannot change
// at runtime, so it is computed here as well. NOTE(review): initializer order must match the
// member declaration order in the header, since is_memory_unified reads `properties` — confirm.
VKMemoryManager::VKMemoryManager(const VKDevice& device)
    : device{device}, properties{device.GetPhysical().GetMemoryProperties()},
      is_memory_unified{GetMemoryUnified(properties)} {}

VKMemoryManager::~VKMemoryManager() = default;
|
|
|
|
|
2020-03-27 05:33:21 +01:00
|
|
|
VKMemoryCommit VKMemoryManager::Commit(const VkMemoryRequirements& requirements,
                                       bool host_visible) {
    // Host visible commits search for host visible and coherent memory; everything else targets
    // fast device local memory.
    const VkMemoryPropertyFlags wanted_properties =
        host_visible ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
                     : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // First try to serve the request from the allocations we already own.
    if (auto commit = TryAllocCommit(requirements, wanted_properties)) {
        return commit;
    }

    // Nothing had room; grow the pool by one chunk sized for this request.
    const u64 chunk_size = GetAllocationChunkSize(requirements.size);
    if (!AllocMemory(wanted_properties, requirements.memoryTypeBits, chunk_size)) {
        // TODO(Rodrigo): Handle these situations in some way like flushing to guest memory.
        // Allocation has failed, panic.
        UNREACHABLE_MSG("Ran out of VRAM!");
        return {};
    }

    // Retry against the fresh allocation; a failure at this point is a bug.
    auto commit = TryAllocCommit(requirements, wanted_properties);
    ASSERT(commit);
    return commit;
}
|
|
|
|
|
2020-03-27 05:33:21 +01:00
|
|
|
VKMemoryCommit VKMemoryManager::Commit(const vk::Buffer& buffer, bool host_visible) {
    // Query the buffer's requirements, commit memory for them, then bind it to the buffer.
    const auto requirements = device.GetLogical().GetBufferMemoryRequirements(*buffer);
    auto commit = Commit(requirements, host_visible);
    buffer.BindMemory(commit->GetMemory(), commit->GetOffset());
    return commit;
}
|
|
|
|
|
2020-03-27 05:33:21 +01:00
|
|
|
VKMemoryCommit VKMemoryManager::Commit(const vk::Image& image, bool host_visible) {
    // Query the image's requirements, commit memory for them, then bind it to the image.
    const auto requirements = device.GetLogical().GetImageMemoryRequirements(*image);
    auto commit = Commit(requirements, host_visible);
    image.BindMemory(commit->GetMemory(), commit->GetOffset());
    return commit;
}
|
|
|
|
|
2020-03-27 05:33:21 +01:00
|
|
|
bool VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask,
|
2019-02-19 04:46:06 +01:00
|
|
|
u64 size) {
|
2020-01-06 21:14:41 +01:00
|
|
|
const u32 type = [&] {
|
|
|
|
for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) {
|
|
|
|
const auto flags = properties.memoryTypes[type_index].propertyFlags;
|
2019-02-19 04:46:06 +01:00
|
|
|
if ((type_mask & (1U << type_index)) && (flags & wanted_properties)) {
|
|
|
|
// The type matches in type and in the wanted properties.
|
|
|
|
return type_index;
|
|
|
|
}
|
|
|
|
}
|
2020-01-06 21:14:41 +01:00
|
|
|
UNREACHABLE_MSG("Couldn't find a compatible memory type!");
|
|
|
|
return 0U;
|
2019-02-19 04:46:06 +01:00
|
|
|
}();
|
|
|
|
|
|
|
|
// Try to allocate found type.
|
2020-03-27 05:33:21 +01:00
|
|
|
VkMemoryAllocateInfo memory_ai;
|
|
|
|
memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
|
|
|
|
memory_ai.pNext = nullptr;
|
|
|
|
memory_ai.allocationSize = size;
|
|
|
|
memory_ai.memoryTypeIndex = type;
|
|
|
|
|
|
|
|
vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory(memory_ai);
|
|
|
|
if (!memory) {
|
|
|
|
LOG_CRITICAL(Render_Vulkan, "Device allocation failed!");
|
2019-02-19 04:46:06 +01:00
|
|
|
return false;
|
|
|
|
}
|
2020-03-27 05:33:21 +01:00
|
|
|
|
|
|
|
allocations.push_back(std::make_unique<VKMemoryAllocation>(device, std::move(memory),
|
|
|
|
wanted_properties, size, type));
|
2019-02-19 04:46:06 +01:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-03-27 05:33:21 +01:00
|
|
|
VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requirements,
                                               VkMemoryPropertyFlags wanted_properties) {
    // Walk existing allocations and hand out the first commit that fits; an empty commit means
    // every compatible allocation is full (or none is compatible).
    for (auto& allocation : allocations) {
        if (allocation->IsCompatible(wanted_properties, requirements.memoryTypeBits)) {
            if (auto commit = allocation->Commit(requirements.size, requirements.alignment)) {
                return commit;
            }
        }
    }
    return {};
}
|
|
|
|
|
2020-03-27 05:33:21 +01:00
|
|
|
bool VKMemoryManager::GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties) {
|
2020-01-06 21:14:41 +01:00
|
|
|
for (u32 heap_index = 0; heap_index < properties.memoryHeapCount; ++heap_index) {
|
2020-03-27 05:33:21 +01:00
|
|
|
if (!(properties.memoryHeaps[heap_index].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)) {
|
2019-02-19 04:46:06 +01:00
|
|
|
// Memory is considered unified when heaps are device local only.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-01-06 21:14:41 +01:00
|
|
|
// A commit is a view into a subregion [begin, end) of the parent allocation's device memory.
// The parent allocation must outlive the commit, since the destructor calls back into it.
VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
                                       const vk::DeviceMemory& memory, u64 begin, u64 end)
    : device{device}, memory{memory}, interval{begin, end}, allocation{allocation} {}
|
2019-02-19 04:46:06 +01:00
|
|
|
|
|
|
|
VKMemoryCommitImpl::~VKMemoryCommitImpl() {
    // Returns this commit's region to the parent allocation when the commit is destroyed.
    allocation->Free(this);
}
|
|
|
|
|
2020-01-06 21:14:41 +01:00
|
|
|
// Maps `size` bytes of this commit into host address space. `offset_` is relative to the
// commit's own start (interval.first), not to the underlying allocation.
MemoryMap VKMemoryCommitImpl::Map(u64 size, u64 offset_) const {
    return MemoryMap{this, memory.Map(interval.first + offset_, size)};
}
|
|
|
|
|
|
|
|
// Unmaps the backing device memory object. NOTE(review): presumably invoked by MemoryMap when
// the mapping is released — confirm against the header.
void VKMemoryCommitImpl::Unmap() const {
    memory.Unmap();
}
|
|
|
|
|
|
|
|
// Maps the entire commit; the interval is [begin, end), so its size is end - begin.
MemoryMap VKMemoryCommitImpl::Map() const {
    return Map(interval.second - interval.first);
}
|
|
|
|
|
|
|
|
} // namespace Vulkan
|