
renderer_vulkan: Make unconditional use of VK_KHR_timeline_semaphore

This reworks how host<->device synchronization works on the Vulkan
backend. Instead of "protecting" resources with a fence and marking
them as free once that fence is known to have been signalled by the
host GPU, use timeline semaphores.

Vulkan timeline semaphores allow us to work with a subset of D3D12
fences. As far as we are concerned, a timeline semaphore is a value set
by the host or the device that either of them can wait on.
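
As a point of reference (not code from this commit), a minimal sketch of creating a timeline semaphore and waiting on a value from the host; the helper names are invented for the example and the KHR entry points are assumed to be loaded:

```cpp
#include <cstdint>
#include <vulkan/vulkan.h>

// Illustrative helpers only; assumes vkWaitSemaphoresKHR has been loaded through
// vkGetDeviceProcAddr, as required by the VK_KHR_timeline_semaphore extension.
VkSemaphore CreateTimelineSemaphore(VkDevice device) {
    const VkSemaphoreTypeCreateInfoKHR type_ci{
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
        .pNext = nullptr,
        .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
        .initialValue = 0,
    };
    const VkSemaphoreCreateInfo semaphore_ci{
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
        .pNext = &type_ci,
        .flags = 0,
    };
    VkSemaphore semaphore{};
    vkCreateSemaphore(device, &semaphore_ci, nullptr, &semaphore);
    return semaphore;
}

// Blocks the host until the semaphore counter reaches `tick`, regardless of
// whether that value is signalled by a queue submission or by the host itself.
void HostWaitForTick(VkDevice device, VkSemaphore semaphore, std::uint64_t tick) {
    const VkSemaphoreWaitInfoKHR wait_info{
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR,
        .pNext = nullptr,
        .flags = 0,
        .semaphoreCount = 1,
        .pSemaphores = &semaphore,
        .pValues = &tick,
    };
    vkWaitSemaphoresKHR(device, &wait_info, UINT64_MAX);
}
```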

Taking advantage of this, we can keep a monotonically increasing
atomic value for each submission to the graphics queue. Instead of
protecting resources with a fence, we simply store the current logical
tick (the atomic value kept in CPU memory). When we want to know if a
resource is free, we compare it against the current GPU tick.
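
The device side of that contract is filled in at submit time. A hedged sketch (not the scheduler code from this commit) of how a graphics-queue submission can signal the next logical tick through VkTimelineSemaphoreSubmitInfoKHR:

```cpp
#include <cstdint>
#include <vulkan/vulkan.h>

// Illustrative only: chains the timeline payload into a regular VkSubmitInfo so the
// semaphore counter becomes `signal_tick` once this submission finishes on the GPU.
void SubmitAndSignalTick(VkQueue graphics_queue, VkCommandBuffer cmdbuf,
                         VkSemaphore timeline_semaphore, std::uint64_t signal_tick) {
    const VkTimelineSemaphoreSubmitInfoKHR timeline_si{
        .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
        .pNext = nullptr,
        .waitSemaphoreValueCount = 0,
        .pWaitSemaphoreValues = nullptr,
        .signalSemaphoreValueCount = 1,
        .pSignalSemaphoreValues = &signal_tick,
    };
    const VkSubmitInfo submit_info{
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .pNext = &timeline_si,
        .waitSemaphoreCount = 0,
        .pWaitSemaphores = nullptr,
        .pWaitDstStageMask = nullptr,
        .commandBufferCount = 1,
        .pCommandBuffers = &cmdbuf,
        .signalSemaphoreCount = 1,
        .pSignalSemaphores = &timeline_semaphore,
    };
    vkQueueSubmit(graphics_queue, 1, &submit_info, VK_NULL_HANDLE);
}
```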

This greatly simplifies resource management code, and the free status of
resources should have fewer false negatives.
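
The resulting pattern looks roughly like the sketch below, written against the MasterSemaphore interface added later in this diff (CurrentTick and Wait); the TrackedResource struct and the two free functions are invented for the example:

```cpp
#include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_master_semaphore.h"

namespace Vulkan {

struct TrackedResource {
    u64 tick = 0; // Logical tick of the last submission that used this resource.
};

// On use, stamp the resource with the CPU-side logical tick instead of
// attaching a fence watch to it.
void OnResourceUsed(TrackedResource& resource, const MasterSemaphore& master_semaphore) {
    resource.tick = master_semaphore.CurrentTick();
}

// Before reuse, the resource is free once the GPU has signalled a timeline
// value at or past its stamp; otherwise block until it does.
void WaitUntilFree(const TrackedResource& resource, MasterSemaphore& master_semaphore) {
    master_semaphore.Wait(resource.tick);
}

} // namespace Vulkan
```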

To work around bugs in the validation layers, a thread waits on the
timeline semaphore whenever the layers are attached.
ReinUsesLisp 2020-09-10 03:43:30 -03:00
parent 1eae35621e
commit 58b0ae84b5
42 changed files with 639 additions and 815 deletions

View file

@ -190,6 +190,8 @@ if (ENABLE_VULKAN)
renderer_vulkan/vk_blit_screen.h renderer_vulkan/vk_blit_screen.h
renderer_vulkan/vk_buffer_cache.cpp renderer_vulkan/vk_buffer_cache.cpp
renderer_vulkan/vk_buffer_cache.h renderer_vulkan/vk_buffer_cache.h
renderer_vulkan/vk_command_pool.cpp
renderer_vulkan/vk_command_pool.h
renderer_vulkan/vk_compute_pass.cpp renderer_vulkan/vk_compute_pass.cpp
renderer_vulkan/vk_compute_pass.h renderer_vulkan/vk_compute_pass.h
renderer_vulkan/vk_compute_pipeline.cpp renderer_vulkan/vk_compute_pipeline.cpp
@ -204,6 +206,8 @@ if (ENABLE_VULKAN)
renderer_vulkan/vk_graphics_pipeline.h renderer_vulkan/vk_graphics_pipeline.h
renderer_vulkan/vk_image.cpp renderer_vulkan/vk_image.cpp
renderer_vulkan/vk_image.h renderer_vulkan/vk_image.h
renderer_vulkan/vk_master_semaphore.cpp
renderer_vulkan/vk_master_semaphore.h
renderer_vulkan/vk_memory_manager.cpp renderer_vulkan/vk_memory_manager.cpp
renderer_vulkan/vk_memory_manager.h renderer_vulkan/vk_memory_manager.h
renderer_vulkan/vk_pipeline_cache.cpp renderer_vulkan/vk_pipeline_cache.cpp
@ -214,8 +218,8 @@ if (ENABLE_VULKAN)
renderer_vulkan/vk_rasterizer.h renderer_vulkan/vk_rasterizer.h
renderer_vulkan/vk_renderpass_cache.cpp renderer_vulkan/vk_renderpass_cache.cpp
renderer_vulkan/vk_renderpass_cache.h renderer_vulkan/vk_renderpass_cache.h
renderer_vulkan/vk_resource_manager.cpp renderer_vulkan/vk_resource_pool.cpp
renderer_vulkan/vk_resource_manager.h renderer_vulkan/vk_resource_pool.h
renderer_vulkan/vk_sampler_cache.cpp renderer_vulkan/vk_sampler_cache.cpp
renderer_vulkan/vk_sampler_cache.h renderer_vulkan/vk_sampler_cache.h
renderer_vulkan/vk_scheduler.cpp renderer_vulkan/vk_scheduler.cpp

View file

@ -91,8 +91,7 @@ private:
std::shared_ptr<HostCounter> last; std::shared_ptr<HostCounter> last;
}; };
template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter, template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter>
class QueryPool>
class QueryCacheBase { class QueryCacheBase {
public: public:
explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_, explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_,
@ -206,9 +205,6 @@ public:
committed_flushes.pop_front(); committed_flushes.pop_front();
} }
protected:
std::array<QueryPool, VideoCore::NumQueryTypes> query_pools;
private: private:
/// Flushes a memory range to guest memory and removes it from the cache. /// Flushes a memory range to guest memory and removes it from the cache.
void FlushAndRemoveRegion(VAddr addr, std::size_t size) { void FlushAndRemoveRegion(VAddr addr, std::size_t size) {

View file

@ -32,10 +32,8 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) {
QueryCache::QueryCache(RasterizerOpenGL& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d, QueryCache::QueryCache(RasterizerOpenGL& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d,
Tegra::MemoryManager& gpu_memory) Tegra::MemoryManager& gpu_memory)
: VideoCommon::QueryCacheBase< : VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter>(
QueryCache, CachedQuery, CounterStream, HostCounter, rasterizer, maxwell3d, gpu_memory),
std::vector<OGLQuery>>{static_cast<VideoCore::RasterizerInterface&>(rasterizer),
maxwell3d, gpu_memory},
gl_rasterizer{rasterizer} {} gl_rasterizer{rasterizer} {}
QueryCache::~QueryCache() = default; QueryCache::~QueryCache() = default;
@ -91,6 +89,8 @@ u64 HostCounter::BlockingQuery() const {
CachedQuery::CachedQuery(QueryCache& cache, VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr) CachedQuery::CachedQuery(QueryCache& cache, VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr)
: VideoCommon::CachedQueryBase<HostCounter>{cpu_addr, host_ptr}, cache{&cache}, type{type} {} : VideoCommon::CachedQueryBase<HostCounter>{cpu_addr, host_ptr}, cache{&cache}, type{type} {}
CachedQuery::~CachedQuery() = default;
CachedQuery::CachedQuery(CachedQuery&& rhs) noexcept CachedQuery::CachedQuery(CachedQuery&& rhs) noexcept
: VideoCommon::CachedQueryBase<HostCounter>(std::move(rhs)), cache{rhs.cache}, type{rhs.type} {} : VideoCommon::CachedQueryBase<HostCounter>(std::move(rhs)), cache{rhs.cache}, type{rhs.type} {}

View file

@ -26,8 +26,8 @@ class RasterizerOpenGL;
using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>; using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;
class QueryCache final : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, class QueryCache final
HostCounter, std::vector<OGLQuery>> { : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
public: public:
explicit QueryCache(RasterizerOpenGL& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d, explicit QueryCache(RasterizerOpenGL& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d,
Tegra::MemoryManager& gpu_memory); Tegra::MemoryManager& gpu_memory);
@ -41,6 +41,7 @@ public:
private: private:
RasterizerOpenGL& gl_rasterizer; RasterizerOpenGL& gl_rasterizer;
std::array<std::vector<OGLQuery>, VideoCore::NumQueryTypes> query_pools;
}; };
class HostCounter final : public VideoCommon::HostCounterBase<QueryCache, HostCounter> { class HostCounter final : public VideoCommon::HostCounterBase<QueryCache, HostCounter> {
@ -63,10 +64,12 @@ class CachedQuery final : public VideoCommon::CachedQueryBase<HostCounter> {
public: public:
explicit CachedQuery(QueryCache& cache, VideoCore::QueryType type, VAddr cpu_addr, explicit CachedQuery(QueryCache& cache, VideoCore::QueryType type, VAddr cpu_addr,
u8* host_ptr); u8* host_ptr);
CachedQuery(CachedQuery&& rhs) noexcept; ~CachedQuery() override;
CachedQuery(const CachedQuery&) = delete;
CachedQuery(CachedQuery&& rhs) noexcept;
CachedQuery& operator=(CachedQuery&& rhs) noexcept; CachedQuery& operator=(CachedQuery&& rhs) noexcept;
CachedQuery(const CachedQuery&) = delete;
CachedQuery& operator=(const CachedQuery&) = delete; CachedQuery& operator=(const CachedQuery&) = delete;
void Flush() override; void Flush() override;

View file

@ -25,9 +25,9 @@
#include "video_core/renderer_vulkan/renderer_vulkan.h" #include "video_core/renderer_vulkan/renderer_vulkan.h"
#include "video_core/renderer_vulkan/vk_blit_screen.h" #include "video_core/renderer_vulkan/vk_blit_screen.h"
#include "video_core/renderer_vulkan/vk_device.h" #include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_master_semaphore.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h" #include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h" #include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_state_tracker.h" #include "video_core/renderer_vulkan/vk_state_tracker.h"
#include "video_core/renderer_vulkan/vk_swapchain.h" #include "video_core/renderer_vulkan/vk_swapchain.h"
@ -56,7 +56,7 @@ VkBool32 DebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
VkDebugUtilsMessageTypeFlagsEXT type, VkDebugUtilsMessageTypeFlagsEXT type,
const VkDebugUtilsMessengerCallbackDataEXT* data, const VkDebugUtilsMessengerCallbackDataEXT* data,
[[maybe_unused]] void* user_data) { [[maybe_unused]] void* user_data) {
const char* message{data->pMessage}; const char* const message{data->pMessage};
if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) { if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
LOG_CRITICAL(Render_Vulkan, "{}", message); LOG_CRITICAL(Render_Vulkan, "{}", message);
@ -269,11 +269,11 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
scheduler->WaitWorker(); scheduler->WaitWorker();
swapchain->AcquireNextImage(); swapchain->AcquireNextImage();
const auto [fence, render_semaphore] = blit_screen->Draw(*framebuffer, use_accelerated); const VkSemaphore render_semaphore = blit_screen->Draw(*framebuffer, use_accelerated);
scheduler->Flush(false, render_semaphore); scheduler->Flush(render_semaphore);
if (swapchain->Present(render_semaphore, fence)) { if (swapchain->Present(render_semaphore)) {
blit_screen->Recreate(); blit_screen->Recreate();
} }
@ -300,23 +300,21 @@ bool RendererVulkan::Init() {
memory_manager = std::make_unique<VKMemoryManager>(*device); memory_manager = std::make_unique<VKMemoryManager>(*device);
resource_manager = std::make_unique<VKResourceManager>(*device);
const auto& framebuffer = render_window.GetFramebufferLayout();
swapchain = std::make_unique<VKSwapchain>(*surface, *device);
swapchain->Create(framebuffer.width, framebuffer.height, false);
state_tracker = std::make_unique<StateTracker>(gpu); state_tracker = std::make_unique<StateTracker>(gpu);
scheduler = std::make_unique<VKScheduler>(*device, *resource_manager, *state_tracker); scheduler = std::make_unique<VKScheduler>(*device, *state_tracker);
rasterizer = std::make_unique<RasterizerVulkan>( const auto& framebuffer = render_window.GetFramebufferLayout();
render_window, gpu, gpu.MemoryManager(), cpu_memory, screen_info, *device, swapchain = std::make_unique<VKSwapchain>(*surface, *device, *scheduler);
*resource_manager, *memory_manager, *state_tracker, *scheduler); swapchain->Create(framebuffer.width, framebuffer.height, false);
blit_screen = std::make_unique<VKBlitScreen>(cpu_memory, render_window, *rasterizer, *device, rasterizer = std::make_unique<RasterizerVulkan>(render_window, gpu, gpu.MemoryManager(),
*resource_manager, *memory_manager, *swapchain, cpu_memory, screen_info, *device,
*scheduler, screen_info); *memory_manager, *state_tracker, *scheduler);
blit_screen =
std::make_unique<VKBlitScreen>(cpu_memory, render_window, *rasterizer, *device,
*memory_manager, *swapchain, *scheduler, screen_info);
return true; return true;
} }
@ -334,7 +332,6 @@ void RendererVulkan::ShutDown() {
scheduler.reset(); scheduler.reset();
swapchain.reset(); swapchain.reset();
memory_manager.reset(); memory_manager.reset();
resource_manager.reset();
device.reset(); device.reset();
} }

View file

@ -30,9 +30,7 @@ namespace Vulkan {
class StateTracker; class StateTracker;
class VKBlitScreen; class VKBlitScreen;
class VKDevice; class VKDevice;
class VKFence;
class VKMemoryManager; class VKMemoryManager;
class VKResourceManager;
class VKSwapchain; class VKSwapchain;
class VKScheduler; class VKScheduler;
class VKImage; class VKImage;
@ -82,11 +80,10 @@ private:
vk::DebugCallback debug_callback; vk::DebugCallback debug_callback;
std::unique_ptr<VKDevice> device; std::unique_ptr<VKDevice> device;
std::unique_ptr<VKSwapchain> swapchain;
std::unique_ptr<VKMemoryManager> memory_manager; std::unique_ptr<VKMemoryManager> memory_manager;
std::unique_ptr<VKResourceManager> resource_manager;
std::unique_ptr<StateTracker> state_tracker; std::unique_ptr<StateTracker> state_tracker;
std::unique_ptr<VKScheduler> scheduler; std::unique_ptr<VKScheduler> scheduler;
std::unique_ptr<VKSwapchain> swapchain;
std::unique_ptr<VKBlitScreen> blit_screen; std::unique_ptr<VKBlitScreen> blit_screen;
}; };

View file

@ -12,11 +12,9 @@
#include "common/assert.h" #include "common/assert.h"
#include "common/common_types.h" #include "common/common_types.h"
#include "common/math_util.h" #include "common/math_util.h"
#include "core/core.h" #include "core/core.h"
#include "core/frontend/emu_window.h" #include "core/frontend/emu_window.h"
#include "core/memory.h" #include "core/memory.h"
#include "video_core/gpu.h" #include "video_core/gpu.h"
#include "video_core/morton.h" #include "video_core/morton.h"
#include "video_core/rasterizer_interface.h" #include "video_core/rasterizer_interface.h"
@ -24,8 +22,8 @@
#include "video_core/renderer_vulkan/vk_blit_screen.h" #include "video_core/renderer_vulkan/vk_blit_screen.h"
#include "video_core/renderer_vulkan/vk_device.h" #include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_image.h" #include "video_core/renderer_vulkan/vk_image.h"
#include "video_core/renderer_vulkan/vk_master_semaphore.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h" #include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_shader_util.h" #include "video_core/renderer_vulkan/vk_shader_util.h"
#include "video_core/renderer_vulkan/vk_swapchain.h" #include "video_core/renderer_vulkan/vk_swapchain.h"
@ -213,16 +211,12 @@ struct VKBlitScreen::BufferData {
VKBlitScreen::VKBlitScreen(Core::Memory::Memory& cpu_memory_, VKBlitScreen::VKBlitScreen(Core::Memory::Memory& cpu_memory_,
Core::Frontend::EmuWindow& render_window_, Core::Frontend::EmuWindow& render_window_,
VideoCore::RasterizerInterface& rasterizer_, const VKDevice& device_, VideoCore::RasterizerInterface& rasterizer_, const VKDevice& device_,
VKResourceManager& resource_manager_, VKMemoryManager& memory_manager_, VKMemoryManager& memory_manager_, VKSwapchain& swapchain_,
VKSwapchain& swapchain_, VKScheduler& scheduler_, VKScheduler& scheduler_, const VKScreenInfo& screen_info_)
const VKScreenInfo& screen_info_) : cpu_memory{cpu_memory_}, render_window{render_window_}, rasterizer{rasterizer_},
: cpu_memory{cpu_memory_}, render_window{render_window_}, device{device_}, memory_manager{memory_manager_}, swapchain{swapchain_},
rasterizer{rasterizer_}, device{device_}, resource_manager{resource_manager_}, scheduler{scheduler_}, image_count{swapchain.GetImageCount()}, screen_info{screen_info_} {
memory_manager{memory_manager_}, swapchain{swapchain_}, scheduler{scheduler_}, resource_ticks.resize(image_count);
image_count{swapchain.GetImageCount()}, screen_info{screen_info_} {
watches.resize(image_count);
std::generate(watches.begin(), watches.end(),
[]() { return std::make_unique<VKFenceWatch>(); });
CreateStaticResources(); CreateStaticResources();
CreateDynamicResources(); CreateDynamicResources();
@ -234,15 +228,16 @@ void VKBlitScreen::Recreate() {
CreateDynamicResources(); CreateDynamicResources();
} }
std::tuple<VKFence&, VkSemaphore> VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool use_accelerated) {
bool use_accelerated) {
RefreshResources(framebuffer); RefreshResources(framebuffer);
// Finish any pending renderpass // Finish any pending renderpass
scheduler.RequestOutsideRenderPassOperationContext(); scheduler.RequestOutsideRenderPassOperationContext();
const std::size_t image_index = swapchain.GetImageIndex(); const std::size_t image_index = swapchain.GetImageIndex();
watches[image_index]->Watch(scheduler.GetFence());
scheduler.Wait(resource_ticks[image_index]);
resource_ticks[image_index] = scheduler.CurrentTick();
VKImage* blit_image = use_accelerated ? screen_info.image : raw_images[image_index].get(); VKImage* blit_image = use_accelerated ? screen_info.image : raw_images[image_index].get();
@ -345,7 +340,7 @@ std::tuple<VKFence&, VkSemaphore> VKBlitScreen::Draw(const Tegra::FramebufferCon
cmdbuf.EndRenderPass(); cmdbuf.EndRenderPass();
}); });
return {scheduler.GetFence(), *semaphores[image_index]}; return *semaphores[image_index];
} }
void VKBlitScreen::CreateStaticResources() { void VKBlitScreen::CreateStaticResources() {
@ -713,7 +708,7 @@ void VKBlitScreen::CreateFramebuffers() {
void VKBlitScreen::ReleaseRawImages() { void VKBlitScreen::ReleaseRawImages() {
for (std::size_t i = 0; i < raw_images.size(); ++i) { for (std::size_t i = 0; i < raw_images.size(); ++i) {
watches[i]->Wait(); scheduler.Wait(resource_ticks.at(i));
} }
raw_images.clear(); raw_images.clear();
raw_buffer_commits.clear(); raw_buffer_commits.clear();

View file

@ -5,10 +5,8 @@
#pragma once #pragma once
#include <memory> #include <memory>
#include <tuple>
#include "video_core/renderer_vulkan/vk_memory_manager.h" #include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/wrapper.h" #include "video_core/renderer_vulkan/wrapper.h"
namespace Core { namespace Core {
@ -34,9 +32,9 @@ class RasterizerInterface;
namespace Vulkan { namespace Vulkan {
struct ScreenInfo; struct ScreenInfo;
class RasterizerVulkan; class RasterizerVulkan;
class VKDevice; class VKDevice;
class VKFence;
class VKImage; class VKImage;
class VKScheduler; class VKScheduler;
class VKSwapchain; class VKSwapchain;
@ -46,14 +44,13 @@ public:
explicit VKBlitScreen(Core::Memory::Memory& cpu_memory, explicit VKBlitScreen(Core::Memory::Memory& cpu_memory,
Core::Frontend::EmuWindow& render_window, Core::Frontend::EmuWindow& render_window,
VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, VideoCore::RasterizerInterface& rasterizer, const VKDevice& device,
VKResourceManager& resource_manager, VKMemoryManager& memory_manager, VKMemoryManager& memory_manager, VKSwapchain& swapchain,
VKSwapchain& swapchain, VKScheduler& scheduler, VKScheduler& scheduler, const VKScreenInfo& screen_info);
const VKScreenInfo& screen_info);
~VKBlitScreen(); ~VKBlitScreen();
void Recreate(); void Recreate();
std::tuple<VKFence&, VkSemaphore> Draw(const Tegra::FramebufferConfig& framebuffer, [[nodiscard]] VkSemaphore Draw(const Tegra::FramebufferConfig& framebuffer,
bool use_accelerated); bool use_accelerated);
private: private:
@ -90,7 +87,6 @@ private:
Core::Frontend::EmuWindow& render_window; Core::Frontend::EmuWindow& render_window;
VideoCore::RasterizerInterface& rasterizer; VideoCore::RasterizerInterface& rasterizer;
const VKDevice& device; const VKDevice& device;
VKResourceManager& resource_manager;
VKMemoryManager& memory_manager; VKMemoryManager& memory_manager;
VKSwapchain& swapchain; VKSwapchain& swapchain;
VKScheduler& scheduler; VKScheduler& scheduler;
@ -111,7 +107,7 @@ private:
vk::Buffer buffer; vk::Buffer buffer;
VKMemoryCommit buffer_commit; VKMemoryCommit buffer_commit;
std::vector<std::unique_ptr<VKFenceWatch>> watches; std::vector<u64> resource_ticks;
std::vector<vk::Semaphore> semaphores; std::vector<vk::Semaphore> semaphores;
std::vector<std::unique_ptr<VKImage>> raw_images; std::vector<std::unique_ptr<VKImage>> raw_images;

View file

@ -0,0 +1,41 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <cstddef>
#include "video_core/renderer_vulkan/vk_command_pool.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
constexpr size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;
CommandPool::CommandPool(MasterSemaphore& master_semaphore, const VKDevice& device)
: ResourcePool(master_semaphore, COMMAND_BUFFER_POOL_SIZE), device{device} {}
CommandPool::~CommandPool() = default;
void CommandPool::Allocate(size_t begin, size_t end) {
// Command buffers are going to be committed, recorded, and executed every single usage cycle.
// They are also going to be reset when committed.
Pool& pool = pools.emplace_back();
pool.handle = device.GetLogical().CreateCommandPool({
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
.pNext = nullptr,
.flags =
VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
.queueFamilyIndex = device.GetGraphicsFamily(),
});
pool.cmdbufs = pool.handle.Allocate(COMMAND_BUFFER_POOL_SIZE);
}
VkCommandBuffer CommandPool::Commit() {
const size_t index = CommitResource();
const auto pool_index = index / COMMAND_BUFFER_POOL_SIZE;
const auto sub_index = index % COMMAND_BUFFER_POOL_SIZE;
return pools[pool_index].cmdbufs[sub_index];
}
} // namespace Vulkan

View file

@ -0,0 +1,35 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <cstddef>
#include <vector>
#include "video_core/renderer_vulkan/vk_resource_pool.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
class MasterSemaphore;
class VKDevice;
class CommandPool final : public ResourcePool {
public:
explicit CommandPool(MasterSemaphore& master_semaphore, const VKDevice& device);
virtual ~CommandPool();
void Allocate(size_t begin, size_t end) override;
VkCommandBuffer Commit();
private:
struct Pool {
vk::CommandPool handle;
vk::CommandBuffers cmdbufs;
};
const VKDevice& device;
std::vector<Pool> pools;
};
} // namespace Vulkan

View file

@ -112,7 +112,8 @@ constexpr u8 quad_array[] = {
0xf9, 0x00, 0x02, 0x00, 0x21, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x23, 0x00, 0x00, 0x00, 0xf9, 0x00, 0x02, 0x00, 0x21, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x23, 0x00, 0x00, 0x00,
0xf9, 0x00, 0x02, 0x00, 0x4b, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x4e, 0x00, 0x00, 0x00, 0xf9, 0x00, 0x02, 0x00, 0x4b, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x4e, 0x00, 0x00, 0x00,
0xf9, 0x00, 0x02, 0x00, 0x4c, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x4b, 0x00, 0x00, 0x00, 0xf9, 0x00, 0x02, 0x00, 0x4c, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x4b, 0x00, 0x00, 0x00,
0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00}; 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
};
VkDescriptorSetLayoutBinding BuildQuadArrayPassDescriptorSetLayoutBinding() { VkDescriptorSetLayoutBinding BuildQuadArrayPassDescriptorSetLayoutBinding() {
return { return {
@ -218,7 +219,8 @@ constexpr u8 uint8_pass[] = {
0x2a, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
0x24, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x03, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x03, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00,
0xf9, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00, 0xf9, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00,
0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00}; 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
};
// Quad indexed SPIR-V module. Generated from the "shaders/" directory. // Quad indexed SPIR-V module. Generated from the "shaders/" directory.
constexpr u8 QUAD_INDEXED_SPV[] = { constexpr u8 QUAD_INDEXED_SPV[] = {
@ -341,7 +343,8 @@ constexpr u8 QUAD_INDEXED_SPV[] = {
0xf9, 0x00, 0x02, 0x00, 0x35, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x37, 0x00, 0x00, 0x00, 0xf9, 0x00, 0x02, 0x00, 0x35, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x37, 0x00, 0x00, 0x00,
0xf9, 0x00, 0x02, 0x00, 0x73, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x76, 0x00, 0x00, 0x00, 0xf9, 0x00, 0x02, 0x00, 0x73, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x76, 0x00, 0x00, 0x00,
0xf9, 0x00, 0x02, 0x00, 0x74, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x73, 0x00, 0x00, 0x00, 0xf9, 0x00, 0x02, 0x00, 0x74, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x73, 0x00, 0x00, 0x00,
0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00}; 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
};
std::array<VkDescriptorSetLayoutBinding, 2> BuildInputOutputDescriptorSetBindings() { std::array<VkDescriptorSetLayoutBinding, 2> BuildInputOutputDescriptorSetBindings() {
return {{ return {{
@ -448,12 +451,12 @@ VKComputePass::VKComputePass(const VKDevice& device, VKDescriptorPool& descripto
VKComputePass::~VKComputePass() = default; VKComputePass::~VKComputePass() = default;
VkDescriptorSet VKComputePass::CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue, VkDescriptorSet VKComputePass::CommitDescriptorSet(
VKFence& fence) { VKUpdateDescriptorQueue& update_descriptor_queue) {
if (!descriptor_template) { if (!descriptor_template) {
return nullptr; return nullptr;
} }
const auto set = descriptor_allocator->Commit(fence); const VkDescriptorSet set = descriptor_allocator->Commit();
update_descriptor_queue.Send(*descriptor_template, set); update_descriptor_queue.Send(*descriptor_template, set);
return set; return set;
} }
@ -477,7 +480,7 @@ std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32
update_descriptor_queue.Acquire(); update_descriptor_queue.Acquire();
update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence()); const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
scheduler.RequestOutsideRenderPassOperationContext(); scheduler.RequestOutsideRenderPassOperationContext();
@ -520,13 +523,13 @@ Uint8Pass::~Uint8Pass() = default;
std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer, std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
u64 src_offset) { u64 src_offset) {
const auto staging_size = static_cast<u32>(num_vertices * sizeof(u16)); const u32 staging_size = static_cast<u32>(num_vertices * sizeof(u16));
auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
update_descriptor_queue.Acquire(); update_descriptor_queue.Acquire();
update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices); update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence()); const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
scheduler.RequestOutsideRenderPassOperationContext(); scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set, scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set,
@ -589,7 +592,7 @@ std::pair<VkBuffer, u64> QuadIndexedPass::Assemble(
update_descriptor_queue.Acquire(); update_descriptor_queue.Acquire();
update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size); update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size);
update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence()); const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
scheduler.RequestOutsideRenderPassOperationContext(); scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set, scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set,

View file

@ -15,7 +15,6 @@
namespace Vulkan { namespace Vulkan {
class VKDevice; class VKDevice;
class VKFence;
class VKScheduler; class VKScheduler;
class VKStagingBufferPool; class VKStagingBufferPool;
class VKUpdateDescriptorQueue; class VKUpdateDescriptorQueue;
@ -30,8 +29,7 @@ public:
~VKComputePass(); ~VKComputePass();
protected: protected:
VkDescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue, VkDescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue);
VKFence& fence);
vk::DescriptorUpdateTemplateKHR descriptor_template; vk::DescriptorUpdateTemplateKHR descriptor_template;
vk::PipelineLayout layout; vk::PipelineLayout layout;

View file

@ -32,7 +32,7 @@ VkDescriptorSet VKComputePipeline::CommitDescriptorSet() {
if (!descriptor_template) { if (!descriptor_template) {
return {}; return {};
} }
const auto set = descriptor_allocator.Commit(scheduler.GetFence()); const VkDescriptorSet set = descriptor_allocator.Commit();
update_descriptor_queue.Send(*descriptor_template, set); update_descriptor_queue.Send(*descriptor_template, set);
return set; return set;
} }

View file

@ -7,7 +7,8 @@
#include "common/common_types.h" #include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h" #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_device.h" #include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h" #include "video_core/renderer_vulkan/vk_resource_pool.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/wrapper.h" #include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan { namespace Vulkan {
@ -15,14 +16,15 @@ namespace Vulkan {
// Prefer small grow rates to avoid saturating the descriptor pool with barely used pipelines. // Prefer small grow rates to avoid saturating the descriptor pool with barely used pipelines.
constexpr std::size_t SETS_GROW_RATE = 0x20; constexpr std::size_t SETS_GROW_RATE = 0x20;
DescriptorAllocator::DescriptorAllocator(VKDescriptorPool& descriptor_pool, DescriptorAllocator::DescriptorAllocator(VKDescriptorPool& descriptor_pool_,
VkDescriptorSetLayout layout) VkDescriptorSetLayout layout_)
: VKFencedPool{SETS_GROW_RATE}, descriptor_pool{descriptor_pool}, layout{layout} {} : ResourcePool(descriptor_pool_.master_semaphore, SETS_GROW_RATE),
descriptor_pool{descriptor_pool_}, layout{layout_} {}
DescriptorAllocator::~DescriptorAllocator() = default; DescriptorAllocator::~DescriptorAllocator() = default;
VkDescriptorSet DescriptorAllocator::Commit(VKFence& fence) { VkDescriptorSet DescriptorAllocator::Commit() {
const std::size_t index = CommitResource(fence); const std::size_t index = CommitResource();
return descriptors_allocations[index / SETS_GROW_RATE][index % SETS_GROW_RATE]; return descriptors_allocations[index / SETS_GROW_RATE][index % SETS_GROW_RATE];
} }
@ -30,8 +32,9 @@ void DescriptorAllocator::Allocate(std::size_t begin, std::size_t end) {
descriptors_allocations.push_back(descriptor_pool.AllocateDescriptors(layout, end - begin)); descriptors_allocations.push_back(descriptor_pool.AllocateDescriptors(layout, end - begin));
} }
VKDescriptorPool::VKDescriptorPool(const VKDevice& device) VKDescriptorPool::VKDescriptorPool(const VKDevice& device_, VKScheduler& scheduler)
: device{device}, active_pool{AllocateNewPool()} {} : device{device_}, master_semaphore{scheduler.GetMasterSemaphore()}, active_pool{
AllocateNewPool()} {}
VKDescriptorPool::~VKDescriptorPool() = default; VKDescriptorPool::~VKDescriptorPool() = default;

View file

@ -6,21 +6,24 @@
#include <vector> #include <vector>
#include "video_core/renderer_vulkan/vk_resource_manager.h" #include "video_core/renderer_vulkan/vk_resource_pool.h"
#include "video_core/renderer_vulkan/wrapper.h" #include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan { namespace Vulkan {
class VKDevice;
class VKDescriptorPool; class VKDescriptorPool;
class VKScheduler;
class DescriptorAllocator final : public VKFencedPool { class DescriptorAllocator final : public ResourcePool {
public: public:
explicit DescriptorAllocator(VKDescriptorPool& descriptor_pool, VkDescriptorSetLayout layout); explicit DescriptorAllocator(VKDescriptorPool& descriptor_pool, VkDescriptorSetLayout layout);
~DescriptorAllocator() override; ~DescriptorAllocator() override;
DescriptorAllocator& operator=(const DescriptorAllocator&) = delete;
DescriptorAllocator(const DescriptorAllocator&) = delete; DescriptorAllocator(const DescriptorAllocator&) = delete;
VkDescriptorSet Commit(VKFence& fence); VkDescriptorSet Commit();
protected: protected:
void Allocate(std::size_t begin, std::size_t end) override; void Allocate(std::size_t begin, std::size_t end) override;
@ -36,15 +39,19 @@ class VKDescriptorPool final {
friend DescriptorAllocator; friend DescriptorAllocator;
public: public:
explicit VKDescriptorPool(const VKDevice& device); explicit VKDescriptorPool(const VKDevice& device, VKScheduler& scheduler);
~VKDescriptorPool(); ~VKDescriptorPool();
VKDescriptorPool(const VKDescriptorPool&) = delete;
VKDescriptorPool& operator=(const VKDescriptorPool&) = delete;
private: private:
vk::DescriptorPool* AllocateNewPool(); vk::DescriptorPool* AllocateNewPool();
vk::DescriptorSets AllocateDescriptors(VkDescriptorSetLayout layout, std::size_t count); vk::DescriptorSets AllocateDescriptors(VkDescriptorSetLayout layout, std::size_t count);
const VKDevice& device; const VKDevice& device;
MasterSemaphore& master_semaphore;
std::vector<vk::DescriptorPool> pools; std::vector<vk::DescriptorPool> pools;
vk::DescriptorPool* active_pool; vk::DescriptorPool* active_pool;

View file

@ -42,6 +42,7 @@ constexpr std::array REQUIRED_EXTENSIONS{
VK_KHR_8BIT_STORAGE_EXTENSION_NAME, VK_KHR_8BIT_STORAGE_EXTENSION_NAME,
VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME, VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME,
VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME, VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME,
VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME,
VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME, VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME,
VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME, VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME,
@ -250,6 +251,13 @@ bool VKDevice::Create() {
.inheritedQueries = false, .inheritedQueries = false,
}; };
VkPhysicalDeviceTimelineSemaphoreFeaturesKHR timeline_semaphore{
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR,
.pNext = nullptr,
.timelineSemaphore = true,
};
SetNext(next, timeline_semaphore);
VkPhysicalDevice16BitStorageFeaturesKHR bit16_storage{ VkPhysicalDevice16BitStorageFeaturesKHR bit16_storage{
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR, .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR,
.pNext = nullptr, .pNext = nullptr,

View file

@ -29,8 +29,8 @@ void InnerFence::Queue() {
} }
ASSERT(!event); ASSERT(!event);
event = device.GetLogical().CreateNewEvent(); event = device.GetLogical().CreateEvent();
ticks = scheduler.Ticks(); ticks = scheduler.CurrentTick();
scheduler.RequestOutsideRenderPassOperationContext(); scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([event = *event](vk::CommandBuffer cmdbuf) { scheduler.Record([event = *event](vk::CommandBuffer cmdbuf) {
@ -52,7 +52,7 @@ void InnerFence::Wait() {
} }
ASSERT(event); ASSERT(event);
if (ticks >= scheduler.Ticks()) { if (ticks >= scheduler.CurrentTick()) {
scheduler.Flush(); scheduler.Flush();
} }
while (!IsEventSignalled()) { while (!IsEventSignalled()) {

View file

@ -93,7 +93,7 @@ VkDescriptorSet VKGraphicsPipeline::CommitDescriptorSet() {
if (!descriptor_template) { if (!descriptor_template) {
return {}; return {};
} }
const auto set = descriptor_allocator.Commit(scheduler.GetFence()); const VkDescriptorSet set = descriptor_allocator.Commit();
update_descriptor_queue.Send(*descriptor_template, set); update_descriptor_queue.Send(*descriptor_template, set);
return set; return set;
} }

View file

@ -0,0 +1,56 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <atomic>
#include <chrono>
#include "core/settings.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_master_semaphore.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
using namespace std::chrono_literals;
MasterSemaphore::MasterSemaphore(const VKDevice& device) {
static constexpr VkSemaphoreTypeCreateInfoKHR semaphore_type_ci{
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
.pNext = nullptr,
.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
.initialValue = 0,
};
static constexpr VkSemaphoreCreateInfo semaphore_ci{
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
.pNext = &semaphore_type_ci,
.flags = 0,
};
semaphore = device.GetLogical().CreateSemaphore(semaphore_ci);
if (!Settings::values.renderer_debug) {
return;
}
// Validation layers have a bug where they fail to track resource usage when using timeline
// semaphores and synchronizing with GetSemaphoreCounterValueKHR. To work around this issue, have
// a separate thread waiting for each timeline semaphore value.
debug_thread = std::thread([this] {
u64 counter = 0;
while (!shutdown) {
if (semaphore.Wait(counter, 10'000'000)) {
++counter;
}
}
});
}
MasterSemaphore::~MasterSemaphore() {
shutdown = true;
// This thread might not be started
if (debug_thread.joinable()) {
debug_thread.join();
}
}
} // namespace Vulkan

View file

@ -0,0 +1,70 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include <thread>
#include "common/common_types.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
class VKDevice;
class MasterSemaphore {
public:
explicit MasterSemaphore(const VKDevice& device);
~MasterSemaphore();
/// Returns the current logical tick.
[[nodiscard]] u64 CurrentTick() const noexcept {
return current_tick;
}
/// Returns the timeline semaphore handle.
[[nodiscard]] VkSemaphore Handle() const noexcept {
return *semaphore;
}
/// Returns true when a tick has been hit by the GPU.
[[nodiscard]] bool IsFree(u64 tick) {
return gpu_tick >= tick;
}
/// Advance to the logical tick.
void NextTick() noexcept {
++current_tick;
}
/// Refresh the known GPU tick
void Refresh() {
gpu_tick = semaphore.GetCounter();
}
/// Waits for a tick to be hit on the GPU
void Wait(u64 tick) {
// No need to wait if the GPU is ahead of the tick
if (IsFree(tick)) {
return;
}
// Update the GPU tick and try again
Refresh();
if (IsFree(tick)) {
return;
}
// If none of the above is hit, fallback to a regular wait
semaphore.Wait(tick);
}
private:
vk::Semaphore semaphore; ///< Timeline semaphore.
std::atomic<u64> gpu_tick{0}; ///< Current known GPU tick.
std::atomic<u64> current_tick{1}; ///< Current logical tick.
std::atomic<bool> shutdown{false}; ///< True when the object is being destroyed.
std::thread debug_thread; ///< Debug thread to workaround validation layer bugs.
};
} // namespace Vulkan

View file

@ -38,7 +38,6 @@ class RasterizerVulkan;
class VKComputePipeline; class VKComputePipeline;
class VKDescriptorPool; class VKDescriptorPool;
class VKDevice; class VKDevice;
class VKFence;
class VKScheduler; class VKScheduler;
class VKUpdateDescriptorQueue; class VKUpdateDescriptorQueue;

View file

@ -9,35 +9,33 @@
#include "video_core/renderer_vulkan/vk_device.h" #include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_query_cache.h" #include "video_core/renderer_vulkan/vk_query_cache.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h" #include "video_core/renderer_vulkan/vk_resource_pool.h"
#include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/wrapper.h" #include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan { namespace Vulkan {
using VideoCore::QueryType;
namespace { namespace {
constexpr std::array QUERY_TARGETS = {VK_QUERY_TYPE_OCCLUSION}; constexpr std::array QUERY_TARGETS = {VK_QUERY_TYPE_OCCLUSION};
constexpr VkQueryType GetTarget(VideoCore::QueryType type) { constexpr VkQueryType GetTarget(QueryType type) {
return QUERY_TARGETS[static_cast<std::size_t>(type)]; return QUERY_TARGETS[static_cast<std::size_t>(type)];
} }
} // Anonymous namespace } // Anonymous namespace
QueryPool::QueryPool() : VKFencedPool{GROW_STEP} {} QueryPool::QueryPool(const VKDevice& device_, VKScheduler& scheduler, QueryType type_)
: ResourcePool{scheduler.GetMasterSemaphore(), GROW_STEP}, device{device_}, type{type_} {}
QueryPool::~QueryPool() = default; QueryPool::~QueryPool() = default;
void QueryPool::Initialize(const VKDevice& device_, VideoCore::QueryType type_) { std::pair<VkQueryPool, u32> QueryPool::Commit() {
device = &device_;
type = type_;
}
std::pair<VkQueryPool, u32> QueryPool::Commit(VKFence& fence) {
std::size_t index; std::size_t index;
do { do {
index = CommitResource(fence); index = CommitResource();
} while (usage[index]); } while (usage[index]);
usage[index] = true; usage[index] = true;
@ -47,7 +45,7 @@ std::pair<VkQueryPool, u32> QueryPool::Commit(VKFence& fence) {
void QueryPool::Allocate(std::size_t begin, std::size_t end) { void QueryPool::Allocate(std::size_t begin, std::size_t end) {
usage.resize(end); usage.resize(end);
pools.push_back(device->GetLogical().CreateQueryPool({ pools.push_back(device.GetLogical().CreateQueryPool({
.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO, .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
.pNext = nullptr, .pNext = nullptr,
.flags = 0, .flags = 0,
@ -71,28 +69,27 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
VKQueryCache::VKQueryCache(VideoCore::RasterizerInterface& rasterizer, VKQueryCache::VKQueryCache(VideoCore::RasterizerInterface& rasterizer,
Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory, Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
const VKDevice& device, VKScheduler& scheduler) const VKDevice& device, VKScheduler& scheduler)
: VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream, HostCounter, : VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream,
QueryPool>{rasterizer, maxwell3d, gpu_memory}, HostCounter>{rasterizer, maxwell3d, gpu_memory},
device{device}, scheduler{scheduler} { device{device}, scheduler{scheduler}, query_pools{
for (std::size_t i = 0; i < static_cast<std::size_t>(VideoCore::NumQueryTypes); ++i) { QueryPool{device, scheduler,
query_pools[i].Initialize(device, static_cast<VideoCore::QueryType>(i)); QueryType::SamplesPassed},
} } {}
}
VKQueryCache::~VKQueryCache() = default; VKQueryCache::~VKQueryCache() = default;
std::pair<VkQueryPool, u32> VKQueryCache::AllocateQuery(VideoCore::QueryType type) { std::pair<VkQueryPool, u32> VKQueryCache::AllocateQuery(QueryType type) {
return query_pools[static_cast<std::size_t>(type)].Commit(scheduler.GetFence()); return query_pools[static_cast<std::size_t>(type)].Commit();
} }
void VKQueryCache::Reserve(VideoCore::QueryType type, std::pair<VkQueryPool, u32> query) { void VKQueryCache::Reserve(QueryType type, std::pair<VkQueryPool, u32> query) {
query_pools[static_cast<std::size_t>(type)].Reserve(query); query_pools[static_cast<std::size_t>(type)].Reserve(query);
} }
HostCounter::HostCounter(VKQueryCache& cache, std::shared_ptr<HostCounter> dependency, HostCounter::HostCounter(VKQueryCache& cache, std::shared_ptr<HostCounter> dependency,
VideoCore::QueryType type) QueryType type)
: VideoCommon::HostCounterBase<VKQueryCache, HostCounter>{std::move(dependency)}, cache{cache}, : VideoCommon::HostCounterBase<VKQueryCache, HostCounter>{std::move(dependency)}, cache{cache},
type{type}, query{cache.AllocateQuery(type)}, ticks{cache.Scheduler().Ticks()} { type{type}, query{cache.AllocateQuery(type)}, tick{cache.Scheduler().CurrentTick()} {
const vk::Device* logical = &cache.Device().GetLogical(); const vk::Device* logical = &cache.Device().GetLogical();
cache.Scheduler().Record([logical, query = query](vk::CommandBuffer cmdbuf) { cache.Scheduler().Record([logical, query = query](vk::CommandBuffer cmdbuf) {
logical->ResetQueryPoolEXT(query.first, query.second, 1); logical->ResetQueryPoolEXT(query.first, query.second, 1);
@ -110,7 +107,7 @@ void HostCounter::EndQuery() {
} }
u64 HostCounter::BlockingQuery() const { u64 HostCounter::BlockingQuery() const {
if (ticks >= cache.Scheduler().Ticks()) { if (tick >= cache.Scheduler().CurrentTick()) {
cache.Scheduler().Flush(); cache.Scheduler().Flush();
} }
u64 data; u64 data;

View file

@ -11,7 +11,7 @@
#include "common/common_types.h" #include "common/common_types.h"
#include "video_core/query_cache.h" #include "video_core/query_cache.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h" #include "video_core/renderer_vulkan/vk_resource_pool.h"
#include "video_core/renderer_vulkan/wrapper.h" #include "video_core/renderer_vulkan/wrapper.h"
namespace VideoCore { namespace VideoCore {
@ -28,14 +28,12 @@ class VKScheduler;
using CounterStream = VideoCommon::CounterStreamBase<VKQueryCache, HostCounter>; using CounterStream = VideoCommon::CounterStreamBase<VKQueryCache, HostCounter>;
class QueryPool final : public VKFencedPool { class QueryPool final : public ResourcePool {
public: public:
explicit QueryPool(); explicit QueryPool(const VKDevice& device, VKScheduler& scheduler, VideoCore::QueryType type);
~QueryPool() override; ~QueryPool() override;
void Initialize(const VKDevice& device, VideoCore::QueryType type); std::pair<VkQueryPool, u32> Commit();
std::pair<VkQueryPool, u32> Commit(VKFence& fence);
void Reserve(std::pair<VkQueryPool, u32> query); void Reserve(std::pair<VkQueryPool, u32> query);
@ -45,16 +43,15 @@ protected:
private: private:
static constexpr std::size_t GROW_STEP = 512; static constexpr std::size_t GROW_STEP = 512;
const VKDevice* device = nullptr; const VKDevice& device;
VideoCore::QueryType type = {}; const VideoCore::QueryType type;
std::vector<vk::QueryPool> pools; std::vector<vk::QueryPool> pools;
std::vector<bool> usage; std::vector<bool> usage;
}; };
class VKQueryCache final class VKQueryCache final
: public VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream, HostCounter, : public VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream, HostCounter> {
QueryPool> {
public: public:
explicit VKQueryCache(VideoCore::RasterizerInterface& rasterizer, explicit VKQueryCache(VideoCore::RasterizerInterface& rasterizer,
Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory, Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
@ -76,6 +73,7 @@ public:
private: private:
const VKDevice& device; const VKDevice& device;
VKScheduler& scheduler; VKScheduler& scheduler;
std::array<QueryPool, VideoCore::NumQueryTypes> query_pools;
}; };
class HostCounter final : public VideoCommon::HostCounterBase<VKQueryCache, HostCounter> { class HostCounter final : public VideoCommon::HostCounterBase<VKQueryCache, HostCounter> {
@ -92,7 +90,7 @@ private:
VKQueryCache& cache; VKQueryCache& cache;
const VideoCore::QueryType type; const VideoCore::QueryType type;
const std::pair<VkQueryPool, u32> query; const std::pair<VkQueryPool, u32> query;
const u64 ticks; const u64 tick;
}; };
class CachedQuery : public VideoCommon::CachedQueryBase<HostCounter> { class CachedQuery : public VideoCommon::CachedQueryBase<HostCounter> {

View file

@ -31,7 +31,6 @@
#include "video_core/renderer_vulkan/vk_pipeline_cache.h" #include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h" #include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h" #include "video_core/renderer_vulkan/vk_renderpass_cache.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_sampler_cache.h" #include "video_core/renderer_vulkan/vk_sampler_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
@ -384,27 +383,25 @@ void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf) const {
RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu_, RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu_,
Tegra::MemoryManager& gpu_memory_, Tegra::MemoryManager& gpu_memory_,
Core::Memory::Memory& cpu_memory, VKScreenInfo& screen_info_, Core::Memory::Memory& cpu_memory, VKScreenInfo& screen_info_,
const VKDevice& device_, VKResourceManager& resource_manager_, const VKDevice& device_, VKMemoryManager& memory_manager_,
VKMemoryManager& memory_manager_, StateTracker& state_tracker_, StateTracker& state_tracker_, VKScheduler& scheduler_)
VKScheduler& scheduler_)
: RasterizerAccelerated(cpu_memory), gpu(gpu_), gpu_memory(gpu_memory_), : RasterizerAccelerated(cpu_memory), gpu(gpu_), gpu_memory(gpu_memory_),
maxwell3d(gpu.Maxwell3D()), kepler_compute(gpu.KeplerCompute()), screen_info(screen_info_), maxwell3d(gpu.Maxwell3D()), kepler_compute(gpu.KeplerCompute()), screen_info(screen_info_),
device(device_), resource_manager(resource_manager_), memory_manager(memory_manager_), device(device_), memory_manager(memory_manager_), state_tracker(state_tracker_),
state_tracker(state_tracker_), scheduler(scheduler_), scheduler(scheduler_), staging_pool(device, memory_manager, scheduler),
staging_pool(device, memory_manager, scheduler), descriptor_pool(device), descriptor_pool(device, scheduler_), update_descriptor_queue(device, scheduler),
update_descriptor_queue(device, scheduler), renderpass_cache(device), renderpass_cache(device),
quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
texture_cache(*this, maxwell3d, gpu_memory, device, resource_manager, memory_manager, texture_cache(*this, maxwell3d, gpu_memory, device, memory_manager, scheduler, staging_pool),
scheduler, staging_pool),
pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler, pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
descriptor_pool, update_descriptor_queue, renderpass_cache), descriptor_pool, update_descriptor_queue, renderpass_cache),
buffer_cache(*this, gpu_memory, cpu_memory, device, memory_manager, scheduler, staging_pool), buffer_cache(*this, gpu_memory, cpu_memory, device, memory_manager, scheduler, staging_pool),
sampler_cache(device), query_cache(*this, maxwell3d, gpu_memory, device, scheduler), sampler_cache(device), query_cache(*this, maxwell3d, gpu_memory, device, scheduler),
fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, device, fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, device,
scheduler), scheduler),
wfi_event(device.GetLogical().CreateNewEvent()), async_shaders(emu_window) { wfi_event(device.GetLogical().CreateEvent()), async_shaders(emu_window) {
scheduler.SetQueryCache(query_cache); scheduler.SetQueryCache(query_cache);
if (device.UseAsynchronousShaders()) { if (device.UseAsynchronousShaders()) {
async_shaders.AllocateWorkers(); async_shaders.AllocateWorkers();

View file

@ -25,7 +25,6 @@
#include "video_core/renderer_vulkan/vk_pipeline_cache.h" #include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_query_cache.h" #include "video_core/renderer_vulkan/vk_query_cache.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h" #include "video_core/renderer_vulkan/vk_renderpass_cache.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_sampler_cache.h" #include "video_core/renderer_vulkan/vk_sampler_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
@ -109,8 +108,8 @@ public:
explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu, explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu,
Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory,
VKScreenInfo& screen_info, const VKDevice& device, VKScreenInfo& screen_info, const VKDevice& device,
VKResourceManager& resource_manager, VKMemoryManager& memory_manager, VKMemoryManager& memory_manager, StateTracker& state_tracker,
StateTracker& state_tracker, VKScheduler& scheduler); VKScheduler& scheduler);
~RasterizerVulkan() override; ~RasterizerVulkan() override;
void Draw(bool is_indexed, bool is_instanced) override; void Draw(bool is_indexed, bool is_instanced) override;
@ -286,7 +285,6 @@ private:
VKScreenInfo& screen_info; VKScreenInfo& screen_info;
const VKDevice& device; const VKDevice& device;
VKResourceManager& resource_manager;
VKMemoryManager& memory_manager; VKMemoryManager& memory_manager;
StateTracker& state_tracker; StateTracker& state_tracker;
VKScheduler& scheduler; VKScheduler& scheduler;

View file

@ -1,311 +0,0 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <optional>
#include "common/assert.h"
#include "common/logging/log.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
namespace {
// TODO(Rodrigo): Fine tune these numbers.
constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;
constexpr std::size_t FENCES_GROW_STEP = 0x40;
constexpr VkFenceCreateInfo BuildFenceCreateInfo() {
return {
.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
};
}
} // Anonymous namespace
class CommandBufferPool final : public VKFencedPool {
public:
explicit CommandBufferPool(const VKDevice& device)
: VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}
void Allocate(std::size_t begin, std::size_t end) override {
// Command buffers are going to be commited, recorded, executed every single usage cycle.
// They are also going to be reseted when commited.
Pool& pool = pools.emplace_back();
pool.handle = device.GetLogical().CreateCommandPool({
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
.pNext = nullptr,
.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
.queueFamilyIndex = device.GetGraphicsFamily(),
});
pool.cmdbufs = pool.handle.Allocate(COMMAND_BUFFER_POOL_SIZE);
}
VkCommandBuffer Commit(VKFence& fence) {
const std::size_t index = CommitResource(fence);
const auto pool_index = index / COMMAND_BUFFER_POOL_SIZE;
const auto sub_index = index % COMMAND_BUFFER_POOL_SIZE;
return pools[pool_index].cmdbufs[sub_index];
}
private:
struct Pool {
vk::CommandPool handle;
vk::CommandBuffers cmdbufs;
};
const VKDevice& device;
std::vector<Pool> pools;
};
VKResource::VKResource() = default;
VKResource::~VKResource() = default;
VKFence::VKFence(const VKDevice& device)
: device{device}, handle{device.GetLogical().CreateFence(BuildFenceCreateInfo())} {}
VKFence::~VKFence() = default;
void VKFence::Wait() {
switch (const VkResult result = handle.Wait()) {
case VK_SUCCESS:
return;
case VK_ERROR_DEVICE_LOST:
device.ReportLoss();
[[fallthrough]];
default:
throw vk::Exception(result);
}
}
void VKFence::Release() {
ASSERT(is_owned);
is_owned = false;
}
void VKFence::Commit() {
is_owned = true;
is_used = true;
}
bool VKFence::Tick(bool gpu_wait, bool owner_wait) {
if (!is_used) {
// If a fence is not used it's always free.
return true;
}
if (is_owned && !owner_wait) {
// The fence is still being owned (Release has not been called) and ownership wait has
// not been asked.
return false;
}
if (gpu_wait) {
// Wait for the fence if it has been requested.
(void)handle.Wait();
} else {
if (handle.GetStatus() != VK_SUCCESS) {
// The Vulkan fence is not ready; there is not much we can do here.
return false;
}
}
// Notify protected resources that they are now free.
for (auto* resource : protected_resources) {
resource->OnFenceRemoval(this);
}
protected_resources.clear();
// Prepare the fence for reuse.
handle.Reset();
is_used = false;
return true;
}
void VKFence::Protect(VKResource* resource) {
protected_resources.push_back(resource);
}
void VKFence::Unprotect(VKResource* resource) {
const auto it = std::find(protected_resources.begin(), protected_resources.end(), resource);
ASSERT(it != protected_resources.end());
resource->OnFenceRemoval(this);
protected_resources.erase(it);
}
void VKFence::RedirectProtection(VKResource* old_resource, VKResource* new_resource) noexcept {
std::replace(std::begin(protected_resources), std::end(protected_resources), old_resource,
new_resource);
}
VKFenceWatch::VKFenceWatch() = default;
VKFenceWatch::VKFenceWatch(VKFence& initial_fence) {
Watch(initial_fence);
}
VKFenceWatch::VKFenceWatch(VKFenceWatch&& rhs) noexcept {
fence = std::exchange(rhs.fence, nullptr);
if (fence) {
fence->RedirectProtection(&rhs, this);
}
}
VKFenceWatch& VKFenceWatch::operator=(VKFenceWatch&& rhs) noexcept {
fence = std::exchange(rhs.fence, nullptr);
if (fence) {
fence->RedirectProtection(&rhs, this);
}
return *this;
}
VKFenceWatch::~VKFenceWatch() {
if (fence) {
fence->Unprotect(this);
}
}
void VKFenceWatch::Wait() {
if (fence == nullptr) {
return;
}
fence->Wait();
fence->Unprotect(this);
}
void VKFenceWatch::Watch(VKFence& new_fence) {
Wait();
fence = &new_fence;
fence->Protect(this);
}
bool VKFenceWatch::TryWatch(VKFence& new_fence) {
if (fence) {
return false;
}
fence = &new_fence;
fence->Protect(this);
return true;
}
void VKFenceWatch::OnFenceRemoval(VKFence* signaling_fence) {
ASSERT_MSG(signaling_fence == fence, "Removing the wrong fence");
fence = nullptr;
}
VKFencedPool::VKFencedPool(std::size_t grow_step) : grow_step{grow_step} {}
VKFencedPool::~VKFencedPool() = default;
std::size_t VKFencedPool::CommitResource(VKFence& fence) {
const auto Search = [&](std::size_t begin, std::size_t end) -> std::optional<std::size_t> {
for (std::size_t iterator = begin; iterator < end; ++iterator) {
if (watches[iterator]->TryWatch(fence)) {
// The resource is now being watched; a free resource was successfully found.
return iterator;
}
}
return {};
};
// Try to find a free resource from the hinted position to the end.
auto found = Search(free_iterator, watches.size());
if (!found) {
// Search from beginning to the hinted position.
found = Search(0, free_iterator);
if (!found) {
// Both searches failed, the pool is full; handle it.
const std::size_t free_resource = ManageOverflow();
// Watch will wait for the resource to be free.
watches[free_resource]->Watch(fence);
found = free_resource;
}
}
// Free iterator is hinted to the resource after the one that's been committed.
free_iterator = (*found + 1) % watches.size();
return *found;
}
std::size_t VKFencedPool::ManageOverflow() {
const std::size_t old_capacity = watches.size();
Grow();
// The last entry is guaranteed to be free, since it's the first element of the freshly
// allocated resources.
return old_capacity;
}
void VKFencedPool::Grow() {
const std::size_t old_capacity = watches.size();
watches.resize(old_capacity + grow_step);
std::generate(watches.begin() + old_capacity, watches.end(),
[]() { return std::make_unique<VKFenceWatch>(); });
Allocate(old_capacity, old_capacity + grow_step);
}
VKResourceManager::VKResourceManager(const VKDevice& device) : device{device} {
GrowFences(FENCES_GROW_STEP);
command_buffer_pool = std::make_unique<CommandBufferPool>(device);
}
VKResourceManager::~VKResourceManager() = default;
VKFence& VKResourceManager::CommitFence() {
const auto StepFences = [&](bool gpu_wait, bool owner_wait) -> VKFence* {
const auto Tick = [=](auto& fence) { return fence->Tick(gpu_wait, owner_wait); };
const auto hinted = fences.begin() + fences_iterator;
auto it = std::find_if(hinted, fences.end(), Tick);
if (it == fences.end()) {
it = std::find_if(fences.begin(), hinted, Tick);
if (it == hinted) {
return nullptr;
}
}
fences_iterator = std::distance(fences.begin(), it) + 1;
if (fences_iterator >= fences.size())
fences_iterator = 0;
auto& fence = *it;
fence->Commit();
return fence.get();
};
VKFence* found_fence = StepFences(false, false);
if (!found_fence) {
// Try again, this time waiting.
found_fence = StepFences(true, false);
if (!found_fence) {
// Allocate new fences and try again.
LOG_INFO(Render_Vulkan, "Allocating new fences {} -> {}", fences.size(),
fences.size() + FENCES_GROW_STEP);
GrowFences(FENCES_GROW_STEP);
found_fence = StepFences(true, false);
ASSERT(found_fence != nullptr);
}
}
return *found_fence;
}
VkCommandBuffer VKResourceManager::CommitCommandBuffer(VKFence& fence) {
return command_buffer_pool->Commit(fence);
}
void VKResourceManager::GrowFences(std::size_t new_fences_count) {
const std::size_t previous_size = fences.size();
fences.resize(previous_size + new_fences_count);
std::generate(fences.begin() + previous_size, fences.end(),
[this] { return std::make_unique<VKFence>(device); });
}
} // namespace Vulkan

View file

@ -1,196 +0,0 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <cstddef>
#include <memory>
#include <vector>
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
class VKDevice;
class VKFence;
class VKResourceManager;
class CommandBufferPool;
/// Interface for a Vulkan resource
class VKResource {
public:
explicit VKResource();
virtual ~VKResource();
/**
* Signals the object that an owning fence has been signaled.
* @param signaling_fence Fence that signals its usage end.
*/
virtual void OnFenceRemoval(VKFence* signaling_fence) = 0;
};
/**
* Fences take ownership of objects, protecting them from GPU-side or driver-side concurrent access.
* They must be committed from the resource manager. Their usage flow is: commit the fence from the
* resource manager, protect resources with it and use them, send the fence to an execution queue,
* Wait for it if needed, and then call Release. Used resources will automatically be signaled
* when they are free to be reused.
* @brief Protects resources from concurrent usage and signals their release.
*/
class VKFence {
friend class VKResourceManager;
public:
explicit VKFence(const VKDevice& device);
~VKFence();
/**
* Waits for the fence to be signaled.
* @warning You must have ownership of the fence, and it must have been sent to a queue before
* calling this function.
*/
void Wait();
/**
* Releases ownership of the fence. Call this after it has been sent to an execution queue.
* Unmanaged usage of the fence after this call results in undefined behavior because it may
* already be in use for something else.
*/
void Release();
/// Protects a resource with this fence.
void Protect(VKResource* resource);
/// Removes protection for a resource.
void Unprotect(VKResource* resource);
/// Redirects one protected resource to a new address.
void RedirectProtection(VKResource* old_resource, VKResource* new_resource) noexcept;
/// Retrieves the fence.
operator VkFence() const {
return *handle;
}
private:
/// Take ownership of the fence.
void Commit();
/**
* Updates the fence status.
* @warning Waiting for the owner might soft lock the execution.
* @param gpu_wait Wait for the fence to be signaled by the driver.
* @param owner_wait Wait for the owner to signal its freedom.
* @returns True if the fence is free. Waiting for the GPU and the owner will always return true.
*/
bool Tick(bool gpu_wait, bool owner_wait);
const VKDevice& device; ///< Device handler
vk::Fence handle; ///< Vulkan fence
std::vector<VKResource*> protected_resources; ///< List of resources protected by this fence
bool is_owned = false; ///< The fence has been committed but not released yet.
bool is_used = false; ///< The fence has been committed, but it has not yet been checked to be free.
};
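The usage flow documented above maps onto call sites roughly as follows. This is a hedged sketch of the removed fence-protection pattern, based only on the interfaces declared in this header; resource_manager, resource, submit_info and graphics_queue are hypothetical names, not code from the repository.
// Hedged sketch of the old fence-protection flow removed by this commit.
VKFence& fence = resource_manager.CommitFence(); // take ownership of a free fence
fence.Protect(&resource);                        // the resource is now guarded by the fence
graphics_queue.Submit(submit_info, fence);       // submit GPU work that signals the fence
fence.Wait();                                    // optional: block until the GPU signals it
fence.Release();                                 // give up ownership after submission
// A later CommitFence() calls Tick(), which notices the signaled fence and invokes
// OnFenceRemoval() on every protected resource, marking them free again.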
/**
* A fence watch is used to keep track of the usage of a fence and protect a resource or set of
* resources without having to inherit VKResource from their handlers.
*/
class VKFenceWatch final : public VKResource {
public:
explicit VKFenceWatch();
VKFenceWatch(VKFence& initial_fence);
VKFenceWatch(VKFenceWatch&&) noexcept;
VKFenceWatch(const VKFenceWatch&) = delete;
~VKFenceWatch() override;
VKFenceWatch& operator=(VKFenceWatch&&) noexcept;
/// Waits for the fence to be released.
void Wait();
/**
* Waits for a previous fence and watches a new one.
* @param new_fence New fence to watch.
*/
void Watch(VKFence& new_fence);
/**
* Starts watching the new fence if this resource is not already being watched.
* @returns True if a watch has started, false if the resource is already being watched.
*/
bool TryWatch(VKFence& new_fence);
void OnFenceRemoval(VKFence* signaling_fence) override;
/**
* Do not pair this with Watch; use TryWatch instead.
* Returns true while a fence is still watching this resource (i.e. the watch is in use).
*/
bool IsUsed() const {
return fence != nullptr;
}
private:
VKFence* fence{}; ///< Fence watching this resource. nullptr when the watch is free.
};
/**
* Handles a pool of resources protected by fences. Manages resource overflow by allocating more
* resources.
*/
class VKFencedPool {
public:
explicit VKFencedPool(std::size_t grow_step);
virtual ~VKFencedPool();
protected:
/**
* Commits a free resource and protects it with a fence. It may allocate new resources.
* @param fence Fence that protects the committed resource.
* @returns Index of the committed resource.
*/
std::size_t CommitResource(VKFence& fence);
/// Called when a chunk of resources has to be allocated.
virtual void Allocate(std::size_t begin, std::size_t end) = 0;
private:
/// Manages pool overflow allocating new resources.
std::size_t ManageOverflow();
/// Allocates a new page of resources.
void Grow();
std::size_t grow_step = 0; ///< Number of new resources created after an overflow
std::size_t free_iterator = 0; ///< Hint to where the next free resource is likely to be found
std::vector<std::unique_ptr<VKFenceWatch>> watches; ///< Set of watched resources
};
/**
* The resource manager handles all resources that can be protected with a fence, avoiding
* driver-side or GPU-side concurrent usage. Usage is documented in VKFence.
*/
class VKResourceManager final {
public:
explicit VKResourceManager(const VKDevice& device);
~VKResourceManager();
/// Commits a fence. It has to be sent to a queue and released.
VKFence& CommitFence();
/// Commits an unused command buffer and protects it with a fence.
VkCommandBuffer CommitCommandBuffer(VKFence& fence);
private:
/// Allocates new fences.
void GrowFences(std::size_t new_fences_count);
const VKDevice& device; ///< Device handler.
std::size_t fences_iterator = 0; ///< Index where a free fence is likely to be found.
std::vector<std::unique_ptr<VKFence>> fences; ///< Pool of fences.
std::unique_ptr<CommandBufferPool> command_buffer_pool; ///< Pool of command buffers.
};
} // namespace Vulkan

View file

@ -0,0 +1,63 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <optional>
#include "video_core/renderer_vulkan/vk_master_semaphore.h"
#include "video_core/renderer_vulkan/vk_resource_pool.h"
namespace Vulkan {
ResourcePool::ResourcePool(MasterSemaphore& master_semaphore_, size_t grow_step_)
: master_semaphore{master_semaphore_}, grow_step{grow_step_} {}
ResourcePool::~ResourcePool() = default;
size_t ResourcePool::CommitResource() {
// Refresh semaphore to query updated results
master_semaphore.Refresh();
const auto search = [this](size_t begin, size_t end) -> std::optional<size_t> {
for (size_t iterator = begin; iterator < end; ++iterator) {
if (master_semaphore.IsFree(ticks[iterator])) {
ticks[iterator] = master_semaphore.CurrentTick();
return iterator;
}
}
return {};
};
// Try to find a free resource from the hinted position to the end.
auto found = search(free_iterator, ticks.size());
if (!found) {
// Search from beginning to the hinted position.
found = search(0, free_iterator);
if (!found) {
// Both searches failed, the pool is full; handle it.
const size_t free_resource = ManageOverflow();
ticks[free_resource] = master_semaphore.CurrentTick();
found = free_resource;
}
}
// Free iterator is hinted to the resource after the one that's been committed.
free_iterator = (*found + 1) % ticks.size();
return *found;
}
size_t ResourcePool::ManageOverflow() {
const size_t old_capacity = ticks.size();
Grow();
// The last entry is guaranteed to be free, since it's the first element of the freshly
// allocated resources.
return old_capacity;
}
void ResourcePool::Grow() {
const size_t old_capacity = ticks.size();
ticks.resize(old_capacity + grow_step);
Allocate(old_capacity, old_capacity + grow_step);
}
} // namespace Vulkan
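A concrete pool only stores its own objects and overrides Allocate(); CommitResource() does the tick bookkeeping. A hedged sketch of such a subclass follows (hypothetical names; the real CommandPool added by this commit is not part of this excerpt and may differ in detail):
// Hedged sketch of a ResourcePool subclass.
class ExamplePool final : public ResourcePool {
public:
    explicit ExamplePool(MasterSemaphore& semaphore, const VKDevice& device_)
        : ResourcePool(semaphore, 8), device{device_} {}

    VkCommandBuffer Commit() {
        // CommitResource() returns the index of a slot whose tick has already been
        // signaled, growing the pool when every slot is still in flight.
        return cmdbufs[CommitResource()];
    }

protected:
    void Allocate(size_t begin, size_t end) override {
        // Create one Vulkan object per new slot in [begin, end).
        cmdbufs.resize(end);
        // ... allocate command buffers for indices [begin, end) here ...
    }

private:
    const VKDevice& device;
    std::vector<VkCommandBuffer> cmdbufs;
};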

View file

@ -0,0 +1,43 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include "common/common_types.h"
namespace Vulkan {
class MasterSemaphore;
/**
* Handles a pool of resources protected by the master semaphore's ticks. Manages resource
* overflow by allocating more resources.
*/
class ResourcePool {
public:
explicit ResourcePool(MasterSemaphore& master_semaphore, size_t grow_step);
virtual ~ResourcePool();
protected:
size_t CommitResource();
/// Called when a chunk of resources has to be allocated.
virtual void Allocate(size_t begin, size_t end) = 0;
private:
/// Manages pool overflow allocating new resources.
size_t ManageOverflow();
/// Allocates a new page of resources.
void Grow();
MasterSemaphore& master_semaphore;
size_t grow_step = 0; ///< Number of new resources created after an overflow
size_t free_iterator = 0; ///< Hint to where the next free resource is likely to be found
std::vector<u64> ticks; ///< Ticks for each resource
};
} // namespace Vulkan
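ResourcePool only needs a handful of operations from MasterSemaphore. vk_master_semaphore.h is not included in this excerpt, but from its call sites in this commit its interface is presumably close to the following hedged reconstruction:
// Hedged reconstruction of the MasterSemaphore interface implied by its call sites
// (CurrentTick, NextTick, IsFree, Refresh, Wait and Handle are all used in this commit).
class MasterSemaphore {
public:
    [[nodiscard]] u64 CurrentTick() const noexcept;     // logical tick of the current submission
    [[nodiscard]] bool IsFree(u64 tick) const noexcept; // true once the GPU has signaled >= tick
    [[nodiscard]] VkSemaphore Handle() const noexcept;  // underlying timeline semaphore handle
    void NextTick() noexcept;                           // advance the logical tick before submitting
    void Refresh();                                     // query the semaphore counter from the driver
    void Wait(u64 tick);                                // block the host until the GPU reaches tick
};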

View file

@ -10,9 +10,10 @@
#include "common/microprofile.h" #include "common/microprofile.h"
#include "common/thread.h" #include "common/thread.h"
#include "video_core/renderer_vulkan/vk_command_pool.h"
#include "video_core/renderer_vulkan/vk_device.h" #include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_master_semaphore.h"
#include "video_core/renderer_vulkan/vk_query_cache.h" #include "video_core/renderer_vulkan/vk_query_cache.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_state_tracker.h" #include "video_core/renderer_vulkan/vk_state_tracker.h"
#include "video_core/renderer_vulkan/wrapper.h" #include "video_core/renderer_vulkan/wrapper.h"
@ -35,10 +36,10 @@ void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
last = nullptr; last = nullptr;
} }
VKScheduler::VKScheduler(const VKDevice& device, VKResourceManager& resource_manager, VKScheduler::VKScheduler(const VKDevice& device_, StateTracker& state_tracker_)
StateTracker& state_tracker) : device{device_}, state_tracker{state_tracker_},
: device{device}, resource_manager{resource_manager}, state_tracker{state_tracker}, master_semaphore{std::make_unique<MasterSemaphore>(device)},
next_fence{&resource_manager.CommitFence()} { command_pool{std::make_unique<CommandPool>(*master_semaphore, device)} {
AcquireNewChunk(); AcquireNewChunk();
AllocateNewContext(); AllocateNewContext();
worker_thread = std::thread(&VKScheduler::WorkerThread, this); worker_thread = std::thread(&VKScheduler::WorkerThread, this);
@ -50,20 +51,27 @@ VKScheduler::~VKScheduler() {
worker_thread.join(); worker_thread.join();
} }
void VKScheduler::Flush(bool release_fence, VkSemaphore semaphore) { u64 VKScheduler::CurrentTick() const noexcept {
return master_semaphore->CurrentTick();
}
bool VKScheduler::IsFree(u64 tick) const noexcept {
return master_semaphore->IsFree(tick);
}
void VKScheduler::Wait(u64 tick) {
master_semaphore->Wait(tick);
}
void VKScheduler::Flush(VkSemaphore semaphore) {
SubmitExecution(semaphore); SubmitExecution(semaphore);
if (release_fence) {
current_fence->Release();
}
AllocateNewContext(); AllocateNewContext();
} }
void VKScheduler::Finish(bool release_fence, VkSemaphore semaphore) { void VKScheduler::Finish(VkSemaphore semaphore) {
const u64 presubmit_tick = CurrentTick();
SubmitExecution(semaphore); SubmitExecution(semaphore);
current_fence->Wait(); Wait(presubmit_tick);
if (release_fence) {
current_fence->Release();
}
AllocateNewContext(); AllocateNewContext();
} }
@ -160,18 +168,38 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
current_cmdbuf.End(); current_cmdbuf.End();
const VkSemaphore timeline_semaphore = master_semaphore->Handle();
const u32 num_signal_semaphores = semaphore ? 2U : 1U;
const u64 signal_value = master_semaphore->CurrentTick();
const u64 wait_value = signal_value - 1;
const VkPipelineStageFlags wait_stage_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
master_semaphore->NextTick();
const std::array signal_values{signal_value, u64(0)};
const std::array signal_semaphores{timeline_semaphore, semaphore};
const VkTimelineSemaphoreSubmitInfoKHR timeline_si{
.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
.pNext = nullptr,
.waitSemaphoreValueCount = 1,
.pWaitSemaphoreValues = &wait_value,
.signalSemaphoreValueCount = num_signal_semaphores,
.pSignalSemaphoreValues = signal_values.data(),
};
const VkSubmitInfo submit_info{ const VkSubmitInfo submit_info{
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.pNext = nullptr, .pNext = &timeline_si,
.waitSemaphoreCount = 0, .waitSemaphoreCount = 1,
.pWaitSemaphores = nullptr, .pWaitSemaphores = &timeline_semaphore,
.pWaitDstStageMask = nullptr, .pWaitDstStageMask = &wait_stage_mask,
.commandBufferCount = 1, .commandBufferCount = 1,
.pCommandBuffers = current_cmdbuf.address(), .pCommandBuffers = current_cmdbuf.address(),
.signalSemaphoreCount = semaphore ? 1U : 0U, .signalSemaphoreCount = num_signal_semaphores,
.pSignalSemaphores = &semaphore, .pSignalSemaphores = signal_semaphores.data(),
}; };
switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info, *current_fence)) { switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info)) {
case VK_SUCCESS: case VK_SUCCESS:
break; break;
case VK_ERROR_DEVICE_LOST: case VK_ERROR_DEVICE_LOST:
@ -183,14 +211,9 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
} }
void VKScheduler::AllocateNewContext() { void VKScheduler::AllocateNewContext() {
++ticks;
std::unique_lock lock{mutex}; std::unique_lock lock{mutex};
current_fence = next_fence;
next_fence = &resource_manager.CommitFence();
current_cmdbuf = vk::CommandBuffer(resource_manager.CommitCommandBuffer(*current_fence), current_cmdbuf = vk::CommandBuffer(command_pool->Commit(), device.GetDispatchLoader());
device.GetDispatchLoader());
current_cmdbuf.Begin({ current_cmdbuf.Begin({
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = nullptr, .pNext = nullptr,

View file

@ -16,42 +16,33 @@
namespace Vulkan { namespace Vulkan {
class CommandPool;
class MasterSemaphore;
class StateTracker; class StateTracker;
class VKDevice; class VKDevice;
class VKFence;
class VKQueryCache; class VKQueryCache;
class VKResourceManager;
class VKFenceView {
public:
VKFenceView() = default;
VKFenceView(VKFence* const& fence) : fence{fence} {}
VKFence* operator->() const noexcept {
return fence;
}
operator VKFence&() const noexcept {
return *fence;
}
private:
VKFence* const& fence;
};
/// The scheduler abstracts command buffer and fence management with an interface that's able to do /// The scheduler abstracts command buffer and fence management with an interface that's able to do
/// OpenGL-like operations on Vulkan command buffers. /// OpenGL-like operations on Vulkan command buffers.
class VKScheduler { class VKScheduler {
public: public:
explicit VKScheduler(const VKDevice& device, VKResourceManager& resource_manager, explicit VKScheduler(const VKDevice& device, StateTracker& state_tracker);
StateTracker& state_tracker);
~VKScheduler(); ~VKScheduler();
/// Returns the current command buffer tick.
[[nodiscard]] u64 CurrentTick() const noexcept;
/// Returns true when a tick has been triggered by the GPU.
[[nodiscard]] bool IsFree(u64 tick) const noexcept;
/// Waits for the given tick to trigger on the GPU.
void Wait(u64 tick);
/// Sends the current execution context to the GPU. /// Sends the current execution context to the GPU.
void Flush(bool release_fence = true, VkSemaphore semaphore = nullptr); void Flush(VkSemaphore semaphore = nullptr);
/// Sends the current execution context to the GPU and waits for it to complete. /// Sends the current execution context to the GPU and waits for it to complete.
void Finish(bool release_fence = true, VkSemaphore semaphore = nullptr); void Finish(VkSemaphore semaphore = nullptr);
/// Waits for the worker thread to finish executing everything. After this function returns it's /// Waits for the worker thread to finish executing everything. After this function returns it's
/// safe to touch worker resources. /// safe to touch worker resources.
@ -86,14 +77,9 @@ public:
(void)chunk->Record(command); (void)chunk->Record(command);
} }
/// Gets a reference to the current fence. /// Returns the master timeline semaphore.
VKFenceView GetFence() const { [[nodiscard]] MasterSemaphore& GetMasterSemaphore() const noexcept {
return current_fence; return *master_semaphore;
}
/// Returns the current command buffer tick.
u64 Ticks() const {
return ticks;
} }
private: private:
@ -171,6 +157,13 @@ private:
std::array<u8, 0x8000> data{}; std::array<u8, 0x8000> data{};
}; };
struct State {
VkRenderPass renderpass = nullptr;
VkFramebuffer framebuffer = nullptr;
VkExtent2D render_area = {0, 0};
VkPipeline graphics_pipeline = nullptr;
};
void WorkerThread(); void WorkerThread();
void SubmitExecution(VkSemaphore semaphore); void SubmitExecution(VkSemaphore semaphore);
@ -186,30 +179,23 @@ private:
void AcquireNewChunk(); void AcquireNewChunk();
const VKDevice& device; const VKDevice& device;
VKResourceManager& resource_manager;
StateTracker& state_tracker; StateTracker& state_tracker;
std::unique_ptr<MasterSemaphore> master_semaphore;
std::unique_ptr<CommandPool> command_pool;
VKQueryCache* query_cache = nullptr; VKQueryCache* query_cache = nullptr;
vk::CommandBuffer current_cmdbuf; vk::CommandBuffer current_cmdbuf;
VKFence* current_fence = nullptr;
VKFence* next_fence = nullptr;
struct State {
VkRenderPass renderpass = nullptr;
VkFramebuffer framebuffer = nullptr;
VkExtent2D render_area = {0, 0};
VkPipeline graphics_pipeline = nullptr;
} state;
std::unique_ptr<CommandChunk> chunk; std::unique_ptr<CommandChunk> chunk;
std::thread worker_thread; std::thread worker_thread;
State state;
Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_queue; Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_queue;
Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_reserve; Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_reserve;
std::mutex mutex; std::mutex mutex;
std::condition_variable cv; std::condition_variable cv;
std::atomic<u64> ticks = 0;
bool quit = false; bool quit = false;
}; };
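The CurrentTick/IsFree/Wait trio replaces the old fence watches wherever a resource has to outlive a submission; the staging buffer, stream buffer and swapchain diffs below all follow the same pattern. Distilled into a hedged, hypothetical fragment:
// Hedged sketch of the tick-protection pattern (hypothetical Entry type; compare
// with the VKStagingBufferPool and VKSwapchain changes below).
struct Entry {
    VkBuffer object;   // any Vulkan resource touched by command buffers
    u64 tick = 0;      // tick of the last submission that used it
};

void Use(Entry& entry, VKScheduler& scheduler) {
    entry.tick = scheduler.CurrentTick(); // stamp with the current logical tick
    // ... record commands that use entry.object ...
}

bool CanReuse(const Entry& entry, VKScheduler& scheduler) {
    return scheduler.IsFree(entry.tick);  // true once the GPU has signaled that tick
}

void BlockUntilReusable(const Entry& entry, VKScheduler& scheduler) {
    scheduler.Wait(entry.tick);           // host waits on the timeline semaphore value
}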

View file

@ -10,36 +10,18 @@
#include "common/bit_util.h" #include "common/bit_util.h"
#include "common/common_types.h" #include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_device.h" #include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/wrapper.h" #include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan { namespace Vulkan {
VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr<VKBuffer> buffer, VKFence& fence, VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr<VKBuffer> buffer_)
u64 last_epoch) : buffer{std::move(buffer_)} {}
: buffer{std::move(buffer)}, watch{fence}, last_epoch{last_epoch} {}
VKStagingBufferPool::StagingBuffer::StagingBuffer(StagingBuffer&& rhs) noexcept { VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device_, VKMemoryManager& memory_manager_,
buffer = std::move(rhs.buffer); VKScheduler& scheduler_)
watch = std::move(rhs.watch); : device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_} {}
last_epoch = rhs.last_epoch;
}
VKStagingBufferPool::StagingBuffer::~StagingBuffer() = default;
VKStagingBufferPool::StagingBuffer& VKStagingBufferPool::StagingBuffer::operator=(
StagingBuffer&& rhs) noexcept {
buffer = std::move(rhs.buffer);
watch = std::move(rhs.watch);
last_epoch = rhs.last_epoch;
return *this;
}
VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager,
VKScheduler& scheduler)
: device{device}, memory_manager{memory_manager}, scheduler{scheduler} {}
VKStagingBufferPool::~VKStagingBufferPool() = default; VKStagingBufferPool::~VKStagingBufferPool() = default;
@ -51,7 +33,6 @@ VKBuffer& VKStagingBufferPool::GetUnusedBuffer(std::size_t size, bool host_visib
} }
void VKStagingBufferPool::TickFrame() { void VKStagingBufferPool::TickFrame() {
++epoch;
current_delete_level = (current_delete_level + 1) % NumLevels; current_delete_level = (current_delete_level + 1) % NumLevels;
ReleaseCache(true); ReleaseCache(true);
@ -59,11 +40,12 @@ void VKStagingBufferPool::TickFrame() {
} }
VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) { VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) {
for (auto& entry : GetCache(host_visible)[Common::Log2Ceil64(size)].entries) { for (StagingBuffer& entry : GetCache(host_visible)[Common::Log2Ceil64(size)].entries) {
if (entry.watch.TryWatch(scheduler.GetFence())) { if (!scheduler.IsFree(entry.tick)) {
entry.last_epoch = epoch; continue;
return &*entry.buffer;
} }
entry.tick = scheduler.CurrentTick();
return &*entry.buffer;
} }
return nullptr; return nullptr;
} }
@ -86,8 +68,10 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
}); });
buffer->commit = memory_manager.Commit(buffer->handle, host_visible); buffer->commit = memory_manager.Commit(buffer->handle, host_visible);
auto& entries = GetCache(host_visible)[log2].entries; std::vector<StagingBuffer>& entries = GetCache(host_visible)[log2].entries;
return *entries.emplace_back(std::move(buffer), scheduler.GetFence(), epoch).buffer; StagingBuffer& entry = entries.emplace_back(std::move(buffer));
entry.tick = scheduler.CurrentTick();
return *entry.buffer;
} }
VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) { VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) {
@ -109,9 +93,8 @@ u64 VKStagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, std::size_t lo
auto& entries = staging.entries; auto& entries = staging.entries;
const std::size_t old_size = entries.size(); const std::size_t old_size = entries.size();
const auto is_deleteable = [this](const auto& entry) { const auto is_deleteable = [this](const StagingBuffer& entry) {
static constexpr u64 epochs_to_destroy = 180; return scheduler.IsFree(entry.tick);
return entry.last_epoch + epochs_to_destroy < epoch && !entry.watch.IsUsed();
}; };
const std::size_t begin_offset = staging.delete_index; const std::size_t begin_offset = staging.delete_index;
const std::size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size); const std::size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size);

View file

@ -10,13 +10,11 @@
#include "common/common_types.h" #include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h" #include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/wrapper.h" #include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan { namespace Vulkan {
class VKDevice; class VKDevice;
class VKFenceWatch;
class VKScheduler; class VKScheduler;
struct VKBuffer final { struct VKBuffer final {
@ -36,16 +34,10 @@ public:
private: private:
struct StagingBuffer final { struct StagingBuffer final {
explicit StagingBuffer(std::unique_ptr<VKBuffer> buffer, VKFence& fence, u64 last_epoch); explicit StagingBuffer(std::unique_ptr<VKBuffer> buffer);
StagingBuffer(StagingBuffer&& rhs) noexcept;
StagingBuffer(const StagingBuffer&) = delete;
~StagingBuffer();
StagingBuffer& operator=(StagingBuffer&& rhs) noexcept;
std::unique_ptr<VKBuffer> buffer; std::unique_ptr<VKBuffer> buffer;
VKFenceWatch watch; u64 tick = 0;
u64 last_epoch = 0;
}; };
struct StagingBuffers final { struct StagingBuffers final {
@ -73,8 +65,6 @@ private:
StagingBuffersCache host_staging_buffers; StagingBuffersCache host_staging_buffers;
StagingBuffersCache device_staging_buffers; StagingBuffersCache device_staging_buffers;
u64 epoch = 0;
std::size_t current_delete_level = 0; std::size_t current_delete_level = 0;
}; };

View file

@ -11,7 +11,6 @@
#include "common/alignment.h" #include "common/alignment.h"
#include "common/assert.h" #include "common/assert.h"
#include "video_core/renderer_vulkan/vk_device.h" #include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_stream_buffer.h" #include "video_core/renderer_vulkan/vk_stream_buffer.h"
#include "video_core/renderer_vulkan/wrapper.h" #include "video_core/renderer_vulkan/wrapper.h"
@ -111,7 +110,7 @@ void VKStreamBuffer::Unmap(u64 size) {
} }
auto& watch = current_watches[current_watch_cursor++]; auto& watch = current_watches[current_watch_cursor++];
watch.upper_bound = offset; watch.upper_bound = offset;
watch.fence.Watch(scheduler.GetFence()); watch.tick = scheduler.CurrentTick();
} }
void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) { void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) {
@ -157,7 +156,7 @@ void VKStreamBuffer::WaitPendingOperations(u64 requested_upper_bound) {
while (requested_upper_bound < wait_bound && wait_cursor < *invalidation_mark) { while (requested_upper_bound < wait_bound && wait_cursor < *invalidation_mark) {
auto& watch = previous_watches[wait_cursor]; auto& watch = previous_watches[wait_cursor];
wait_bound = watch.upper_bound; wait_bound = watch.upper_bound;
watch.fence.Wait(); scheduler.Wait(watch.tick);
++wait_cursor; ++wait_cursor;
} }
} }

View file

@ -14,7 +14,6 @@
namespace Vulkan { namespace Vulkan {
class VKDevice; class VKDevice;
class VKFence;
class VKFenceWatch; class VKFenceWatch;
class VKScheduler; class VKScheduler;
@ -44,8 +43,8 @@ public:
} }
private: private:
struct Watch final { struct Watch {
VKFenceWatch fence; u64 tick{};
u64 upper_bound{}; u64 upper_bound{};
}; };

View file

@ -12,7 +12,7 @@
#include "core/core.h" #include "core/core.h"
#include "core/frontend/framebuffer_layout.h" #include "core/frontend/framebuffer_layout.h"
#include "video_core/renderer_vulkan/vk_device.h" #include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h" #include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_swapchain.h" #include "video_core/renderer_vulkan/vk_swapchain.h"
#include "video_core/renderer_vulkan/wrapper.h" #include "video_core/renderer_vulkan/wrapper.h"
@ -56,8 +56,8 @@ VkExtent2D ChooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities, u32 wi
} // Anonymous namespace } // Anonymous namespace
VKSwapchain::VKSwapchain(VkSurfaceKHR surface, const VKDevice& device) VKSwapchain::VKSwapchain(VkSurfaceKHR surface_, const VKDevice& device_, VKScheduler& scheduler_)
: surface{surface}, device{device} {} : surface{surface_}, device{device_}, scheduler{scheduler_} {}
VKSwapchain::~VKSwapchain() = default; VKSwapchain::~VKSwapchain() = default;
@ -75,21 +75,18 @@ void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
CreateSemaphores(); CreateSemaphores();
CreateImageViews(); CreateImageViews();
fences.resize(image_count, nullptr); resource_ticks.clear();
resource_ticks.resize(image_count);
} }
void VKSwapchain::AcquireNextImage() { void VKSwapchain::AcquireNextImage() {
device.GetLogical().AcquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(), device.GetLogical().AcquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(),
*present_semaphores[frame_index], {}, &image_index); *present_semaphores[frame_index], {}, &image_index);
if (auto& fence = fences[image_index]; fence) { scheduler.Wait(resource_ticks[image_index]);
fence->Wait();
fence->Release();
fence = nullptr;
}
} }
bool VKSwapchain::Present(VkSemaphore render_semaphore, VKFence& fence) { bool VKSwapchain::Present(VkSemaphore render_semaphore) {
const VkSemaphore present_semaphore{*present_semaphores[frame_index]}; const VkSemaphore present_semaphore{*present_semaphores[frame_index]};
const std::array<VkSemaphore, 2> semaphores{present_semaphore, render_semaphore}; const std::array<VkSemaphore, 2> semaphores{present_semaphore, render_semaphore};
const auto present_queue{device.GetPresentQueue()}; const auto present_queue{device.GetPresentQueue()};
@ -123,8 +120,7 @@ bool VKSwapchain::Present(VkSemaphore render_semaphore, VKFence& fence) {
break; break;
} }
ASSERT(fences[image_index] == nullptr); resource_ticks[image_index] = scheduler.CurrentTick();
fences[image_index] = &fence;
frame_index = (frame_index + 1) % static_cast<u32>(image_count); frame_index = (frame_index + 1) % static_cast<u32>(image_count);
return recreated; return recreated;
} }

View file

@ -16,11 +16,11 @@ struct FramebufferLayout;
namespace Vulkan { namespace Vulkan {
class VKDevice; class VKDevice;
class VKFence; class VKScheduler;
class VKSwapchain { class VKSwapchain {
public: public:
explicit VKSwapchain(VkSurfaceKHR surface, const VKDevice& device); explicit VKSwapchain(VkSurfaceKHR surface, const VKDevice& device, VKScheduler& scheduler);
~VKSwapchain(); ~VKSwapchain();
/// Creates (or recreates) the swapchain with a given size. /// Creates (or recreates) the swapchain with a given size.
@ -31,7 +31,7 @@ public:
/// Presents the rendered image to the swapchain. Returns true when the swapchain had to be /// Presents the rendered image to the swapchain. Returns true when the swapchain had to be
/// recreated. Takes responsibility for the ownership of the fence. /// recreated. Takes responsibility for the ownership of the fence.
bool Present(VkSemaphore render_semaphore, VKFence& fence); bool Present(VkSemaphore render_semaphore);
/// Returns true when the framebuffer layout has changed. /// Returns true when the framebuffer layout has changed.
bool HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const; bool HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const;
@ -74,6 +74,7 @@ private:
const VkSurfaceKHR surface; const VkSurfaceKHR surface;
const VKDevice& device; const VKDevice& device;
VKScheduler& scheduler;
vk::SwapchainKHR swapchain; vk::SwapchainKHR swapchain;
@ -81,7 +82,7 @@ private:
std::vector<VkImage> images; std::vector<VkImage> images;
std::vector<vk::ImageView> image_views; std::vector<vk::ImageView> image_views;
std::vector<vk::Framebuffer> framebuffers; std::vector<vk::Framebuffer> framebuffers;
std::vector<VKFence*> fences; std::vector<u64> resource_ticks;
std::vector<vk::Semaphore> present_semaphores; std::vector<vk::Semaphore> present_semaphores;
u32 image_index{}; u32 image_index{};

View file

@ -188,13 +188,11 @@ u32 EncodeSwizzle(Tegra::Texture::SwizzleSource x_source, Tegra::Texture::Swizzl
} // Anonymous namespace } // Anonymous namespace
CachedSurface::CachedSurface(const VKDevice& device, VKResourceManager& resource_manager, CachedSurface::CachedSurface(const VKDevice& device, VKMemoryManager& memory_manager,
VKMemoryManager& memory_manager, VKScheduler& scheduler, VKScheduler& scheduler, VKStagingBufferPool& staging_pool,
VKStagingBufferPool& staging_pool, GPUVAddr gpu_addr, GPUVAddr gpu_addr, const SurfaceParams& params)
const SurfaceParams& params)
: SurfaceBase<View>{gpu_addr, params, device.IsOptimalAstcSupported()}, device{device}, : SurfaceBase<View>{gpu_addr, params, device.IsOptimalAstcSupported()}, device{device},
resource_manager{resource_manager}, memory_manager{memory_manager}, scheduler{scheduler}, memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} {
staging_pool{staging_pool} {
if (params.IsBuffer()) { if (params.IsBuffer()) {
buffer = CreateBuffer(device, params, host_memory_size); buffer = CreateBuffer(device, params, host_memory_size);
commit = memory_manager.Commit(buffer, false); commit = memory_manager.Commit(buffer, false);
@ -493,18 +491,17 @@ VkImageView CachedSurfaceView::GetAttachment() {
VKTextureCache::VKTextureCache(VideoCore::RasterizerInterface& rasterizer, VKTextureCache::VKTextureCache(VideoCore::RasterizerInterface& rasterizer,
Tegra::Engines::Maxwell3D& maxwell3d, Tegra::Engines::Maxwell3D& maxwell3d,
Tegra::MemoryManager& gpu_memory, const VKDevice& device_, Tegra::MemoryManager& gpu_memory, const VKDevice& device_,
VKResourceManager& resource_manager_,
VKMemoryManager& memory_manager_, VKScheduler& scheduler_, VKMemoryManager& memory_manager_, VKScheduler& scheduler_,
VKStagingBufferPool& staging_pool_) VKStagingBufferPool& staging_pool_)
: TextureCache(rasterizer, maxwell3d, gpu_memory, device_.IsOptimalAstcSupported()), : TextureCache(rasterizer, maxwell3d, gpu_memory, device_.IsOptimalAstcSupported()),
device{device_}, resource_manager{resource_manager_}, device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{
memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{staging_pool_} {} staging_pool_} {}
VKTextureCache::~VKTextureCache() = default; VKTextureCache::~VKTextureCache() = default;
Surface VKTextureCache::CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) { Surface VKTextureCache::CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) {
return std::make_shared<CachedSurface>(device, resource_manager, memory_manager, scheduler, return std::make_shared<CachedSurface>(device, memory_manager, scheduler, staging_pool,
staging_pool, gpu_addr, params); gpu_addr, params);
} }
void VKTextureCache::ImageCopy(Surface& src_surface, Surface& dst_surface, void VKTextureCache::ImageCopy(Surface& src_surface, Surface& dst_surface,

View file

@ -23,7 +23,6 @@ namespace Vulkan {
class RasterizerVulkan; class RasterizerVulkan;
class VKDevice; class VKDevice;
class VKResourceManager;
class VKScheduler; class VKScheduler;
class VKStagingBufferPool; class VKStagingBufferPool;
@ -41,10 +40,9 @@ class CachedSurface final : public VideoCommon::SurfaceBase<View> {
friend CachedSurfaceView; friend CachedSurfaceView;
public: public:
explicit CachedSurface(const VKDevice& device, VKResourceManager& resource_manager, explicit CachedSurface(const VKDevice& device, VKMemoryManager& memory_manager,
VKMemoryManager& memory_manager, VKScheduler& scheduler, VKScheduler& scheduler, VKStagingBufferPool& staging_pool,
VKStagingBufferPool& staging_pool, GPUVAddr gpu_addr, GPUVAddr gpu_addr, const SurfaceParams& params);
const SurfaceParams& params);
~CachedSurface(); ~CachedSurface();
void UploadTexture(const std::vector<u8>& staging_buffer) override; void UploadTexture(const std::vector<u8>& staging_buffer) override;
@ -98,7 +96,6 @@ private:
VkImageSubresourceRange GetImageSubresourceRange() const; VkImageSubresourceRange GetImageSubresourceRange() const;
const VKDevice& device; const VKDevice& device;
VKResourceManager& resource_manager;
VKMemoryManager& memory_manager; VKMemoryManager& memory_manager;
VKScheduler& scheduler; VKScheduler& scheduler;
VKStagingBufferPool& staging_pool; VKStagingBufferPool& staging_pool;
@ -198,9 +195,8 @@ class VKTextureCache final : public TextureCacheBase {
public: public:
explicit VKTextureCache(VideoCore::RasterizerInterface& rasterizer, explicit VKTextureCache(VideoCore::RasterizerInterface& rasterizer,
Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory, Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
const VKDevice& device, VKResourceManager& resource_manager, const VKDevice& device, VKMemoryManager& memory_manager,
VKMemoryManager& memory_manager, VKScheduler& scheduler, VKScheduler& scheduler, VKStagingBufferPool& staging_pool);
VKStagingBufferPool& staging_pool);
~VKTextureCache(); ~VKTextureCache();
private: private:
@ -215,7 +211,6 @@ private:
void BufferCopy(Surface& src_surface, Surface& dst_surface) override; void BufferCopy(Surface& src_surface, Surface& dst_surface) override;
const VKDevice& device; const VKDevice& device;
VKResourceManager& resource_manager;
VKMemoryManager& memory_manager; VKMemoryManager& memory_manager;
VKScheduler& scheduler; VKScheduler& scheduler;
VKStagingBufferPool& staging_pool; VKStagingBufferPool& staging_pool;

View file

@ -148,6 +148,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkGetFenceStatus); X(vkGetFenceStatus);
X(vkGetImageMemoryRequirements); X(vkGetImageMemoryRequirements);
X(vkGetQueryPoolResults); X(vkGetQueryPoolResults);
X(vkGetSemaphoreCounterValueKHR);
X(vkMapMemory); X(vkMapMemory);
X(vkQueueSubmit); X(vkQueueSubmit);
X(vkResetFences); X(vkResetFences);
@ -156,6 +157,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkUpdateDescriptorSetWithTemplateKHR); X(vkUpdateDescriptorSetWithTemplateKHR);
X(vkUpdateDescriptorSets); X(vkUpdateDescriptorSets);
X(vkWaitForFences); X(vkWaitForFences);
X(vkWaitSemaphoresKHR);
#undef X #undef X
} }
@ -574,7 +576,10 @@ Semaphore Device::CreateSemaphore() const {
.pNext = nullptr, .pNext = nullptr,
.flags = 0, .flags = 0,
}; };
return CreateSemaphore(ci);
}
Semaphore Device::CreateSemaphore(const VkSemaphoreCreateInfo& ci) const {
VkSemaphore object; VkSemaphore object;
Check(dld->vkCreateSemaphore(handle, &ci, nullptr, &object)); Check(dld->vkCreateSemaphore(handle, &ci, nullptr, &object));
return Semaphore(object, handle, *dld); return Semaphore(object, handle, *dld);
@ -660,7 +665,7 @@ ShaderModule Device::CreateShaderModule(const VkShaderModuleCreateInfo& ci) cons
return ShaderModule(object, handle, *dld); return ShaderModule(object, handle, *dld);
} }
Event Device::CreateNewEvent() const { Event Device::CreateEvent() const {
static constexpr VkEventCreateInfo ci{ static constexpr VkEventCreateInfo ci{
.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, .sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO,
.pNext = nullptr, .pNext = nullptr,

View file

@ -267,6 +267,7 @@ struct DeviceDispatch : public InstanceDispatch {
PFN_vkGetFenceStatus vkGetFenceStatus; PFN_vkGetFenceStatus vkGetFenceStatus;
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
PFN_vkGetQueryPoolResults vkGetQueryPoolResults; PFN_vkGetQueryPoolResults vkGetQueryPoolResults;
PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;
PFN_vkMapMemory vkMapMemory; PFN_vkMapMemory vkMapMemory;
PFN_vkQueueSubmit vkQueueSubmit; PFN_vkQueueSubmit vkQueueSubmit;
PFN_vkResetFences vkResetFences; PFN_vkResetFences vkResetFences;
@ -275,6 +276,7 @@ struct DeviceDispatch : public InstanceDispatch {
PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR; PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;
PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets; PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;
PFN_vkWaitForFences vkWaitForFences; PFN_vkWaitForFences vkWaitForFences;
PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;
}; };
/// Loads instance agnostic function pointers. /// Loads instance agnostic function pointers.
@ -550,7 +552,6 @@ using PipelineLayout = Handle<VkPipelineLayout, VkDevice, DeviceDispatch>;
using QueryPool = Handle<VkQueryPool, VkDevice, DeviceDispatch>; using QueryPool = Handle<VkQueryPool, VkDevice, DeviceDispatch>;
using RenderPass = Handle<VkRenderPass, VkDevice, DeviceDispatch>; using RenderPass = Handle<VkRenderPass, VkDevice, DeviceDispatch>;
using Sampler = Handle<VkSampler, VkDevice, DeviceDispatch>; using Sampler = Handle<VkSampler, VkDevice, DeviceDispatch>;
using Semaphore = Handle<VkSemaphore, VkDevice, DeviceDispatch>;
using ShaderModule = Handle<VkShaderModule, VkDevice, DeviceDispatch>; using ShaderModule = Handle<VkShaderModule, VkDevice, DeviceDispatch>;
using SurfaceKHR = Handle<VkSurfaceKHR, VkInstance, InstanceDispatch>; using SurfaceKHR = Handle<VkSurfaceKHR, VkInstance, InstanceDispatch>;
@ -582,7 +583,8 @@ public:
/// Construct a queue handle. /// Construct a queue handle.
constexpr Queue(VkQueue queue, const DeviceDispatch& dld) noexcept : queue{queue}, dld{&dld} {} constexpr Queue(VkQueue queue, const DeviceDispatch& dld) noexcept : queue{queue}, dld{&dld} {}
VkResult Submit(Span<VkSubmitInfo> submit_infos, VkFence fence) const noexcept { VkResult Submit(Span<VkSubmitInfo> submit_infos,
VkFence fence = VK_NULL_HANDLE) const noexcept {
return dld->vkQueueSubmit(queue, submit_infos.size(), submit_infos.data(), fence); return dld->vkQueueSubmit(queue, submit_infos.size(), submit_infos.data(), fence);
} }
@ -674,6 +676,44 @@ public:
} }
}; };
class Semaphore : public Handle<VkSemaphore, VkDevice, DeviceDispatch> {
using Handle<VkSemaphore, VkDevice, DeviceDispatch>::Handle;
public:
[[nodiscard]] u64 GetCounter() const {
u64 value;
Check(dld->vkGetSemaphoreCounterValueKHR(owner, handle, &value));
return value;
}
/**
* Waits for a timeline semaphore on the host.
*
* @param value Value to wait for
* @param timeout Time in nanoseconds before timing out
* @return True on successful wait, false on timeout
*/
bool Wait(u64 value, u64 timeout = std::numeric_limits<u64>::max()) const {
const VkSemaphoreWaitInfoKHR wait_info{
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR,
.pNext = nullptr,
.flags = 0,
.semaphoreCount = 1,
.pSemaphores = &handle,
.pValues = &value,
};
const VkResult result = dld->vkWaitSemaphoresKHR(owner, &wait_info, timeout);
switch (result) {
case VK_SUCCESS:
return true;
case VK_TIMEOUT:
return false;
default:
throw Exception(result);
}
}
};
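MasterSemaphore itself is not part of this diff, but given the wrapper above and the new Device::CreateSemaphore overload, creating and polling the timeline semaphore presumably looks like this hedged sketch (the structures and enums come from the VK_KHR_timeline_semaphore extension; device and tick are hypothetical surrounding code):
// Hedged sketch: create a timeline semaphore with the new wrapper API and check
// whether a previously recorded tick has been reached.
const VkSemaphoreTypeCreateInfoKHR type_ci{
    .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
    .pNext = nullptr,
    .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
    .initialValue = 0,
};
const VkSemaphoreCreateInfo ci{
    .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
    .pNext = &type_ci,
    .flags = 0,
};
vk::Semaphore semaphore = device.GetLogical().CreateSemaphore(ci);

const u64 tick = 42;                 // hypothetical tick recorded when a resource was used
if (semaphore.GetCounter() < tick) { // the GPU has not reached the tick yet
    semaphore.Wait(tick);            // block on the host (optionally with a timeout)
}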
class Device : public Handle<VkDevice, NoOwner, DeviceDispatch> { class Device : public Handle<VkDevice, NoOwner, DeviceDispatch> {
using Handle<VkDevice, NoOwner, DeviceDispatch>::Handle; using Handle<VkDevice, NoOwner, DeviceDispatch>::Handle;
@ -694,6 +734,8 @@ public:
Semaphore CreateSemaphore() const; Semaphore CreateSemaphore() const;
Semaphore CreateSemaphore(const VkSemaphoreCreateInfo& ci) const;
Fence CreateFence(const VkFenceCreateInfo& ci) const; Fence CreateFence(const VkFenceCreateInfo& ci) const;
DescriptorPool CreateDescriptorPool(const VkDescriptorPoolCreateInfo& ci) const; DescriptorPool CreateDescriptorPool(const VkDescriptorPoolCreateInfo& ci) const;
@ -721,7 +763,7 @@ public:
ShaderModule CreateShaderModule(const VkShaderModuleCreateInfo& ci) const; ShaderModule CreateShaderModule(const VkShaderModuleCreateInfo& ci) const;
Event CreateNewEvent() const; Event CreateEvent() const;
SwapchainKHR CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const; SwapchainKHR CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const;

View file

@ -9,6 +9,17 @@
#include <shared_mutex> #include <shared_mutex>
#include <thread> #include <thread>
// This header includes both Vulkan and OpenGL headers; this has to be fixed.
// Unfortunately, including OpenGL pulls in Windows.h, which defines macros that can cause issues.
// Forcefully include glad early and undefine the offending macros.
#include <glad/glad.h>
#ifdef CreateEvent
#undef CreateEvent
#endif
#ifdef CreateSemaphore
#undef CreateSemaphore
#endif
#include "common/common_types.h" #include "common/common_types.h"
#include "video_core/renderer_opengl/gl_device.h" #include "video_core/renderer_opengl/gl_device.h"
#include "video_core/renderer_opengl/gl_resource_manager.h" #include "video_core/renderer_opengl/gl_resource_manager.h"