skyline/app/src/main/cpp/skyline/gpu/fence_cycle.h
PixelyIon 216e5cee81 Separate Guest and Host Presentation + AChoreographer V-Sync Event
We had issues when combining host and guest presentation, since certain guest presentation configurations, such as double buffering, were very suboptimal for the host and would significantly affect the FPS. As a result, host presentation now has its own presentation textures, which are copied into from the guest at presentation time, allowing us to change the parameters of host presentation independently of the guest.

We've implemented the infrastructure for this, which includes being able to create images from host GPU memory using VMA, an optimized linear texture sync, and a method for performing on-GPU texture-to-texture copies.
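
As a rough illustration, allocating an image backed by host GPU memory through VMA generally follows the pattern below (a minimal sketch against the public VMA API; the helper name and error handling are assumptions, not code from this change):

    #include <stdexcept>
    #include <vk_mem_alloc.h>

    // Hypothetical helper: creates a device-local image with its backing memory
    // allocated and bound by VMA in a single call
    VkImage CreateImage(VmaAllocator allocator, const VkImageCreateInfo &imageInfo, VmaAllocation &allocation) {
        VmaAllocationCreateInfo allocationInfo{};
        allocationInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // Device-local memory, suitable for presentation textures

        VkImage image{};
        if (vmaCreateImage(allocator, &imageInfo, &allocationInfo, &image, &allocation, nullptr) != VK_SUCCESS)
            throw std::runtime_error("Failed to create image from host GPU memory");
        return image; // The caller retains 'allocation' so it can later call vmaDestroyImage
    }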

We've also moved to driving the V-Sync event using AChoreographer on its own thread in this PR, which more accurately encapsulates HOS behavior and allows games such as ARMS to boot, as they depend on the V-Sync event being signalled even when the game isn't presenting.
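
For context, driving a per-frame event from AChoreographer on a dedicated thread looks roughly like this (a hedged sketch using the NDK API; the VsyncEvent type and thread entry point are hypothetical stand-ins, not Skyline's actual types):

    #include <android/choreographer.h>
    #include <android/looper.h>

    struct VsyncEvent { void Signal(); }; // Hypothetical stand-in for the guest V-Sync event

    static void FrameCallback(long frameTimeNanos, void *data) {
        static_cast<VsyncEvent *>(data)->Signal(); // Signal V-Sync even if the game isn't presenting
        // Frame callbacks are one-shot, so re-register for the next display refresh
        AChoreographer_postFrameCallback(AChoreographer_getInstance(), FrameCallback, data);
    }

    void VsyncThread(VsyncEvent *event) {
        ALooper_prepare(0); // AChoreographer requires a looper on the calling thread
        AChoreographer_postFrameCallback(AChoreographer_getInstance(), FrameCallback, event);
        while (true)
            ALooper_pollOnce(-1, nullptr, nullptr, nullptr); // Dispatch the choreographer's callbacks
    }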
2021-07-12 21:27:49 +05:30

// SPDX-License-Identifier: MPL-2.0
// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)

#pragma once

#include <forward_list>
#include <vulkan/vulkan_raii.hpp>
#include <common.h>

namespace skyline::gpu {
    struct FenceCycle;

    /**
     * @brief Any object whose lifetime can be attached to a fence cycle needs to inherit this class
     */
    struct FenceCycleDependency {
      private:
        std::shared_ptr<FenceCycleDependency> next{}; //!< A shared pointer to the next dependency to form a linked list
        friend FenceCycle;
    };
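
    // Illustrative usage (an assumption, not part of this header): a GPU resource opts
    // into lifetime tracking by inheriting this class, e.g.
    //   struct Texture : public FenceCycleDependency { ... };
    // after which a FenceCycle can keep it alive via AttachObject until the fence signals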

    /**
     * @brief A wrapper around a Vulkan Fence which only tracks a single reset -> signal cycle with the ability to attach lifetimes of objects to it
     * @note This provides the guarantee that the fence must be signalled prior to destruction when objects are to be destroyed
     * @note All waits on the fence **must** be done through the same instance of this; the state of the fence changing externally will lead to UB
     */
    struct FenceCycle {
      private:
        std::atomic_flag signalled;
        const vk::raii::Device &device;
        vk::Fence fence;
        std::shared_ptr<FenceCycleDependency> list;

        /**
         * @brief Sequentially iterate through the shared_ptr linked list of dependencies and reset all pointers in a thread-safe atomic manner
         * @note We cannot simply nullify the base pointer of the list as a false dependency chain is maintained between the objects when retained externally
         */
        void DestroyDependencies() {
            auto current{std::atomic_exchange_explicit(&list, std::shared_ptr<FenceCycleDependency>{}, std::memory_order_acquire)};
            while (current) {
                std::shared_ptr<FenceCycleDependency> next{};
                next.swap(current->next);
                current.swap(next);
            }
        }

      public:
        FenceCycle(const vk::raii::Device &device, vk::Fence fence) : signalled(false), device(device), fence(fence) {
            device.resetFences(fence);
        }

        ~FenceCycle() {
            Wait();
        }

        /**
         * @brief Wait on a fence cycle till it has been signalled
         */
        void Wait() {
            if (signalled.test(std::memory_order_consume))
                return;
            while (device.waitForFences(fence, false, std::numeric_limits<u64>::max()) != vk::Result::eSuccess);
            // test_and_set returns the previous value, so only the first thread to mark the cycle signalled destroys the dependencies
            if (!signalled.test_and_set(std::memory_order_release))
                DestroyDependencies();
        }

        /**
         * @brief Wait on a fence cycle with a timeout in nanoseconds
         * @return If the wait was successful or timed out
         */
        bool Wait(std::chrono::duration<u64, std::nano> timeout) {
            if (signalled.test(std::memory_order_consume))
                return true;
            if (device.waitForFences(fence, false, timeout.count()) == vk::Result::eSuccess) {
                if (!signalled.test_and_set(std::memory_order_release))
                    DestroyDependencies();
                return true;
            } else {
                return false;
            }
        }

        /**
         * @return If the fence is signalled currently or not
         */
        bool Poll() {
            if (signalled.test(std::memory_order_consume))
                return true;
            if ((*device).getFenceStatus(fence, *device.getDispatcher()) == vk::Result::eSuccess) {
                if (!signalled.test_and_set(std::memory_order_release))
                    DestroyDependencies();
                return true;
            } else {
                return false;
            }
        }

        /**
         * @brief Attach the lifetime of an object to the fence being signalled
         */
        void AttachObject(const std::shared_ptr<FenceCycleDependency> &dependency) {
            if (!signalled.test(std::memory_order_consume)) {
                std::shared_ptr<FenceCycleDependency> next{std::atomic_load_explicit(&list, std::memory_order_consume)};
                do {
                    dependency->next = next;
                    // If the list was empty and the cycle was signalled in the meantime, the dependency no longer needs to be retained
                    if (!next && signalled.test(std::memory_order_consume))
                        return;
                } while (!std::atomic_compare_exchange_strong_explicit(&list, &next, dependency, std::memory_order_release, std::memory_order_consume));
            }
        }

        /**
         * @brief A version of AttachObject optimized for several objects being attached at once
         */
        void AttachObjects(std::initializer_list<std::shared_ptr<FenceCycleDependency>> dependencies) {
            if (!signalled.test(std::memory_order_consume)) {
                {
                    // Chain the dependencies amongst themselves before attaching the chain to the list
                    auto it{dependencies.begin()}, next{std::next(it)};
                    while (next != dependencies.end()) {
                        (*it)->next = *next;
                        it = next++;
                    }
                }
                const auto &first{*dependencies.begin()};
                const auto &last{*(dependencies.end() - 1)};
                std::shared_ptr<FenceCycleDependency> next{std::atomic_load_explicit(&list, std::memory_order_consume)};
                do {
                    last->next = next;
                    if (!next && signalled.test(std::memory_order_consume)) {
                        // If the cycle was signalled in the meantime, it has already destroyed its dependencies, so we need to unchain ours the same way
                        std::shared_ptr<FenceCycleDependency> current{first};
                        while (current) {
                            next.swap(current->next);
                            current.swap(next);
                            next.reset();
                        }
                        return;
                    }
                } while (!std::atomic_compare_exchange_strong(&list, &next, first));
            }
        }

        template<typename... Dependencies>
        void AttachObjects(Dependencies &&... dependencies) {
            AttachObjects(std::initializer_list<std::shared_ptr<FenceCycleDependency>>{std::forward<Dependencies>(dependencies)...});
        }
    };
}
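
A typical pattern for using this class (a hypothetical sketch, not code from this tree; the queue, submitInfo, and resource names are assumptions) is to wrap the fence passed to a queue submission and attach every resource the submitted work touches:

    auto cycle{std::make_shared<skyline::gpu::FenceCycle>(device, *fence)};
    queue.submit(submitInfo, *fence); // The fence is signalled once the submitted work completes

    cycle->AttachObjects(texture, buffer); // Both outlive the GPU work even if dropped elsewhere

    if (!cycle->Poll())
        cycle->Wait(); // Blocks till the fence signals, then releases all attached dependencies

Here, texture and buffer would be shared_ptrs to objects inheriting FenceCycleDependency.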