Revamp Host1X Syncpoint + Address Review Comments

PixelyIon 2021-06-19 01:18:29 +05:30, committed by Mark
parent 216e5cee81
commit b2bb855a02
13 changed files with 117 additions and 99 deletions

View File

@@ -42,7 +42,7 @@ namespace skyline::gpu {
     void PresentationEngine::ChoreographerThread() {
         choreographerLooper = ALooper_prepare(0);
         AChoreographer_postFrameCallback(AChoreographer_getInstance(), reinterpret_cast<AChoreographer_frameCallback>(&ChoreographerCallback), vsyncEvent.get());
-        ALooper_pollAll(-1, nullptr, nullptr, nullptr);
+        ALooper_pollAll(-1, nullptr, nullptr, nullptr); // Will block and process callbacks till ALooper_wake() is called
     }
 
     service::hosbinder::NativeWindowTransform GetAndroidTransform(vk::SurfaceTransformFlagBitsKHR transform) {
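The comment added above documents the choreographer thread's lifecycle. As a standalone illustration of the NDK pattern in question (not skyline code; OnFrame and the callback body are hypothetical), a thread attaches a looper to itself, re-posts the frame callback every vsync, and sits in ALooper_pollAll() until another thread calls ALooper_wake() on its looper:

    #include <android/choreographer.h>
    #include <android/looper.h>

    // Hypothetical frame callback: does per-vsync work, then re-arms itself for the next frame
    static void OnFrame(long frameTimeNanos, void *data) {
        // ... signal a vsync event to the presentation thread here ...
        AChoreographer_postFrameCallback(AChoreographer_getInstance(), OnFrame, data);
    }

    static void ChoreographerThread() {
        ALooper_prepare(0); // Attach a looper to this thread so AChoreographer can dispatch on it
        AChoreographer_postFrameCallback(AChoreographer_getInstance(), OnFrame, nullptr);
        ALooper_pollAll(-1, nullptr, nullptr, nullptr); // Blocks and runs callbacks till ALooper_wake() is called
    }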
@@ -70,8 +70,8 @@ namespace skyline::gpu {
     void PresentationEngine::UpdateSwapchain(texture::Format format, texture::Dimensions extent) {
         auto minImageCount{std::max(vkSurfaceCapabilities.minImageCount, state.settings->forceTripleBuffering ? 3U : 0U)};
-        if (minImageCount > MaxSlotCount)
-            throw exception("Requesting swapchain with higher image count ({}) than maximum slot count ({})", minImageCount, MaxSlotCount);
+        if (minImageCount > MaxSwapchainImageCount)
+            throw exception("Requesting swapchain with higher image count ({}) than maximum swapchain image count ({})", minImageCount, MaxSwapchainImageCount);
 
         const auto &capabilities{vkSurfaceCapabilities};
         if (minImageCount < capabilities.minImageCount || (capabilities.maxImageCount && minImageCount > capabilities.maxImageCount))
@@ -104,16 +104,17 @@ namespace skyline::gpu {
         });
 
         auto vkImages{vkSwapchain->getImages()};
-        if (vkImages.size() > MaxSlotCount)
-            throw exception("Swapchain has higher image count ({}) than maximum slot count ({})", vkImages.size(), MaxSlotCount);
+        if (vkImages.size() > MaxSwapchainImageCount)
+            throw exception("Swapchain has higher image count ({}) than maximum swapchain image count ({})", vkImages.size(), MaxSwapchainImageCount);
 
         for (size_t index{}; index < vkImages.size(); index++) {
-            auto &slot{slots[index]};
+            auto &slot{images[index]};
             slot = std::make_shared<Texture>(*state.gpu, vkImages[index], extent, format::GetFormat(format), vk::ImageLayout::eUndefined, vk::ImageTiling::eOptimal);
             slot->TransitionLayout(vk::ImageLayout::ePresentSrcKHR);
         }
-        for (size_t index{vkImages.size()}; index < MaxSlotCount; index++)
-            slots[index] = {};
+        for (size_t index{vkImages.size()}; index < MaxSwapchainImageCount; index++)
+            // We need to clear all the slots which aren't filled; keeping around stale slots could lead to issues
+            images[index] = {};
 
         swapchainFormat = format;
         swapchainExtent = extent;
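Both hunks above guard the same invariant: the swapchain can never contain more images than the fixed images array can hold, while the requested count must still fall within the surface's reported bounds. A minimal standalone sketch of that validation with assumed names (skyline throws rather than clamps when the backing array would overflow):

    #include <stdexcept>
    #include <vulkan/vulkan.hpp>

    constexpr size_t MaxSwapchainImageCount{6}; // Mirrors the renamed constant above

    uint32_t ValidateImageCount(uint32_t requested, const vk::SurfaceCapabilitiesKHR &capabilities) {
        if (requested > MaxSwapchainImageCount)
            throw std::runtime_error("Requested image count exceeds the backing array");
        if (requested < capabilities.minImageCount)
            requested = capabilities.minImageCount; // The surface demands at least this many images
        if (capabilities.maxImageCount && requested > capabilities.maxImageCount) // 0 means no upper limit
            requested = capabilities.maxImageCount;
        return requested;
    }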
@@ -157,14 +158,15 @@ namespace skyline::gpu {
             UpdateSwapchain(texture->format, texture->dimensions);
 
         std::pair<vk::Result, u32> nextImage;
-        while ((nextImage = vkSwapchain->acquireNextImage(std::numeric_limits<u64>::max(), {}, *acquireFence)).first != vk::Result::eSuccess) [[unlikely]]
+        while (nextImage = vkSwapchain->acquireNextImage(std::numeric_limits<u64>::max(), {}, *acquireFence), nextImage.first != vk::Result::eSuccess) [[unlikely]] {
             if (nextImage.first == vk::Result::eSuboptimalKHR)
                 surfaceCondition.wait(lock, [this]() { return vkSurface.has_value(); });
             else
                 throw exception("vkAcquireNextImageKHR returned an unhandled result '{}'", vk::to_string(nextImage.first));
-        while (gpu.vkDevice.waitForFences(*acquireFence, true, std::numeric_limits<u64>::max()) == vk::Result::eTimeout);
-
-        slots.at(nextImage.second)->CopyFrom(texture);
+        }
+
+        static_cast<void>(gpu.vkDevice.waitForFences(*acquireFence, true, std::numeric_limits<u64>::max()));
+        images.at(nextImage.second)->CopyFrom(texture);
 
         {
             std::lock_guard queueLock(gpu.queueMutex);
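The rewritten loop uses the comma operator so nextImage is reassigned on every retry, and the single fence wait replaces the old busy timeout loop: with an infinite timeout, vkWaitForFences can only return success or an error, so its result is safe to discard. A simplified standalone sketch of the acquire-then-wait pattern in Vulkan-Hpp (function and parameter names are assumptions):

    #include <limits>
    #include <stdexcept>
    #include <vulkan/vulkan.hpp>

    uint32_t AcquireImage(vk::Device device, vk::SwapchainKHR swapchain, vk::Fence acquireFence) {
        auto nextImage{device.acquireNextImageKHR(swapchain, std::numeric_limits<uint64_t>::max(), {}, acquireFence)};
        if (nextImage.result != vk::Result::eSuccess && nextImage.result != vk::Result::eSuboptimalKHR)
            throw std::runtime_error("vkAcquireNextImageKHR failed: " + vk::to_string(nextImage.result));

        // The index is returned immediately but the image is only safe to touch once the fence signals
        static_cast<void>(device.waitForFences(acquireFence, true, std::numeric_limits<uint64_t>::max()));
        device.resetFences(acquireFence); // Allow the fence to be reused by the next acquire
        return nextImage.value;
    }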

View File

@@ -32,8 +32,8 @@ namespace skyline::gpu {
         texture::Format swapchainFormat{}; //!< The image format of the textures in the current swapchain
         texture::Dimensions swapchainExtent{}; //!< The extent of images in the current swapchain
 
-        static constexpr size_t MaxSlotCount{6}; //!< The maximum amount of queue slots, this affects the amount of images that can be in the swapchain
-        std::array<std::shared_ptr<Texture>, MaxSlotCount> slots; //!< The backing for storing all slots and sorted in the same order as supplied by the Vulkan swapchain
+        static constexpr size_t MaxSwapchainImageCount{6}; //!< The maximum amount of swapchain textures, this affects the amount of images that can be in the swapchain
+        std::array<std::shared_ptr<Texture>, MaxSwapchainImageCount> images; //!< All the swapchain textures in the same order as supplied by the host swapchain
 
         u64 frameTimestamp{}; //!< The timestamp of the last frame being shown
         perfetto::Track presentationTrack; //!< Perfetto track used for presentation events

View File

@@ -12,7 +12,7 @@ namespace skyline::gpu::format {
     constexpr Format RGB565Unorm{sizeof(u8) * 2, 1, 1, vk::Format::eR5G6B5UnormPack16}; //!< Red channel: 5-bit, Green channel: 6-bit, Blue channel: 5-bit
 
     /**
-     * @brief Converts a format from Vulkan to a Skyline format
+     * @brief Converts a Vulkan format to a Skyline format
      */
     constexpr const Format &GetFormat(vk::Format format) {
         switch (format) {

View File

@@ -140,12 +140,14 @@ namespace skyline::gpu {
         u8 *bufferData;
         auto stagingBuffer{[&]() -> std::shared_ptr<memory::StagingBuffer> {
             if (tiling == vk::ImageTiling::eOptimal || !std::holds_alternative<memory::Image>(backing)) {
+                // We need a staging buffer for all optimal copies (since we aren't aware of the host's optimal layout) and for linear textures which we cannot map on the CPU since we don't have access to their backing VkDeviceMemory
                 auto stagingBuffer{gpu.memory.AllocateStagingBuffer(size)};
                 bufferData = stagingBuffer->data();
                 return stagingBuffer;
             } else if (tiling == vk::ImageTiling::eLinear) {
+                // We can optimize linear texture sync on a UMA by mapping the texture onto the CPU and copying directly into it rather than through a staging buffer
                 bufferData = std::get<memory::Image>(backing).data();
-                WaitOnFence();
+                WaitOnFence(); // We need to wait on the fence here since we mutate the texture directly afterwards; the wait can be deferred till the copy when a staging buffer is used
                 return nullptr;
             } else {
                 throw exception("Guest -> Host synchronization of images tiled as '{}' isn't implemented", vk::to_string(tiling));
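The lambda above picks between two upload paths, which the new comments document. Schematically, as a standalone sketch with hypothetical names (the real code records a vkCmdCopyBufferToImage for the staging path rather than finishing the copy on the CPU):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    void SyncGuestToHost(bool needsStaging, const uint8_t *guestData, size_t size, uint8_t *mappedImage) {
        if (needsStaging) {
            // Optimal tiling (or no CPU-visible backing): copy into a staging buffer now;
            // the GPU-side buffer -> image copy happens later, so no fence wait is needed yet
            std::vector<uint8_t> staging(size);
            std::memcpy(staging.data(), guestData, size);
            // ... record and submit the buffer -> image copy here ...
        } else {
            // Linear tiling on a UMA device: the image memory is host-mapped, so after
            // waiting for pending GPU work (WaitOnFence in the diff) we can write in place
            std::memcpy(mappedImage, guestData, size);
        }
    }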

View File

@@ -50,7 +50,6 @@ namespace skyline::service::hosbinder {
         std::lock_guard guard(mutex);
 
         auto buffer{queue.end()};
+        size_t dequeuedSlotCount{};
         while (true) {
-            size_t dequeuedSlotCount{};
             for (auto it{queue.begin()}; it != queue.end(); it++) {
                 // We want to select the oldest slot that's free to use as we'd want all slots to be used
@@ -58,20 +57,25 @@ namespace skyline::service::hosbinder {
                 if (it->state == BufferState::Free && it->texture) {
                     if (buffer == queue.end() || it->frameNumber < buffer->frameNumber)
                         buffer = it;
-                    else if (it->state == BufferState::Dequeued)
+                } else if (it->state == BufferState::Dequeued) {
                     dequeuedSlotCount++;
                 }
             }
 
             if (buffer != queue.end()) {
                 slot = std::distance(queue.begin(), buffer);
                 break;
             } else if (async) {
                 return AndroidStatus::WouldBlock;
+            } else if (dequeuedSlotCount == queue.size()) {
+                state.logger->Warn("Client attempting to dequeue more buffers when all buffers are dequeued by the client: {}", dequeuedSlotCount);
+                return AndroidStatus::InvalidOperation;
             } else {
+                size_t index{};
+                std::string bufferString;
+                for (auto &bufferSlot : queue)
+                    bufferString += util::Format("\n#{} - State: {}, Has Graphic Buffer: {}, Frame Number: {}", ++index, ToString(bufferSlot.state), static_cast<bool>(bufferSlot.graphicBuffer), bufferSlot.frameNumber);
+                state.logger->Warn("Cannot find any free buffers to dequeue:{}", bufferString);
                 return AndroidStatus::InvalidOperation;
             }
         }
 
         width = width ? width : defaultWidth;
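The reworked loop fixes a misplaced else-if (previously the BufferState::Dequeued check was nested under the free-slot branch, where it could never fire) and now distinguishes "the client has dequeued everything" from "nothing is free". Distilled into a standalone sketch with simplified types (not the actual hosbinder code):

    #include <cstdint>
    #include <list>

    enum class BufferState { Free, Dequeued, Queued };

    struct Slot {
        BufferState state;
        uint64_t frameNumber; // Lower value == queued longer ago
        bool hasTexture;
    };

    // Pick the free slot with the lowest frame number so every slot gets used evenly,
    // while counting dequeued slots to detect a client that has dequeued everything
    Slot *PickSlot(std::list<Slot> &queue, size_t &dequeuedSlotCount) {
        Slot *buffer{};
        for (auto &slot : queue) {
            if (slot.state == BufferState::Free && slot.hasTexture) {
                if (!buffer || slot.frameNumber < buffer->frameNumber)
                    buffer = &slot; // Prefer the oldest free slot
            } else if (slot.state == BufferState::Dequeued) {
                dequeuedSlotCount++;
            }
        }
        return buffer; // nullptr when nothing is free to dequeue
    }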
@@ -392,7 +396,7 @@ namespace skyline::service::hosbinder {
         buffer.frameNumber = 0;
         buffer.wasBufferRequested = false;
         buffer.graphicBuffer = std::make_unique<GraphicBuffer>(graphicBuffer);
-        buffer.texture = texture->CreateTexture({}, vk::ImageTiling::eLinear, vk::ImageLayout::eGeneral);
+        buffer.texture = texture->CreateTexture({}, vk::ImageTiling::eLinear);
 
         activeSlotCount = hasBufferCount = std::count_if(queue.begin(), queue.end(), [](const BufferSlot &slot) { return static_cast<bool>(slot.graphicBuffer); });

View File

@@ -55,6 +55,7 @@ namespace skyline::service::hosbinder {
         /**
          * @url https://cs.android.com/android/platform/superproject/+/android-5.1.1_r38:frameworks/native/libs/ui/Fence.cpp;l=34-36
+         * @note Only initializing the first fence is intentional and matches Nvidia's AndroidFence implementation
          */
         AndroidFence() : fenceCount(0), fences({InvalidFenceId}) {}

View File

@@ -49,7 +49,8 @@ namespace skyline::service::nvdrv::device {
     void SyncpointEvent::Cancel(soc::host1x::Host1X &host1x) {
         std::lock_guard lock(mutex);
 
-        host1x.syncpoints.at(fence.id).DeregisterWaiter(waiterId);
+        host1x.syncpoints.at(fence.id).DeregisterWaiter(waiterHandle);
+        waiterHandle = {};
         Signal();
         event->ResetSignal();
     }
@@ -59,7 +60,7 @@ namespace skyline::service::nvdrv::device {
         fence = pFence;
         state = State::Waiting;
 
-        waiterId = host1x.syncpoints.at(fence.id).RegisterWaiter(fence.value, [this] { Signal(); });
+        waiterHandle = host1x.syncpoints.at(fence.id).RegisterWaiter(fence.value, [this] { Signal(); });
     }
 
     NvHostCtrl::NvHostCtrl(const DeviceState &state) : NvDevice(state) {}

View File

@@ -17,7 +17,7 @@ namespace skyline {
     */
    class SyncpointEvent {
      private:
-        u64 waiterId{};
+        soc::host1x::Syncpoint::WaiterHandle waiterHandle{};
 
        void Signal();

View File

@@ -71,7 +71,7 @@ namespace skyline::service::nvdrv {
         if (!syncpoints.at(id).reserved)
             throw exception("Cannot update an unreserved syncpoint!");
 
-        syncpoints.at(id).counterMin = state.soc->host1x.syncpoints.at(id).value.load();
+        syncpoints.at(id).counterMin = state.soc->host1x.syncpoints.at(id).Load();
         return syncpoints.at(id).counterMin;
     }
 }

View File

@@ -5,62 +5,60 @@
 #include "syncpoint.h"
 
 namespace skyline::soc::host1x {
-    u64 Syncpoint::RegisterWaiter(u32 threshold, const std::function<void()> &callback) {
-        if (value >= threshold) {
+    Syncpoint::WaiterHandle Syncpoint::RegisterWaiter(u32 threshold, const std::function<void()> &callback) {
+        if (value.load(std::memory_order_acquire) >= threshold) {
+            // (Fast path) We don't need to wait on the mutex and can just get away with atomics
             callback();
-            return 0;
+            return {};
         }
 
-        std::lock_guard guard(waiterLock);
-        waiterMap.emplace(nextWaiterId, Waiter{threshold, callback});
-        return nextWaiterId++;
-    }
+        std::scoped_lock lock(mutex);
+        if (value.load(std::memory_order_acquire) >= threshold) {
+            callback();
+            return {};
+        }
 
-    void Syncpoint::DeregisterWaiter(u64 id) {
-        std::lock_guard guard(waiterLock);
-        waiterMap.erase(id);
+        auto it{waiters.begin()};
+        while (it != waiters.end() && threshold >= it->threshold)
+            it++;
+        return waiters.emplace(it, threshold, callback);
+    }
+
+    void Syncpoint::DeregisterWaiter(WaiterHandle waiter) {
+        std::scoped_lock lock(mutex);
+        // We want to ensure the iterator still exists prior to erasing it
+        // Otherwise, if an invalid iterator was passed in then it could lead to UB
+        // It is important to avoid UB in that case since the deregister isn't called from a locked context
+        for (auto it{waiters.begin()}; it != waiters.end(); it++)
+            if (it == waiter)
+                waiters.erase(it);
     }
 
     u32 Syncpoint::Increment() {
-        value++;
+        auto readValue{value.fetch_add(1, std::memory_order_acq_rel) + 1}; // We don't want to constantly do redundant atomic loads
 
-        std::lock_guard guard(waiterLock);
-        std::erase_if(waiterMap, [this](const auto &entry) {
-            if (value >= entry.second.threshold) {
-                entry.second.callback();
-                return true;
-            } else {
-                return false;
-            }
-        });
-
-        return value;
+        std::lock_guard lock(mutex);
+        auto it{waiters.begin()};
+        while (it != waiters.end() && readValue >= it->threshold)
+            it++->callback();
+        waiters.erase(waiters.begin(), it);
+
+        incrementCondition.notify_all();
+        return readValue;
     }
 
     bool Syncpoint::Wait(u32 threshold, std::chrono::steady_clock::duration timeout) {
-        if (value >= threshold)
+        if (value.load(std::memory_order_acquire) >= threshold)
+            // (Fast Path) We don't need to wait on the mutex and can just get away with atomics
             return true;
 
-        std::mutex mtx;
-        std::condition_variable cv;
-        bool flag{};
-
-        if (!RegisterWaiter(threshold, [&cv, &mtx, &flag] {
-            std::unique_lock lock(mtx);
-            flag = true;
-            lock.unlock();
-            cv.notify_all();
-        })) {
-            return true;
-        }
-
-        std::unique_lock lock(mtx);
+        std::unique_lock lock(mutex);
         if (timeout == std::chrono::steady_clock::duration::max()) {
-            cv.wait(lock, [&flag] { return flag; });
+            incrementCondition.wait(lock, [&] { return value.load(std::memory_order_relaxed) >= threshold; });
             return true;
         } else {
-            return cv.wait_for(lock, timeout, [&flag] { return flag; });
+            return incrementCondition.wait_for(lock, timeout, [&] { return value.load(std::memory_order_relaxed) >= threshold; });
        }
    }
 }
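Taken together, the revamped syncpoint lets callback waiters, blocking waits, and increments interleave safely under one mutex. A hypothetical usage sketch against the API in this diff (Example, the include path, and the callback body are illustrative, not skyline code):

    #include <chrono>
    #include <thread>
    #include "syncpoint.h" // Assumed include path

    void Example(skyline::soc::host1x::Syncpoint &syncpoint) {
        // Runs the callback once the value reaches 100, or immediately (returning an
        // empty handle) if it's already there
        auto handle{syncpoint.RegisterWaiter(100, [] { /* e.g. signal a SyncpointEvent */ })};

        std::thread producer([&] {
            for (int i{}; i < 100; i++)
                syncpoint.Increment(); // Invokes and erases every waiter whose threshold was crossed
        });

        // A blocking wait with a timeout; returns false if 100 wasn't reached within a second
        bool reached{syncpoint.Wait(100, std::chrono::seconds(1))};
        producer.join();

        // If the wait were being abandoned instead (c.f. SyncpointEvent::Cancel above),
        // the handle would be passed to DeregisterWaiter while it's still known to be valid
    }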

View File

@@ -14,33 +14,43 @@ namespace skyline::soc::host1x {
     */
    class Syncpoint {
      private:
+        std::atomic<u32> value{}; //!< An atomically-incrementing counter at the core of a syncpoint
+        std::mutex mutex; //!< Synchronizes insertions and deletions of waiters alongside locking the increment condition
+        std::condition_variable incrementCondition; //!< Signalled on every increment to the syncpoint
+
         struct Waiter {
             u32 threshold; //!< The syncpoint value to wait on to be reached
             std::function<void()> callback; //!< The callback to do after the wait has ended
-        };
 
-        std::mutex waiterLock; //!< Synchronizes insertions and deletions of waiters
-        std::map<u64, Waiter> waiterMap;
-        u64 nextWaiterId{1};
+            Waiter(u32 threshold, std::function<void()> callback) : threshold(threshold), callback(std::move(callback)) {}
+        };
+        std::list<Waiter> waiters; //!< A linked list of all waiters, it's sorted in ascending order by threshold
 
       public:
-        std::atomic<u32> value{};
+        /**
+         * @return The value of the syncpoint, retrieved in an atomically safe manner
+         */
+        constexpr u32 Load() {
+            return value.load(std::memory_order_acquire);
+        }
+
+        using WaiterHandle = decltype(waiters)::iterator; //!< Aliasing an iterator to a Waiter as an opaque handle
 
         /**
          * @brief Registers a new waiter with a callback that will be called when the syncpoint reaches the target threshold
          * @note The callback will be called immediately if the syncpoint has already reached the given threshold
-         * @return A persistent identifier that can be used to refer to the waiter, or 0 if the threshold has already been reached
+         * @return A handle that can be used to deregister the waiter, it'll compare equal to a default-constructed handle if the threshold had already been reached
         */
-        u64 RegisterWaiter(u32 threshold, const std::function<void()> &callback);
+        WaiterHandle RegisterWaiter(u32 threshold, const std::function<void()> &callback);
 
         /**
-         * @brief Removes a waiter given by 'id' from the pending waiter map
+         * @brief Removes the waiter given by the supplied handle from the list of waiters
+         * @note If the supplied handle is invalid then the function will do nothing
         */
-        void DeregisterWaiter(u64 id);
+        void DeregisterWaiter(WaiterHandle waiter);
 
         /**
         * @brief Increments the syncpoint by 1
-         * @return The new value of the syncpoint
+         * @return The new value of the syncpoint after the increment
         */
        u32 Increment();
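The WaiterHandle alias leans on a guarantee specific to node-based containers: a std::list iterator stays valid for as long as its own element isn't erased, regardless of what happens to the rest of the list. A small self-contained demonstration of why that makes it a workable handle (not skyline code):

    #include <cassert>
    #include <list>

    int main() {
        std::list<int> thresholds{10, 30};
        auto handle{thresholds.insert(std::next(thresholds.begin()), 20)}; // Points at the 20 node

        thresholds.push_front(5);             // Insertion elsewhere doesn't invalidate handle
        thresholds.erase(thresholds.begin()); // Neither does erasure of another node
        assert(*handle == 20);

        thresholds.erase(handle); // Analogous to DeregisterWaiter; handle is dangling afterwards
    }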