diff --git a/app/src/main/cpp/skyline/gpu/memory_manager.cpp b/app/src/main/cpp/skyline/gpu/memory_manager.cpp index 018f465c..146683f9 100644 --- a/app/src/main/cpp/skyline/gpu/memory_manager.cpp +++ b/app/src/main/cpp/skyline/gpu/memory_manager.cpp @@ -15,7 +15,7 @@ namespace skyline::gpu::memory { StagingBuffer::~StagingBuffer() { if (vmaAllocator && vmaAllocation && vkBuffer) - vmaDestroyBuffer(vmaAllocator, vkBuffer, vmaAllocation); + vmaDestroyBuffer(vmaAllocator, vkBuffer, vmaAllocation); } Image::~Image() { diff --git a/app/src/main/cpp/skyline/gpu/presentation_engine.cpp b/app/src/main/cpp/skyline/gpu/presentation_engine.cpp index ff7ddfa8..824034b0 100644 --- a/app/src/main/cpp/skyline/gpu/presentation_engine.cpp +++ b/app/src/main/cpp/skyline/gpu/presentation_engine.cpp @@ -34,7 +34,7 @@ namespace skyline::gpu { /** * @url https://developer.android.com/ndk/reference/group/choreographer#achoreographer_framecallback */ - void ChoreographerCallback(long frameTimeNanos, kernel::type::KEvent* vsyncEvent) { + void ChoreographerCallback(long frameTimeNanos, kernel::type::KEvent *vsyncEvent) { vsyncEvent->Signal(); AChoreographer_postFrameCallback(AChoreographer_getInstance(), reinterpret_cast(&ChoreographerCallback), vsyncEvent); } @@ -42,7 +42,7 @@ namespace skyline::gpu { void PresentationEngine::ChoreographerThread() { choreographerLooper = ALooper_prepare(0); AChoreographer_postFrameCallback(AChoreographer_getInstance(), reinterpret_cast(&ChoreographerCallback), vsyncEvent.get()); - ALooper_pollAll(-1, nullptr, nullptr, nullptr); + ALooper_pollAll(-1, nullptr, nullptr, nullptr); // Will block and process callbacks till ALooper_wake() is called } service::hosbinder::NativeWindowTransform GetAndroidTransform(vk::SurfaceTransformFlagBitsKHR transform) { @@ -70,8 +70,8 @@ namespace skyline::gpu { void PresentationEngine::UpdateSwapchain(texture::Format format, texture::Dimensions extent) { auto minImageCount{std::max(vkSurfaceCapabilities.minImageCount, 
state.settings->forceTripleBuffering ? 3U : 0U)}; - if (minImageCount > MaxSlotCount) - throw exception("Requesting swapchain with higher image count ({}) than maximum slot count ({})", minImageCount, MaxSlotCount); + if (minImageCount > MaxSwapchainImageCount) + throw exception("Requesting swapchain with higher image count ({}) than maximum slot count ({})", minImageCount, MaxSwapchainImageCount); const auto &capabilities{vkSurfaceCapabilities}; if (minImageCount < capabilities.minImageCount || (capabilities.maxImageCount && minImageCount > capabilities.maxImageCount)) @@ -104,16 +104,17 @@ namespace skyline::gpu { }); auto vkImages{vkSwapchain->getImages()}; - if (vkImages.size() > MaxSlotCount) - throw exception("Swapchain has higher image count ({}) than maximum slot count ({})", minImageCount, MaxSlotCount); + if (vkImages.size() > MaxSwapchainImageCount) + throw exception("Swapchain has higher image count ({}) than maximum slot count ({})", vkImages.size(), MaxSwapchainImageCount); for (size_t index{}; index < vkImages.size(); index++) { - auto &slot{slots[index]}; + auto &slot{images[index]}; slot = std::make_shared(*state.gpu, vkImages[index], extent, format::GetFormat(format), vk::ImageLayout::eUndefined, vk::ImageTiling::eOptimal); slot->TransitionLayout(vk::ImageLayout::ePresentSrcKHR); } - for (size_t index{vkImages.size()}; index < MaxSlotCount; index++) - slots[index] = {}; + for (size_t index{vkImages.size()}; index < MaxSwapchainImageCount; index++) + // We need to clear all the slots which aren't filled, keeping around stale slots could lead to issues + images[index] = {}; swapchainFormat = format; swapchainExtent = extent; @@ -157,14 +158,15 @@ namespace skyline::gpu { UpdateSwapchain(texture->format, texture->dimensions); std::pair nextImage; - while ((nextImage = vkSwapchain->acquireNextImage(std::numeric_limits::max(), {}, *acquireFence)).first != vk::Result::eSuccess) [[unlikely]] + while (nextImage = 
vkSwapchain->acquireNextImage(std::numeric_limits::max(), {}, *acquireFence), nextImage.first != vk::Result::eSuccess) [[unlikely]] { if (nextImage.first == vk::Result::eSuboptimalKHR) surfaceCondition.wait(lock, [this]() { return vkSurface.has_value(); }); else throw exception("vkAcquireNextImageKHR returned an unhandled result '{}'", vk::to_string(nextImage.first)); - while (gpu.vkDevice.waitForFences(*acquireFence, true, std::numeric_limits::max()) == vk::Result::eTimeout); + } - slots.at(nextImage.second)->CopyFrom(texture); + static_cast(gpu.vkDevice.waitForFences(*acquireFence, true, std::numeric_limits::max())); + images.at(nextImage.second)->CopyFrom(texture); { std::lock_guard queueLock(gpu.queueMutex); diff --git a/app/src/main/cpp/skyline/gpu/presentation_engine.h b/app/src/main/cpp/skyline/gpu/presentation_engine.h index b0696537..95b9e95b 100644 --- a/app/src/main/cpp/skyline/gpu/presentation_engine.h +++ b/app/src/main/cpp/skyline/gpu/presentation_engine.h @@ -32,14 +32,14 @@ namespace skyline::gpu { texture::Format swapchainFormat{}; //!< The image format of the textures in the current swapchain texture::Dimensions swapchainExtent{}; //!< The extent of images in the current swapchain - static constexpr size_t MaxSlotCount{6}; //!< The maximum amount of queue slots, this affects the amount of images that can be in the swapchain - std::array, MaxSlotCount> slots; //!< The backing for storing all slots and sorted in the same order as supplied by the Vulkan swapchain + static constexpr size_t MaxSwapchainImageCount{6}; //!< The maximum amount of swapchain textures, this affects the amount of images that can be in the swapchain + std::array, MaxSwapchainImageCount> images; //!< All the swapchain textures in the same order as supplied by the host swapchain u64 frameTimestamp{}; //!< The timestamp of the last frame being shown perfetto::Track presentationTrack; //!< Perfetto track used for presentation events std::thread choreographerThread; //!< A thread 
for signalling the V-Sync event using AChoreographer - ALooper* choreographerLooper{}; //!< The looper object associated with the Choreographer thread + ALooper *choreographerLooper{}; //!< The looper object associated with the Choreographer thread /** * @brief The entry point for the the Choreographer thread, the function runs ALooper on the thread diff --git a/app/src/main/cpp/skyline/gpu/texture/format.h b/app/src/main/cpp/skyline/gpu/texture/format.h index cbbf3321..c0c3a872 100644 --- a/app/src/main/cpp/skyline/gpu/texture/format.h +++ b/app/src/main/cpp/skyline/gpu/texture/format.h @@ -12,7 +12,7 @@ namespace skyline::gpu::format { constexpr Format RGB565Unorm{sizeof(u8) * 2, 1, 1, vk::Format::eR5G6B5UnormPack16}; //!< Red channel: 5-bit, Green channel: 6-bit, Blue channel: 5-bit /** - * @brief Converts a format from Vulkan to a Skyline format + * @brief Converts a Vulkan format to a Skyline format */ constexpr const Format &GetFormat(vk::Format format) { switch (format) { diff --git a/app/src/main/cpp/skyline/gpu/texture/texture.cpp b/app/src/main/cpp/skyline/gpu/texture/texture.cpp index 700330f5..c9685bfe 100644 --- a/app/src/main/cpp/skyline/gpu/texture/texture.cpp +++ b/app/src/main/cpp/skyline/gpu/texture/texture.cpp @@ -140,12 +140,14 @@ namespace skyline::gpu { u8 *bufferData; auto stagingBuffer{[&]() -> std::shared_ptr { if (tiling == vk::ImageTiling::eOptimal || !std::holds_alternative(backing)) { + // We need a staging buffer for all optimal copies (Since we aren't aware of the host optimal layout) and linear textures which we cannot map on the CPU since we do not have access to their backing VkDeviceMemory auto stagingBuffer{gpu.memory.AllocateStagingBuffer(size)}; bufferData = stagingBuffer->data(); return stagingBuffer; } else if (tiling == vk::ImageTiling::eLinear) { + // We can optimize linear texture sync on a UMA by mapping the texture onto the CPU and copying directly into it rather than a staging buffer bufferData = 
std::get(backing).data(); - WaitOnFence(); + WaitOnFence(); // We need to wait on fence here since we are mutating the texture directly after, the wait can be deferred till the copy when a staging buffer is used return nullptr; } else { throw exception("Guest -> Host synchronization of images tiled as '{}' isn't implemented", vk::to_string(tiling)); diff --git a/app/src/main/cpp/skyline/gpu/texture/texture.h b/app/src/main/cpp/skyline/gpu/texture/texture.h index 4a4bc686..296f6827 100644 --- a/app/src/main/cpp/skyline/gpu/texture/texture.h +++ b/app/src/main/cpp/skyline/gpu/texture/texture.h @@ -188,7 +188,7 @@ namespace skyline::gpu { texture::TileMode tileMode; texture::TileConfig tileConfig; - GuestTexture(const DeviceState &state, u8 *pointer, texture::Dimensions dimensions, const texture::Format& format, texture::TileMode tileMode = texture::TileMode::Linear, texture::TileConfig tileConfig = {}); + GuestTexture(const DeviceState &state, u8 *pointer, texture::Dimensions dimensions, const texture::Format &format, texture::TileMode tileMode = texture::TileMode::Linear, texture::TileConfig tileConfig = {}); constexpr size_t Size() { return format.GetSize(dimensions); @@ -206,18 +206,18 @@ namespace skyline::gpu { * @note There can only be one host texture for a corresponding guest texture * @note If any of the supplied parameters do not match up with the backing then it's undefined behavior */ - std::shared_ptr InitializeTexture(vk::Image backing, texture::Dimensions dimensions = {}, const texture::Format& format = {}, std::optional tiling = std::nullopt, vk::ImageLayout layout = vk::ImageLayout::eUndefined, texture::Swizzle swizzle = {}); + std::shared_ptr InitializeTexture(vk::Image backing, texture::Dimensions dimensions = {}, const texture::Format &format = {}, std::optional tiling = std::nullopt, vk::ImageLayout layout = vk::ImageLayout::eUndefined, texture::Swizzle swizzle = {}); /** * @note As a RAII object is used here, the lifetime of the backing is 
handled by the host texture */ - std::shared_ptr InitializeTexture(vk::raii::Image &&backing, std::optional tiling = std::nullopt, vk::ImageLayout layout = vk::ImageLayout::eUndefined, const texture::Format& format = {}, texture::Dimensions dimensions = {}, texture::Swizzle swizzle = {}); + std::shared_ptr InitializeTexture(vk::raii::Image &&backing, std::optional tiling = std::nullopt, vk::ImageLayout layout = vk::ImageLayout::eUndefined, const texture::Format &format = {}, texture::Dimensions dimensions = {}, texture::Swizzle swizzle = {}); /** * @brief Similar to InitializeTexture but creation of the backing and allocation of memory for the backing is automatically performed by the function * @param usage Usage flags that will applied aside from VK_IMAGE_USAGE_TRANSFER_SRC_BIT/VK_IMAGE_USAGE_TRANSFER_DST_BIT which are mandatory */ - std::shared_ptr CreateTexture(vk::ImageUsageFlags usage = {}, std::optional tiling = std::nullopt, vk::ImageLayout initialLayout = vk::ImageLayout::eGeneral, const texture::Format& format = {}, texture::Dimensions dimensions = {}, texture::Swizzle swizzle = {}); + std::shared_ptr CreateTexture(vk::ImageUsageFlags usage = {}, std::optional tiling = std::nullopt, vk::ImageLayout initialLayout = vk::ImageLayout::eGeneral, const texture::Format &format = {}, texture::Dimensions dimensions = {}, texture::Swizzle swizzle = {}); }; /** @@ -252,15 +252,15 @@ namespace skyline::gpu { vk::ImageTiling tiling; vk::ComponentMapping mapping; - Texture(GPU &gpu, BackingType &&backing, std::shared_ptr guest, texture::Dimensions dimensions, const texture::Format& format, vk::ImageLayout layout, vk::ImageTiling tiling, vk::ComponentMapping mapping); + Texture(GPU &gpu, BackingType &&backing, std::shared_ptr guest, texture::Dimensions dimensions, const texture::Format &format, vk::ImageLayout layout, vk::ImageTiling tiling, vk::ComponentMapping mapping); - Texture(GPU &gpu, BackingType &&backing, texture::Dimensions dimensions, const texture::Format& 
format, vk::ImageLayout layout, vk::ImageTiling tiling, vk::ComponentMapping mapping = {}); + Texture(GPU &gpu, BackingType &&backing, texture::Dimensions dimensions, const texture::Format &format, vk::ImageLayout layout, vk::ImageTiling tiling, vk::ComponentMapping mapping = {}); /** * @brief Creates and allocates memory for the backing to creates a texture object wrapping it * @param usage Usage flags that will applied aside from VK_IMAGE_USAGE_TRANSFER_SRC_BIT/VK_IMAGE_USAGE_TRANSFER_DST_BIT which are mandatory */ - Texture(GPU &gpu, texture::Dimensions dimensions, const texture::Format& format, vk::ImageLayout initialLayout = vk::ImageLayout::eGeneral, vk::ImageUsageFlags usage = {}, vk::ImageTiling tiling = vk::ImageTiling::eOptimal, vk::ComponentMapping mapping = {}); + Texture(GPU &gpu, texture::Dimensions dimensions, const texture::Format &format, vk::ImageLayout initialLayout = vk::ImageLayout::eGeneral, vk::ImageUsageFlags usage = {}, vk::ImageTiling tiling = vk::ImageTiling::eOptimal, vk::ComponentMapping mapping = {}); /** * @brief Acquires an exclusive lock on the texture for the calling thread diff --git a/app/src/main/cpp/skyline/services/hosbinder/GraphicBufferProducer.cpp b/app/src/main/cpp/skyline/services/hosbinder/GraphicBufferProducer.cpp index f84c9a7e..d8ffd5a0 100644 --- a/app/src/main/cpp/skyline/services/hosbinder/GraphicBufferProducer.cpp +++ b/app/src/main/cpp/skyline/services/hosbinder/GraphicBufferProducer.cpp @@ -50,28 +50,32 @@ namespace skyline::service::hosbinder { std::lock_guard guard(mutex); auto buffer{queue.end()}; - while (true) { - size_t dequeuedSlotCount{}; - for (auto it{queue.begin()}; it != queue.end(); it++) { - // We want to select the oldest slot that's free to use as we'd want all slots to be used - // If we go linearly then we have a higher preference for selecting the former slots and being out of order - if (it->state == BufferState::Free && it->texture) { - if (buffer == queue.end() || it->frameNumber < 
buffer->frameNumber) - buffer = it; - else if (it->state == BufferState::Dequeued) - dequeuedSlotCount++; - } + size_t dequeuedSlotCount{}; + for (auto it{queue.begin()}; it != queue.end(); it++) { + // We want to select the oldest slot that's free to use as we'd want all slots to be used + // If we go linearly then we have a higher preference for selecting the former slots and being out of order + if (it->state == BufferState::Free && it->texture) { + if (buffer == queue.end() || it->frameNumber < buffer->frameNumber) + buffer = it; + } else if (it->state == BufferState::Dequeued) { + dequeuedSlotCount++; } + } - if (buffer != queue.end()) { - slot = std::distance(queue.begin(), buffer); - break; - } else if (async) { - return AndroidStatus::WouldBlock; - } else if (dequeuedSlotCount == queue.size()) { - state.logger->Warn("Client attempting to dequeue more buffers when all buffers are dequeued by the client: {}", dequeuedSlotCount); - return AndroidStatus::InvalidOperation; - } + if (buffer != queue.end()) { + slot = std::distance(queue.begin(), buffer); + } else if (async) { + return AndroidStatus::WouldBlock; + } else if (dequeuedSlotCount == queue.size()) { + state.logger->Warn("Client attempting to dequeue more buffers when all buffers are dequeued by the client: {}", dequeuedSlotCount); + return AndroidStatus::InvalidOperation; + } else { + size_t index{}; + std::string bufferString; + for (auto& bufferSlot : queue) + bufferString += util::Format("\n#{} - State: {}, Has Graphic Buffer: {}, Frame Number: {}", ++index, ToString(bufferSlot.state), static_cast(bufferSlot.graphicBuffer), bufferSlot.frameNumber); + state.logger->Warn("Cannot find any free buffers to dequeue:{}", bufferString); + return AndroidStatus::InvalidOperation; } width = width ? 
width : defaultWidth; @@ -392,7 +396,7 @@ namespace skyline::service::hosbinder { buffer.frameNumber = 0; buffer.wasBufferRequested = false; buffer.graphicBuffer = std::make_unique(graphicBuffer); - buffer.texture = texture->CreateTexture({}, vk::ImageTiling::eLinear, vk::ImageLayout::eGeneral); + buffer.texture = texture->CreateTexture({}, vk::ImageTiling::eLinear); activeSlotCount = hasBufferCount = std::count_if(queue.begin(), queue.end(), [](const BufferSlot &slot) { return static_cast(slot.graphicBuffer); }); diff --git a/app/src/main/cpp/skyline/services/hosbinder/android_types.h b/app/src/main/cpp/skyline/services/hosbinder/android_types.h index 91f29efe..200758d4 100644 --- a/app/src/main/cpp/skyline/services/hosbinder/android_types.h +++ b/app/src/main/cpp/skyline/services/hosbinder/android_types.h @@ -55,6 +55,7 @@ namespace skyline::service::hosbinder { /** * @url https://cs.android.com/android/platform/superproject/+/android-5.1.1_r38:frameworks/native/libs/ui/Fence.cpp;l=34-36 + * @note Only initializing the first fence is intentional and matches Nvidia's AndroidFence implementation */ AndroidFence() : fenceCount(0), fences({InvalidFenceId}) {} diff --git a/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_ctrl.cpp b/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_ctrl.cpp index d7780417..ac9834c5 100644 --- a/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_ctrl.cpp +++ b/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_ctrl.cpp @@ -49,7 +49,8 @@ namespace skyline::service::nvdrv::device { void SyncpointEvent::Cancel(soc::host1x::Host1X &host1x) { std::lock_guard lock(mutex); - host1x.syncpoints.at(fence.id).DeregisterWaiter(waiterId); + host1x.syncpoints.at(fence.id).DeregisterWaiter(waiterHandle); + waiterHandle = {}; Signal(); event->ResetSignal(); } @@ -59,7 +60,7 @@ namespace skyline::service::nvdrv::device { fence = pFence; state = State::Waiting; - waiterId = host1x.syncpoints.at(fence.id).RegisterWaiter(fence.value, 
[this] { Signal(); }); + waiterHandle = host1x.syncpoints.at(fence.id).RegisterWaiter(fence.value, [this] { Signal(); }); } NvHostCtrl::NvHostCtrl(const DeviceState &state) : NvDevice(state) {} diff --git a/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_ctrl.h b/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_ctrl.h index 6ed9e624..07562979 100644 --- a/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_ctrl.h +++ b/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_ctrl.h @@ -17,7 +17,7 @@ namespace skyline { */ class SyncpointEvent { private: - u64 waiterId{}; + soc::host1x::Syncpoint::WaiterHandle waiterHandle{}; void Signal(); diff --git a/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_syncpoint.cpp b/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_syncpoint.cpp index ee95b4a8..fa019dca 100644 --- a/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_syncpoint.cpp +++ b/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_syncpoint.cpp @@ -71,7 +71,7 @@ namespace skyline::service::nvdrv { if (!syncpoints.at(id).reserved) throw exception("Cannot update an unreserved syncpoint!"); - syncpoints.at(id).counterMin = state.soc->host1x.syncpoints.at(id).value.load(); + syncpoints.at(id).counterMin = state.soc->host1x.syncpoints.at(id).Load(); return syncpoints.at(id).counterMin; } } diff --git a/app/src/main/cpp/skyline/soc/host1x/syncpoint.cpp b/app/src/main/cpp/skyline/soc/host1x/syncpoint.cpp index fb6ffab7..b6990925 100644 --- a/app/src/main/cpp/skyline/soc/host1x/syncpoint.cpp +++ b/app/src/main/cpp/skyline/soc/host1x/syncpoint.cpp @@ -5,62 +5,60 @@ #include "syncpoint.h" namespace skyline::soc::host1x { - u64 Syncpoint::RegisterWaiter(u32 threshold, const std::function &callback) { - if (value >= threshold) { + Syncpoint::WaiterHandle Syncpoint::RegisterWaiter(u32 threshold, const std::function &callback) { + if (value.load(std::memory_order_acquire) >= threshold) { + // (Fast path) We don't need to wait on the mutex 
and can just get away with atomics callback(); - return 0; + return {}; } - std::lock_guard guard(waiterLock); - waiterMap.emplace(nextWaiterId, Waiter{threshold, callback}); + std::scoped_lock lock(mutex); + if (value.load(std::memory_order_acquire) >= threshold) { + callback(); + return {}; + } - return nextWaiterId++; + auto it{waiters.begin()}; + while (it != waiters.end() && threshold >= it->threshold) + it++; + return waiters.emplace(it, threshold, callback); } - void Syncpoint::DeregisterWaiter(u64 id) { - std::lock_guard guard(waiterLock); - waiterMap.erase(id); + void Syncpoint::DeregisterWaiter(WaiterHandle waiter) { + std::scoped_lock lock(mutex); + // We want to ensure the iterator still exists prior to erasing it + // Otherwise, if an invalid iterator was passed in then it could lead to UB + // It is important to avoid UB in that case since the deregister isn't called from a locked context + for (auto it{waiters.begin()}; it != waiters.end(); it++) + if (it == waiter) { + waiters.erase(it); + return; + } } u32 Syncpoint::Increment() { - value++; + auto readValue{value.fetch_add(1, std::memory_order_acq_rel) + 1}; // We don't want to constantly do redundant atomic loads - std::lock_guard guard(waiterLock); - std::erase_if(waiterMap, [this](const auto &entry) { - if (value >= entry.second.threshold) { - entry.second.callback(); - return true; - } else { - return false; - } - }); + std::lock_guard lock(mutex); + auto it{waiters.begin()}; + while (it != waiters.end() && readValue >= it->threshold) + it++->callback(); + waiters.erase(waiters.begin(), it); - return value; + incrementCondition.notify_all(); + + return readValue; } bool Syncpoint::Wait(u32 threshold, std::chrono::steady_clock::duration timeout) { - if (value >= threshold) - return true; + if (value.load(std::memory_order_acquire) >= threshold) + // (Fast Path) We don't need to wait on the mutex and can just get away with atomics + return true; - std::mutex mtx; - std::condition_variable cv; - bool flag{}; - if 
(!RegisterWaiter(threshold, [&cv, &mtx, &flag] { - std::unique_lock lock(mtx); - flag = true; - lock.unlock(); - cv.notify_all(); - })) { - return true; - } - - std::unique_lock lock(mtx); + std::unique_lock lock(mutex); if (timeout == std::chrono::steady_clock::duration::max()) { - cv.wait(lock, [&flag] { return flag; }); + incrementCondition.wait(lock, [&] { return value.load(std::memory_order_relaxed) >= threshold; }); return true; } else { - return cv.wait_for(lock, timeout, [&flag] { return flag; }); + return incrementCondition.wait_for(lock, timeout, [&] { return value.load(std::memory_order_relaxed) >= threshold; }); } } } diff --git a/app/src/main/cpp/skyline/soc/host1x/syncpoint.h b/app/src/main/cpp/skyline/soc/host1x/syncpoint.h index 1d7b5b81..3f48738f 100644 --- a/app/src/main/cpp/skyline/soc/host1x/syncpoint.h +++ b/app/src/main/cpp/skyline/soc/host1x/syncpoint.h @@ -14,33 +14,43 @@ namespace skyline::soc::host1x { */ class Syncpoint { private: + std::atomic value{}; //!< An atomically-incrementing counter at the core of a syncpoint + + std::mutex mutex; //!< Synchronizes insertions and deletions of waiters alongside locking the increment condition + std::condition_variable incrementCondition; //!< Signalled on every increment to the syncpoint + struct Waiter { u32 threshold; //!< The syncpoint value to wait on to be reached std::function callback; //!< The callback to do after the wait has ended - }; - std::mutex waiterLock; //!< Synchronizes insertions and deletions of waiters - std::map waiterMap; - u64 nextWaiterId{1}; + Waiter(u32 threshold, std::function callback) : threshold(threshold), callback(std::move(callback)) {} + }; + std::list waiters; //!< A linked list of all waiters, it's sorted in ascending order by threshold public: - std::atomic value{}; + /** + * @return The value of the syncpoint, retrieved in an atomically safe manner + */ + constexpr u32 Load() { + return value.load(std::memory_order_acquire); + } + + using WaiterHandle = 
decltype(waiters)::iterator; //!< Aliasing an iterator to a Waiter as an opaque handle /** * @brief Registers a new waiter with a callback that will be called when the syncpoint reaches the target threshold * @note The callback will be called immediately if the syncpoint has already reached the given threshold - * @return A persistent identifier that can be used to refer to the waiter, or 0 if the threshold has already been reached + * @return A handle that can be used to deregister the waiter, its boolean operator will evaluate to false if the threshold has already been reached */ - u64 RegisterWaiter(u32 threshold, const std::function &callback); + WaiterHandle RegisterWaiter(u32 threshold, const std::function &callback); /** - * @brief Removes a waiter given by 'id' from the pending waiter map + * @note If the supplied handle is invalid then the function will do nothing */ - void DeregisterWaiter(u64 id); + void DeregisterWaiter(WaiterHandle waiter); /** - * @brief Increments the syncpoint by 1 - * @return The new value of the syncpoint + * @return The new value of the syncpoint after the increment */ u32 Increment();