Split out megabuffer allocation from pushing data

The `Allocate` method allocates the given amount of space in a megabuffer chunk and returns a descriptor of the allocated region. This is useful when a caller wants to write directly into the megabuffer, avoiding the need for an intermediary buffer.
lynxnb authored on 2022-08-08 14:47:53 +01:00; committed by Billy Laws
parent cdc6a4628a
commit e6741642ba
2 changed files with 28 additions and 18 deletions
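For illustration, here is a rough caller-side sketch of the difference described in the commit message. The allocator handle, fence cycle, and `FillIndexData` generator are assumed names for this sketch, not part of the commit:

    // Before: data had to be staged in CPU memory first, then copied in by Push
    std::vector<u8> staging(size);
    FillIndexData(staging.data(), size);                        // hypothetical data generator
    auto viaPush{allocator.Push(cycle, span<u8>(staging.data(), size))};

    // After: reserve space in the chunk first, then generate the data in place
    auto viaAllocate{allocator.Allocate(cycle, size)};
    FillIndexData(viaAllocate.region.data(), size);             // no intermediary buffer

Push remains available and, as the diff below shows, is now implemented as an Allocate followed by a copy into the returned region.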

Changed file 1 (megabuffer implementation):

@@ -23,15 +23,15 @@ namespace skyline::gpu {
         return backing.vkBuffer;
     }
 
-    vk::DeviceSize MegaBufferChunk::Push(const std::shared_ptr<FenceCycle> &newCycle, span<u8> data, bool pageAlign) {
+    std::pair<vk::DeviceSize, span<u8>> MegaBufferChunk::Allocate(const std::shared_ptr<FenceCycle> &newCycle, vk::DeviceSize size, bool pageAlign) {
         if (pageAlign) {
             // If page aligned data was requested then align the free region
             auto alignedFreeBase{util::AlignUp(static_cast<size_t>(freeRegion.data() - backing.data()), PAGE_SIZE)};
             freeRegion = backing.subspan(alignedFreeBase);
         }
 
-        if (data.size() > freeRegion.size())
-            return 0;
+        if (size > freeRegion.size())
+            return {0, {}};
 
         if (cycle != newCycle) {
             newCycle->ChainCycle(cycle);
@@ -39,12 +39,12 @@ namespace skyline::gpu {
         }
 
         // Allocate space for data from the free region
-        auto resultSpan{freeRegion.subspan(0, data.size())};
-        resultSpan.copy_from(data);
+        auto resultSpan{freeRegion.subspan(0, size)};
 
         // Move the free region along
-        freeRegion = freeRegion.subspan(data.size());
+        freeRegion = freeRegion.subspan(size);
 
-        return static_cast<vk::DeviceSize>(resultSpan.data() - backing.data());
+        return {static_cast<vk::DeviceSize>(resultSpan.data() - backing.data()), resultSpan};
     }
 
     MegaBufferAllocator::MegaBufferAllocator(GPU &gpu) : gpu{gpu}, activeChunk{chunks.emplace(chunks.end(), gpu)} {}
@@ -61,17 +61,23 @@ namespace skyline::gpu {
         return mutex.try_lock();
     }
 
-    MegaBufferAllocator::Allocation MegaBufferAllocator::Push(const std::shared_ptr<FenceCycle> &cycle, span<u8> data, bool pageAlign) {
-        if (vk::DeviceSize offset{activeChunk->Push(cycle, data, pageAlign)}; offset)
-            return {activeChunk->GetBacking(), offset};
+    MegaBufferAllocator::Allocation MegaBufferAllocator::Allocate(const std::shared_ptr<FenceCycle> &cycle, vk::DeviceSize size, bool pageAlign) {
+        if (auto allocation{activeChunk->Allocate(cycle, size, pageAlign)}; allocation.first)
+            return {activeChunk->GetBacking(), allocation.first, allocation.second};
 
         activeChunk = ranges::find_if(chunks, [&](auto &chunk) { return chunk.TryReset(); });
         if (activeChunk == chunks.end()) // If there are no chunks available, allocate a new one
             activeChunk = chunks.emplace(chunks.end(), gpu);
 
-        if (vk::DeviceSize offset{activeChunk->Push(cycle, data, pageAlign)}; offset)
-            return {activeChunk->GetBacking(), offset};
+        if (auto allocation{activeChunk->Allocate(cycle, size, pageAlign)}; allocation.first)
+            return {activeChunk->GetBacking(), allocation.first, allocation.second};
         else
-            throw exception("Failed to allocate megabuffer space for size: 0x{:X}", data.size());
+            throw exception("Failed to allocate megabuffer space for size: 0x{:X}", size);
+    }
+
+    MegaBufferAllocator::Allocation MegaBufferAllocator::Push(const std::shared_ptr<FenceCycle> &cycle, span<u8> data, bool pageAlign) {
+        auto allocation{Allocate(cycle, data.size(), pageAlign)};
+        allocation.region.copy_from(data);
+        return allocation;
     }
 }
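As a hypothetical consumer-side sketch of the new interface (the command buffer and fill helper below are assumed, not from this commit): the `region` span exposes the CPU-mapped bytes to write into, while `buffer` and `offset` identify the same range for GPU-side binding:

    if (auto alloc{allocator.Allocate(cycle, size)}) {                   // operator bool checks offset != 0
        WriteVertexData(alloc.region);                                   // hypothetical in-place CPU fill
        commandBuffer.bindVertexBuffers(0, alloc.buffer, alloc.offset);  // bind the same range for the GPU
    }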

Changed file 2 (megabuffer header):

@@ -30,11 +30,7 @@ namespace skyline::gpu {
          */
         vk::Buffer GetBacking() const;
 
-        /**
-         * @brief Pushes data to the chunk and returns the offset at which it was written
-         * @param pageAlign Whether the pushed data should be page aligned in the chunk
-         */
-        vk::DeviceSize Push(const std::shared_ptr<FenceCycle> &newCycle, span<u8> data, bool pageAlign = false);
+        std::pair<vk::DeviceSize, span<u8>> Allocate(const std::shared_ptr<FenceCycle> &newCycle, vk::DeviceSize size, bool pageAlign = false);
     };
 
     /**
@@ -55,6 +51,7 @@ namespace skyline::gpu {
         struct Allocation {
             vk::Buffer buffer; //!< The megabuffer chunk backing that the allocation was made within
             vk::DeviceSize offset; //!< The offset of the allocation in the chunk
+            span<u8> region; //!< The CPU mapped region of the allocation in the chunk
 
             operator bool() const {
                 return offset != 0;
@@ -81,6 +78,13 @@ namespace skyline::gpu {
          */
         bool try_lock();
 
+        /**
+         * @brief Allocates space in a megabuffer chunk and returns a structure describing the allocation
+         * @param pageAlign Whether the allocation should be page aligned in the megabuffer
+         * @note The allocator *MUST* be locked before calling this function
+         */
+        Allocation Allocate(const std::shared_ptr<FenceCycle> &cycle, vk::DeviceSize size, bool pageAlign = false);
+
         /**
          * @brief Pushes data to a megabuffer chunk and returns a structure describing the allocation
          * @param pageAlign Whether the pushed data should be page aligned in the megabuffer