Vulkan: Protect against uniform var ringbuffer overflow (#1378)
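
Previously, uniformData_updateUniformVars() wrapped the uniform variable ringbuffer write index back to zero whenever a draw call's data would not fit at the end of the buffer, without checking whether the GPU had finished reading the data about to be overwritten. This change records how far the writer had advanced when each command buffer was recorded (m_cmdBufferUniformRingbufIndices) and derives a read index from the most recently retired one. Before writing, the uploader now waits, submitting the current command buffer first if no other buffers are in flight, until enough of the ringbuffer has been consumed; if a single draw call still overflows the whole buffer, the corruption is logged and flagged with cemu_assert_suspicious() instead of spinning forever. The uniform var update is also moved earlier in draw_execute() so the recorded ringbuffer index reflects the current draw, and the misspelled unformBufferOffset field is renamed to uniformBufferOffset.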

goeiecool9999 2024-10-14 14:03:36 +02:00 committed by GitHub
parent 6dc73f5d79
commit 3acd0c4f2c
3 changed files with 75 additions and 36 deletions

View File

@@ -1892,6 +1892,7 @@ void VulkanRenderer::ProcessFinishedCommandBuffers()
 	if (fenceStatus == VK_SUCCESS)
 	{
 		ProcessDestructionQueue();
+		m_uniformVarBufferReadIndex = m_cmdBufferUniformRingbufIndices[m_commandBufferSyncIndex];
 		m_commandBufferSyncIndex = (m_commandBufferSyncIndex + 1) % m_commandBuffers.size();
 		memoryManager->cleanupBuffers(m_countCommandBufferFinished);
 		m_countCommandBufferFinished++;
@@ -1985,6 +1986,7 @@ void VulkanRenderer::SubmitCommandBuffer(VkSemaphore signalSemaphore, VkSemaphor
 		cemuLog_logDebug(LogType::Force, "Vulkan: Waiting for available command buffer...");
 		WaitForNextFinishedCommandBuffer();
 	}
+	m_cmdBufferUniformRingbufIndices[nextCmdBufferIndex] = m_cmdBufferUniformRingbufIndices[m_commandBufferIndex];
 	m_commandBufferIndex = nextCmdBufferIndex;
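
Taken together, these two hunks form the consumer side of the protection: a retiring command buffer publishes its recorded write mark as the new read index, and a freshly started buffer inherits the current mark so that retiring it without any draws leaves the read index unchanged. A minimal standalone model of that bookkeeping, with invented names (kPoolSize, ringbufMark, onDraw, etc. are stand-ins; the real logic lives in ProcessFinishedCommandBuffers, SubmitCommandBuffer, and draw_execute):

#include <array>
#include <cstddef>

constexpr size_t kPoolSize = 4;              // stand-in for kCommandBufferPoolSize
std::array<size_t, kPoolSize> ringbufMark{}; // per-command-buffer uniform write mark
size_t readIndex = 0;                        // oldest byte the GPU may still read
size_t currentCmdBuf = 0;

// Each draw records the writer's position into the current buffer's mark.
void onDraw(size_t writeIndex)
{
	ringbufMark[currentCmdBuf] = writeIndex;
}

// A newly started command buffer inherits the current mark; if it never
// issues a draw, retiring it leaves the read index where it was.
void onCommandBufferSubmitted(size_t nextCmdBuf)
{
	ringbufMark[nextCmdBuf] = ringbufMark[currentCmdBuf];
	currentCmdBuf = nextCmdBuf;
}

// When a command buffer's fence signals, every uniform byte written before
// its mark has been consumed, so the read index jumps forward to it.
void onCommandBufferRetired(size_t syncIndex)
{
	readIndex = ringbufMark[syncIndex];
}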
@@ -3562,13 +3564,13 @@ void VulkanRenderer::buffer_bindUniformBuffer(LatteConst::ShaderType shaderType,
 	switch (shaderType)
 	{
 	case LatteConst::ShaderType::Vertex:
-		dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_VERTEX].unformBufferOffset[bufferIndex] = offset;
+		dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_VERTEX].uniformBufferOffset[bufferIndex] = offset;
 		break;
 	case LatteConst::ShaderType::Geometry:
-		dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_GEOMETRY].unformBufferOffset[bufferIndex] = offset;
+		dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_GEOMETRY].uniformBufferOffset[bufferIndex] = offset;
 		break;
 	case LatteConst::ShaderType::Pixel:
-		dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_FRAGMENT].unformBufferOffset[bufferIndex] = offset;
+		dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_FRAGMENT].uniformBufferOffset[bufferIndex] = offset;
 		break;
 	default:
 		cemu_assert_debug(false);

View File

@@ -591,6 +591,7 @@ private:
 	bool m_uniformVarBufferMemoryIsCoherent{false};
 	uint8* m_uniformVarBufferPtr = nullptr;
 	uint32 m_uniformVarBufferWriteIndex = 0;
+	uint32 m_uniformVarBufferReadIndex = 0;

 	// transform feedback ringbuffer
 	VkBuffer m_xfbRingBuffer = VK_NULL_HANDLE;
@@ -637,6 +638,7 @@ private:
 	size_t m_commandBufferIndex = 0; // current buffer being filled
 	size_t m_commandBufferSyncIndex = 0; // latest buffer that finished execution (updated on submit)
 	size_t m_commandBufferIDOfPrevFrame = 0;
+	std::array<size_t, kCommandBufferPoolSize> m_cmdBufferUniformRingbufIndices {}; // index in the uniform ringbuffer
 	std::array<VkFence, kCommandBufferPoolSize> m_cmd_buffer_fences;
 	std::array<VkCommandBuffer, kCommandBufferPoolSize> m_commandBuffers;
 	std::array<VkSemaphore, kCommandBufferPoolSize> m_commandBufferSemaphores;
@@ -659,7 +661,7 @@ private:
 		uint32 uniformVarBufferOffset[VulkanRendererConst::SHADER_STAGE_INDEX_COUNT];
 		struct
 		{
-			uint32 unformBufferOffset[LATTE_NUM_MAX_UNIFORM_BUFFERS];
+			uint32 uniformBufferOffset[LATTE_NUM_MAX_UNIFORM_BUFFERS];
 		}shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_COUNT];
 	}dynamicOffsetInfo{};

View File

@@ -375,24 +375,20 @@ float s_vkUniformData[512 * 4];
 void VulkanRenderer::uniformData_updateUniformVars(uint32 shaderStageIndex, LatteDecompilerShader* shader)
 {
-	auto GET_UNIFORM_DATA_PTR = [&](size_t index) { return s_vkUniformData + (index / 4); };
+	auto GET_UNIFORM_DATA_PTR = [](size_t index) { return s_vkUniformData + (index / 4); };
 	sint32 shaderAluConst;
-	sint32 shaderUniformRegisterOffset;
 	switch (shader->shaderType)
 	{
 	case LatteConst::ShaderType::Vertex:
 		shaderAluConst = 0x400;
-		shaderUniformRegisterOffset = mmSQ_VTX_UNIFORM_BLOCK_START;
 		break;
 	case LatteConst::ShaderType::Pixel:
 		shaderAluConst = 0;
-		shaderUniformRegisterOffset = mmSQ_PS_UNIFORM_BLOCK_START;
 		break;
 	case LatteConst::ShaderType::Geometry:
 		shaderAluConst = 0; // geometry shader has no ALU const
-		shaderUniformRegisterOffset = mmSQ_GS_UNIFORM_BLOCK_START;
 		break;
 	default:
 		UNREACHABLE;
@@ -445,7 +441,7 @@ void VulkanRenderer::uniformData_updateUniformVars(uint32 shaderStageIndex, Latt
 	}
 	if (shader->uniform.loc_verticesPerInstance >= 0)
 	{
-		*(int*)(s_vkUniformData + ((size_t)shader->uniform.loc_verticesPerInstance / 4)) = m_streamoutState.verticesPerInstance;
+		*(int*)GET_UNIFORM_DATA_PTR(shader->uniform.loc_verticesPerInstance) = m_streamoutState.verticesPerInstance;
 		for (sint32 b = 0; b < LATTE_NUM_STREAMOUT_BUFFER; b++)
 		{
 			if (shader->uniform.loc_streamoutBufferBase[b] >= 0)
@@ -455,26 +451,63 @@ void VulkanRenderer::uniformData_updateUniformVars(uint32 shaderStageIndex, Latt
 		}
 	}
 	// upload
-	if ((m_uniformVarBufferWriteIndex + shader->uniform.uniformRangeSize + 1024) > UNIFORMVAR_RINGBUFFER_SIZE)
+	const uint32 bufferAlignmentM1 = std::max(m_featureControl.limits.minUniformBufferOffsetAlignment, m_featureControl.limits.nonCoherentAtomSize) - 1;
+	const uint32 uniformSize = (shader->uniform.uniformRangeSize + bufferAlignmentM1) & ~bufferAlignmentM1;
+
+	auto waitWhileCondition = [&](std::function<bool()> condition) {
+		while (condition())
+		{
+			if (m_commandBufferSyncIndex == m_commandBufferIndex)
+			{
+				if (m_cmdBufferUniformRingbufIndices[m_commandBufferIndex] != m_uniformVarBufferReadIndex)
+				{
+					draw_endRenderPass();
+					SubmitCommandBuffer();
+				}
+				else
+				{
+					// submitting work would not change readIndex, so there's no way for conditions based on it to change
+					cemuLog_log(LogType::Force, "draw call overflowed and corrupted uniform ringbuffer. expect visual corruption");
+					cemu_assert_suspicious();
+					break;
+				}
+			}
+			WaitForNextFinishedCommandBuffer();
+		}
+	};
+
+	// wrap around if it doesnt fit consecutively
+	if (m_uniformVarBufferWriteIndex + uniformSize > UNIFORMVAR_RINGBUFFER_SIZE)
 	{
+		waitWhileCondition([&]() {
+			return m_uniformVarBufferReadIndex > m_uniformVarBufferWriteIndex || m_uniformVarBufferReadIndex == 0;
+		});
 		m_uniformVarBufferWriteIndex = 0;
 	}
-	uint32 bufferAlignmentM1 = std::max(m_featureControl.limits.minUniformBufferOffsetAlignment, m_featureControl.limits.nonCoherentAtomSize) - 1;
+
+	auto ringBufRemaining = [&]() {
+		ssize_t ringBufferUsedBytes = (ssize_t)m_uniformVarBufferWriteIndex - m_uniformVarBufferReadIndex;
+		if (ringBufferUsedBytes < 0)
+			ringBufferUsedBytes += UNIFORMVAR_RINGBUFFER_SIZE;
+		return UNIFORMVAR_RINGBUFFER_SIZE - 1 - ringBufferUsedBytes;
+	};
+	waitWhileCondition([&]() {
+		return ringBufRemaining() < uniformSize;
+	});
+
 	const uint32 uniformOffset = m_uniformVarBufferWriteIndex;
 	memcpy(m_uniformVarBufferPtr + uniformOffset, s_vkUniformData, shader->uniform.uniformRangeSize);
-	m_uniformVarBufferWriteIndex += shader->uniform.uniformRangeSize;
-	m_uniformVarBufferWriteIndex = (m_uniformVarBufferWriteIndex + bufferAlignmentM1) & ~bufferAlignmentM1;
+	m_uniformVarBufferWriteIndex += uniformSize;
 	// update dynamic offset
 	dynamicOffsetInfo.uniformVarBufferOffset[shaderStageIndex] = uniformOffset;
 	// flush if not coherent
 	if (!m_uniformVarBufferMemoryIsCoherent)
 	{
-		uint32 nonCoherentAtomSizeM1 = m_featureControl.limits.nonCoherentAtomSize - 1;
 		VkMappedMemoryRange flushedRange{};
 		flushedRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
 		flushedRange.memory = m_uniformVarBufferMemory;
 		flushedRange.offset = uniformOffset;
-		flushedRange.size = (shader->uniform.uniformRangeSize + nonCoherentAtomSizeM1) & ~nonCoherentAtomSizeM1;
+		flushedRange.size = uniformSize;
 		vkFlushMappedMemoryRanges(m_logicalDevice, 1, &flushedRange);
 	}
 }
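
The two lambdas above carry the core of the producer-side fix: uniformSize is rounded up to the buffer alignment once, and ringBufRemaining() measures free space as the wrapped distance between writer and reader, holding one byte in reserve so a full buffer is never mistaken for an empty one (writeIndex == readIndex always means empty). A minimal standalone sketch of that arithmetic, with an illustrative kRingSize in place of UNIFORMVAR_RINGBUFFER_SIZE and the device alignment limits:

#include <cstdint>
#include <cstdio>

constexpr int64_t kRingSize = 1024; // stand-in for UNIFORMVAR_RINGBUFFER_SIZE

// Free bytes between writer and reader, wrapped, minus one reserved byte so
// that writeIndex == readIndex unambiguously means "empty".
int64_t ringBufRemaining(int64_t writeIndex, int64_t readIndex)
{
	int64_t used = writeIndex - readIndex;
	if (used < 0)
		used += kRingSize;
	return kRingSize - 1 - used;
}

// Round a size up to a power-of-two alignment, as done for uniformSize.
int64_t alignUp(int64_t size, int64_t alignment)
{
	return (size + alignment - 1) & ~(alignment - 1);
}

int main()
{
	printf("%lld\n", (long long)ringBufRemaining(896, 128)); // 255 free (768 used + 1 reserved)
	printf("%lld\n", (long long)ringBufRemaining(64, 896));  // 831 free (writer has wrapped)
	printf("%lld\n", (long long)alignUp(100, 64));           // 128
	return 0;
}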
@@ -494,7 +527,7 @@ void VulkanRenderer::draw_prepareDynamicOffsetsForDescriptorSet(uint32 shaderSta
 	{
 		for (auto& itr : pipeline_info->dynamicOffsetInfo.list_uniformBuffers[shaderStageIndex])
 		{
-			dynamicOffsets[numDynOffsets] = dynamicOffsetInfo.shaderUB[shaderStageIndex].unformBufferOffset[itr];
+			dynamicOffsets[numDynOffsets] = dynamicOffsetInfo.shaderUB[shaderStageIndex].uniformBufferOffset[itr];
 			numDynOffsets++;
 		}
 	}
@@ -1357,6 +1390,24 @@ void VulkanRenderer::draw_execute(uint32 baseVertex, uint32 baseInstance, uint32
 		return;
 	}

+	// prepare streamout
+	m_streamoutState.verticesPerInstance = count;
+	LatteStreamout_PrepareDrawcall(count, instanceCount);
+
+	// update uniform vars
+	LatteDecompilerShader* vertexShader = LatteSHRC_GetActiveVertexShader();
+	LatteDecompilerShader* pixelShader = LatteSHRC_GetActivePixelShader();
+	LatteDecompilerShader* geometryShader = LatteSHRC_GetActiveGeometryShader();
+
+	if (vertexShader)
+		uniformData_updateUniformVars(VulkanRendererConst::SHADER_STAGE_INDEX_VERTEX, vertexShader);
+	if (pixelShader)
+		uniformData_updateUniformVars(VulkanRendererConst::SHADER_STAGE_INDEX_FRAGMENT, pixelShader);
+	if (geometryShader)
+		uniformData_updateUniformVars(VulkanRendererConst::SHADER_STAGE_INDEX_GEOMETRY, geometryShader);
+
+	// store where the read pointer should go after command buffer execution
+	m_cmdBufferUniformRingbufIndices[m_commandBufferIndex] = m_uniformVarBufferWriteIndex;
+
 	// process index data
 	const LattePrimitiveMode primitiveMode = static_cast<LattePrimitiveMode>(LatteGPUState.contextRegister[mmVGT_PRIMITIVE_TYPE]);
@@ -1410,22 +1461,6 @@ void VulkanRenderer::draw_execute(uint32 baseVertex, uint32 baseInstance, uint32
 		LatteBufferCache_Sync(indexMin + baseVertex, indexMax + baseVertex, baseInstance, instanceCount);
 	}

-	// prepare streamout
-	m_streamoutState.verticesPerInstance = count;
-	LatteStreamout_PrepareDrawcall(count, instanceCount);
-
-	// update uniform vars
-	LatteDecompilerShader* vertexShader = LatteSHRC_GetActiveVertexShader();
-	LatteDecompilerShader* pixelShader = LatteSHRC_GetActivePixelShader();
-	LatteDecompilerShader* geometryShader = LatteSHRC_GetActiveGeometryShader();
-
-	if (vertexShader)
-		uniformData_updateUniformVars(VulkanRendererConst::SHADER_STAGE_INDEX_VERTEX, vertexShader);
-	if (pixelShader)
-		uniformData_updateUniformVars(VulkanRendererConst::SHADER_STAGE_INDEX_FRAGMENT, pixelShader);
-	if (geometryShader)
-		uniformData_updateUniformVars(VulkanRendererConst::SHADER_STAGE_INDEX_GEOMETRY, geometryShader);
-
 	PipelineInfo* pipeline_info;

 	if (!isFirst)
@@ -1613,13 +1648,13 @@ void VulkanRenderer::draw_updateUniformBuffersDirectAccess(LatteDecompilerShader
 	switch (shaderType)
 	{
 	case LatteConst::ShaderType::Vertex:
-		dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_VERTEX].unformBufferOffset[bufferIndex] = physicalAddr - m_importedMemBaseAddress;
+		dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_VERTEX].uniformBufferOffset[bufferIndex] = physicalAddr - m_importedMemBaseAddress;
 		break;
 	case LatteConst::ShaderType::Geometry:
-		dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_GEOMETRY].unformBufferOffset[bufferIndex] = physicalAddr - m_importedMemBaseAddress;
+		dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_GEOMETRY].uniformBufferOffset[bufferIndex] = physicalAddr - m_importedMemBaseAddress;
 		break;
 	case LatteConst::ShaderType::Pixel:
-		dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_FRAGMENT].unformBufferOffset[bufferIndex] = physicalAddr - m_importedMemBaseAddress;
+		dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_FRAGMENT].uniformBufferOffset[bufferIndex] = physicalAddr - m_importedMemBaseAddress;
 		break;
 	default:
 		UNREACHABLE;