Mirror of https://github.com/cemu-project/Cemu.git (synced 2024-11-22 09:09:18 +01:00)
Make controller button code thread-safe (#405)
* Refactor spinlock to meet Lockable requirements
* Input: Refactor button code and make it thread-safe
parent c40466f3a8
commit 028b3f7992
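The spinlock half of this change renames FSpinlock's acquire/release/tryAcquire/isHolding to lock/unlock/try_lock/is_locked so the class satisfies the standard Lockable requirements, which is what lets the new ControllerButtonState further down guard its data with std::lock_guard and std::scoped_lock instead of manual acquire/release pairs. A minimal sketch of the idea (the counter and function names here are invented for illustration; only FSpinlock itself comes from this commit, see the refactored class at the bottom of the diff):

#include <mutex> // std::lock_guard, std::scoped_lock

// Hypothetical data guarded by the refactored FSpinlock. Because the class
// now provides lock()/unlock()/try_lock(), the standard RAII guards work
// with it directly.
static FSpinlock s_exampleLock;
static int s_exampleCounter = 0;

void ExampleIncrement()
{
	std::lock_guard _l(s_exampleLock); // locks here, unlocks at end of scope
	s_exampleCounter++;
}

bool ExampleTryIncrement()
{
	// try_lock() is the direct replacement for the old tryAcquire()
	if (!s_exampleLock.try_lock())
		return false;
	s_exampleCounter++;
	s_exampleLock.unlock();
	return true;
}

std::scoped_lock works the same way and can take two FSpinlock objects at once, which is how UnsetButtons and operator== in ControllerButtonState lock both the source and destination state below.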
@@ -24,28 +24,28 @@ class DebugSymbolStorage
public:
static void StoreDataType(MPTR address, DEBUG_SYMBOL_TYPE type)
{
- s_lock.acquire();
+ s_lock.lock();
s_typeStorage[address] = type;
- s_lock.release();
+ s_lock.unlock();
}

static DEBUG_SYMBOL_TYPE GetDataType(MPTR address)
{
- s_lock.acquire();
+ s_lock.lock();
auto itr = s_typeStorage.find(address);
if (itr == s_typeStorage.end())
{
- s_lock.release();
+ s_lock.unlock();
return DEBUG_SYMBOL_TYPE::UNDEFINED;
}
DEBUG_SYMBOL_TYPE t = itr->second;
- s_lock.release();
+ s_lock.unlock();
return t;
}

static void ClearRange(MPTR address, uint32 length)
{
- s_lock.acquire();
+ s_lock.lock();
while (length > 0)
{
auto itr = s_typeStorage.find(address);
@@ -54,7 +54,7 @@ public:
address += 4;
length -= 4;
}
- s_lock.release();
+ s_lock.unlock();
}

private:
@@ -129,7 +129,7 @@ FSpinlock sTimerSpinlock;
// thread safe
uint64 PPCTimer_getFromRDTSC()
{
- sTimerSpinlock.acquire();
+ sTimerSpinlock.lock();
_mm_mfence();
uint64 rdtscCurrentMeasure = __rdtsc();
uint64 rdtscDif = rdtscCurrentMeasure - _rdtscLastMeasure;
@@ -165,6 +165,6 @@ uint64 PPCTimer_getFromRDTSC()

_tickSummary += elapsedTick;

- sTimerSpinlock.release();
+ sTimerSpinlock.unlock();
return _tickSummary;
}
@@ -47,20 +47,20 @@ void PPCRecompiler_visitAddressNoBlock(uint32 enterAddress)
if (ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4] != PPCRecompiler_leaveRecompilerCode_unvisited)
return;
// try to acquire lock
- if (!PPCRecompilerState.recompilerSpinlock.tryAcquire())
+ if (!PPCRecompilerState.recompilerSpinlock.try_lock())
return;
auto funcPtr = ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4];
if (funcPtr != PPCRecompiler_leaveRecompilerCode_unvisited)
{
// was visited since previous check
- PPCRecompilerState.recompilerSpinlock.release();
+ PPCRecompilerState.recompilerSpinlock.unlock();
return;
}
// add to recompilation queue and flag as visited
PPCRecompilerState.targetQueue.emplace(enterAddress);
ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4] = PPCRecompiler_leaveRecompilerCode_visited;

- PPCRecompilerState.recompilerSpinlock.release();
+ PPCRecompilerState.recompilerSpinlock.unlock();
}

void PPCRecompiler_recompileIfUnvisited(uint32 enterAddress)
@@ -193,13 +193,13 @@ PPCRecFunction_t* PPCRecompiler_recompileFunction(PPCFunctionBoundaryTracker::PP
bool PPCRecompiler_makeRecompiledFunctionActive(uint32 initialEntryPoint, PPCFunctionBoundaryTracker::PPCRange_t& range, PPCRecFunction_t* ppcRecFunc, std::vector<std::pair<MPTR, uint32>>& entryPoints)
{
// update jump table
- PPCRecompilerState.recompilerSpinlock.acquire();
+ PPCRecompilerState.recompilerSpinlock.lock();

// check if the initial entrypoint is still flagged for recompilation
// its possible that the range has been invalidated during the time it took to translate the function
if (ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[initialEntryPoint / 4] != PPCRecompiler_leaveRecompilerCode_visited)
{
- PPCRecompilerState.recompilerSpinlock.release();
+ PPCRecompilerState.recompilerSpinlock.unlock();
return false;
}

@@ -221,7 +221,7 @@ bool PPCRecompiler_makeRecompiledFunctionActive(uint32 initialEntryPoint, PPCFun
PPCRecompilerState.invalidationRanges.clear();
if (isInvalidated)
{
- PPCRecompilerState.recompilerSpinlock.release();
+ PPCRecompilerState.recompilerSpinlock.unlock();
return false;
}

@@ -249,7 +249,7 @@ bool PPCRecompiler_makeRecompiledFunctionActive(uint32 initialEntryPoint, PPCFun
{
r.storedRange = rangeStore_ppcRanges.storeRange(ppcRecFunc, r.ppcAddress, r.ppcAddress + r.ppcSize);
}
- PPCRecompilerState.recompilerSpinlock.release();
+ PPCRecompilerState.recompilerSpinlock.unlock();

return true;
@@ -272,13 +272,13 @@ void PPCRecompiler_recompileAtAddress(uint32 address)
// todo - use info from previously compiled ranges to determine full size of this function (and merge all the entryAddresses)

// collect all currently known entry points for this range
- PPCRecompilerState.recompilerSpinlock.acquire();
+ PPCRecompilerState.recompilerSpinlock.lock();

std::set<uint32> entryAddresses;

entryAddresses.emplace(address);

- PPCRecompilerState.recompilerSpinlock.release();
+ PPCRecompilerState.recompilerSpinlock.unlock();

std::vector<std::pair<MPTR, uint32>> functionEntryPoints;
auto func = PPCRecompiler_recompileFunction(range, entryAddresses, functionEntryPoints);
@@ -302,10 +302,10 @@ void PPCRecompiler_thread()
// 3) if yes -> calculate size, gather all entry points, recompile and update jump table
while (true)
{
- PPCRecompilerState.recompilerSpinlock.acquire();
+ PPCRecompilerState.recompilerSpinlock.lock();
if (PPCRecompilerState.targetQueue.empty())
{
- PPCRecompilerState.recompilerSpinlock.release();
+ PPCRecompilerState.recompilerSpinlock.unlock();
break;
}
auto enterAddress = PPCRecompilerState.targetQueue.front();
@@ -315,10 +315,10 @@ void PPCRecompiler_thread()
if (funcPtr != PPCRecompiler_leaveRecompilerCode_visited)
{
// only recompile functions if marked as visited
- PPCRecompilerState.recompilerSpinlock.release();
+ PPCRecompilerState.recompilerSpinlock.unlock();
continue;
}
- PPCRecompilerState.recompilerSpinlock.release();
+ PPCRecompilerState.recompilerSpinlock.unlock();

PPCRecompiler_recompileAtAddress(enterAddress);
}
@@ -376,7 +376,7 @@ struct ppcRecompilerFuncRange_t

bool PPCRecompiler_findFuncRanges(uint32 addr, ppcRecompilerFuncRange_t* rangesOut, size_t* countInOut)
{
- PPCRecompilerState.recompilerSpinlock.acquire();
+ PPCRecompilerState.recompilerSpinlock.lock();
size_t countIn = *countInOut;
size_t countOut = 0;

@@ -392,7 +392,7 @@ bool PPCRecompiler_findFuncRanges(uint32 addr, ppcRecompilerFuncRange_t* rangesO
countOut++;
}
);
- PPCRecompilerState.recompilerSpinlock.release();
+ PPCRecompilerState.recompilerSpinlock.unlock();
*countInOut = countOut;
if (countOut > countIn)
return false;
@@ -420,7 +420,7 @@ void PPCRecompiler_invalidateTableRange(uint32 offset, uint32 size)
void PPCRecompiler_deleteFunction(PPCRecFunction_t* func)
{
// assumes PPCRecompilerState.recompilerSpinlock is already held
- cemu_assert_debug(PPCRecompilerState.recompilerSpinlock.isHolding());
+ cemu_assert_debug(PPCRecompilerState.recompilerSpinlock.is_locked());
for (auto& r : func->list_ranges)
{
PPCRecompiler_invalidateTableRange(r.ppcAddress, r.ppcSize);
@@ -439,7 +439,7 @@ void PPCRecompiler_invalidateRange(uint32 startAddr, uint32 endAddr)
return;
cemu_assert_debug(endAddr >= startAddr);

- PPCRecompilerState.recompilerSpinlock.acquire();
+ PPCRecompilerState.recompilerSpinlock.lock();

uint32 rStart;
uint32 rEnd;
@@ -458,7 +458,7 @@ void PPCRecompiler_invalidateRange(uint32 startAddr, uint32 endAddr)
PPCRecompiler_deleteFunction(rFunc);
}

- PPCRecompilerState.recompilerSpinlock.release();
+ PPCRecompilerState.recompilerSpinlock.unlock();
}

void PPCRecompiler_init()
@@ -516,16 +516,16 @@ FSpinlock s_spinlockFetchShaderCache;

LatteFetchShader* LatteFetchShader::RegisterInCache(CacheHash fsHash)
{
- s_spinlockFetchShaderCache.acquire();
+ s_spinlockFetchShaderCache.lock();
auto itr = s_fetchShaderByHash.find(fsHash);
if (itr != s_fetchShaderByHash.end())
{
LatteFetchShader* fs = itr->second;
- s_spinlockFetchShaderCache.release();
+ s_spinlockFetchShaderCache.unlock();
return fs;
}
s_fetchShaderByHash.emplace(fsHash, this);
- s_spinlockFetchShaderCache.release();
+ s_spinlockFetchShaderCache.unlock();
return nullptr;
}

@@ -533,11 +533,11 @@ void LatteFetchShader::UnregisterInCache()
{
if (!m_isRegistered)
return;
- s_spinlockFetchShaderCache.acquire();
+ s_spinlockFetchShaderCache.lock();
auto itr = s_fetchShaderByHash.find(m_cacheHash);
cemu_assert(itr == s_fetchShaderByHash.end());
s_fetchShaderByHash.erase(itr);
- s_spinlockFetchShaderCache.release();
+ s_spinlockFetchShaderCache.unlock();
}

std::unordered_map<LatteFetchShader::CacheHash, LatteFetchShader*> LatteFetchShader::s_fetchShaderByHash;
@@ -1074,19 +1074,19 @@ void LatteBufferCache_notifyDCFlush(MPTR address, uint32 size)

uint32 firstPage = address / CACHE_PAGE_SIZE;
uint32 lastPage = (address + size - 1) / CACHE_PAGE_SIZE;
- g_spinlockDCFlushQueue.acquire();
+ g_spinlockDCFlushQueue.lock();
for (uint32 i = firstPage; i <= lastPage; i++)
s_DCFlushQueue->Set(i);
- g_spinlockDCFlushQueue.release();
+ g_spinlockDCFlushQueue.unlock();
}

void LatteBufferCache_processDCFlushQueue()
{
if (s_DCFlushQueue->Empty()) // quick check to avoid locking if there is no work to do
return;
- g_spinlockDCFlushQueue.acquire();
+ g_spinlockDCFlushQueue.lock();
std::swap(s_DCFlushQueue, s_DCFlushQueueAlternate);
- g_spinlockDCFlushQueue.release();
+ g_spinlockDCFlushQueue.unlock();
s_DCFlushQueueAlternate->ForAllAndClear([](uint32 index) {LatteBufferCache_invalidatePage(index * CACHE_PAGE_SIZE); });
}
@@ -37,16 +37,16 @@ public:

void TrackDependency(class PipelineInfo* pipelineInfo)
{
- s_spinlockDependency.acquire();
+ s_spinlockDependency.lock();
m_usedByPipelines.emplace_back(pipelineInfo);
- s_spinlockDependency.release();
+ s_spinlockDependency.unlock();
}

void RemoveDependency(class PipelineInfo* pipelineInfo)
{
- s_spinlockDependency.acquire();
+ s_spinlockDependency.lock();
vectorRemoveByValue(m_usedByPipelines, pipelineInfo);
- s_spinlockDependency.release();
+ s_spinlockDependency.unlock();
}

[[nodiscard]] const VkExtent2D& GetExtend() const { return m_extend;}
@@ -37,16 +37,16 @@ public:

void TrackDependency(class PipelineInfo* p)
{
- s_dependencyLock.acquire();
+ s_dependencyLock.lock();
list_pipelineInfo.emplace_back(p);
- s_dependencyLock.release();
+ s_dependencyLock.unlock();
}

void RemoveDependency(class PipelineInfo* p)
{
- s_dependencyLock.acquire();
+ s_dependencyLock.lock();
vectorRemoveByValue(list_pipelineInfo, p);
- s_dependencyLock.release();
+ s_dependencyLock.unlock();
}

void PreponeCompilation(bool isRenderThread) override;
@@ -206,18 +206,18 @@ void VulkanPipelineStableCache::LoadPipelineFromCache(std::span<uint8> fileData)

// deserialize file
LatteContextRegister* lcr = new LatteContextRegister();
- s_spinlockSharedInternal.acquire();
+ s_spinlockSharedInternal.lock();
CachedPipeline* cachedPipeline = new CachedPipeline();
- s_spinlockSharedInternal.release();
+ s_spinlockSharedInternal.unlock();

MemStreamReader streamReader(fileData.data(), fileData.size());
if (!DeserializePipeline(streamReader, *cachedPipeline))
{
// failed to deserialize
- s_spinlockSharedInternal.acquire();
+ s_spinlockSharedInternal.lock();
delete lcr;
delete cachedPipeline;
- s_spinlockSharedInternal.release();
+ s_spinlockSharedInternal.unlock();
return;
}
// restored register view from compacted state
@@ -264,18 +264,18 @@ void VulkanPipelineStableCache::LoadPipelineFromCache(std::span<uint8> fileData)
}
auto renderPass = __CreateTemporaryRenderPass(pixelShader, *lcr);
// create pipeline info
- m_pipelineIsCachedLock.acquire();
+ m_pipelineIsCachedLock.lock();
PipelineInfo* pipelineInfo = new PipelineInfo(0, 0, vertexShader->compatibleFetchShader, vertexShader, pixelShader, geometryShader);
- m_pipelineIsCachedLock.release();
+ m_pipelineIsCachedLock.unlock();
// compile
{
PipelineCompiler pp;
if (!pp.InitFromCurrentGPUState(pipelineInfo, *lcr, renderPass))
{
- s_spinlockSharedInternal.acquire();
+ s_spinlockSharedInternal.lock();
delete lcr;
delete cachedPipeline;
- s_spinlockSharedInternal.release();
+ s_spinlockSharedInternal.unlock();
return;
}
pp.Compile(true, true, false);
@@ -284,16 +284,16 @@ void VulkanPipelineStableCache::LoadPipelineFromCache(std::span<uint8> fileData)
// on success, calculate pipeline hash and flag as present in cache
uint64 pipelineBaseHash = vertexShader->baseHash;
uint64 pipelineStateHash = VulkanRenderer::draw_calculateGraphicsPipelineHash(vertexShader->compatibleFetchShader, vertexShader, geometryShader, pixelShader, renderPass, *lcr);
- m_pipelineIsCachedLock.acquire();
+ m_pipelineIsCachedLock.lock();
m_pipelineIsCached.emplace(pipelineBaseHash, pipelineStateHash);
- m_pipelineIsCachedLock.release();
+ m_pipelineIsCachedLock.unlock();
// clean up
- s_spinlockSharedInternal.acquire();
+ s_spinlockSharedInternal.lock();
delete pipelineInfo;
delete lcr;
delete cachedPipeline;
VulkanRenderer::GetInstance()->releaseDestructibleObject(renderPass);
- s_spinlockSharedInternal.release();
+ s_spinlockSharedInternal.unlock();
}

bool VulkanPipelineStableCache::HasPipelineCached(uint64 baseHash, uint64 pipelineStateHash)
@@ -3447,14 +3447,14 @@ void VulkanRenderer::releaseDestructibleObject(VKRDestructibleObject* destructib
return;
}
// otherwise put on queue
- m_spinlockDestructionQueue.acquire();
+ m_spinlockDestructionQueue.lock();
m_destructionQueue.emplace_back(destructibleObject);
- m_spinlockDestructionQueue.release();
+ m_spinlockDestructionQueue.unlock();
}

void VulkanRenderer::ProcessDestructionQueue2()
{
- m_spinlockDestructionQueue.acquire();
+ m_spinlockDestructionQueue.lock();
for (auto it = m_destructionQueue.begin(); it != m_destructionQueue.end();)
{
if ((*it)->canDestroy())
@@ -3465,7 +3465,7 @@ void VulkanRenderer::ProcessDestructionQueue2()
}
++it;
}
- m_spinlockDestructionQueue.release();
+ m_spinlockDestructionQueue.unlock();
}

VkDescriptorSetInfo::~VkDescriptorSetInfo()
@@ -4010,9 +4010,9 @@ void VulkanRenderer::AppendOverlayDebugInfo()
ImGui::Text("ImageView %u", performanceMonitor.vk.numImageViews.get());
ImGui::Text("RenderPass %u", performanceMonitor.vk.numRenderPass.get());
ImGui::Text("Framebuffer %u", performanceMonitor.vk.numFramebuffer.get());
- m_spinlockDestructionQueue.acquire();
+ m_spinlockDestructionQueue.lock();
ImGui::Text("DestructionQ %u", (unsigned int)m_destructionQueue.size());
- m_spinlockDestructionQueue.release();
+ m_spinlockDestructionQueue.unlock();

ImGui::Text("BeginRP/f %u", performanceMonitor.vk.numBeginRenderpassPerFrame.get());
@@ -234,38 +234,38 @@ namespace iosu

void _IPCInitDispatchablePool()
{
- sIPCDispatchableCommandPoolLock.acquire();
+ sIPCDispatchableCommandPoolLock.lock();
while (!sIPCFreeDispatchableCommands.empty())
sIPCFreeDispatchableCommands.pop();
for (size_t i = 0; i < sIPCDispatchableCommandPool.GetCount(); i++)
sIPCFreeDispatchableCommands.push(sIPCDispatchableCommandPool.GetPtr()+i);
- sIPCDispatchableCommandPoolLock.release();
+ sIPCDispatchableCommandPoolLock.unlock();
}

IOSDispatchableCommand* _IPCAllocateDispatchableCommand()
{
- sIPCDispatchableCommandPoolLock.acquire();
+ sIPCDispatchableCommandPoolLock.lock();
if (sIPCFreeDispatchableCommands.empty())
{
cemuLog_log(LogType::Force, "IOS: Exhausted pool of dispatchable commands");
- sIPCDispatchableCommandPoolLock.release();
+ sIPCDispatchableCommandPoolLock.unlock();
return nullptr;
}
IOSDispatchableCommand* cmd = sIPCFreeDispatchableCommands.front();
sIPCFreeDispatchableCommands.pop();
cemu_assert_debug(!cmd->isAllocated);
cmd->isAllocated = true;
- sIPCDispatchableCommandPoolLock.release();
+ sIPCDispatchableCommandPoolLock.unlock();
return cmd;
}

void _IPCReleaseDispatchableCommand(IOSDispatchableCommand* cmd)
{
- sIPCDispatchableCommandPoolLock.acquire();
+ sIPCDispatchableCommandPoolLock.lock();
cemu_assert_debug(cmd->isAllocated);
cmd->isAllocated = false;
sIPCFreeDispatchableCommands.push(cmd);
- sIPCDispatchableCommandPoolLock.release();
+ sIPCDispatchableCommandPoolLock.unlock();
}

static constexpr size_t MAX_NUM_ACTIVE_DEV_HANDLES = 96; // per process
@@ -8,27 +8,27 @@ struct CoreinitAsyncCallback

static void queue(MPTR functionMPTR, uint32 numParameters, uint32 r3, uint32 r4, uint32 r5, uint32 r6, uint32 r7, uint32 r8, uint32 r9, uint32 r10)
{
- s_asyncCallbackSpinlock.acquire();
+ s_asyncCallbackSpinlock.lock();
s_asyncCallbackQueue.emplace_back(allocateAndInitFromPool(functionMPTR, numParameters, r3, r4, r5, r6, r7, r8, r9, r10));
- s_asyncCallbackSpinlock.release();
+ s_asyncCallbackSpinlock.unlock();
}

static void callNextFromQueue()
{
- s_asyncCallbackSpinlock.acquire();
+ s_asyncCallbackSpinlock.lock();
if (s_asyncCallbackQueue.empty())
{
cemuLog_log(LogType::Force, "AsyncCallbackQueue is empty. Unexpected behavior");
- s_asyncCallbackSpinlock.release();
+ s_asyncCallbackSpinlock.unlock();
return;
}
CoreinitAsyncCallback* cb = s_asyncCallbackQueue[0];
s_asyncCallbackQueue.erase(s_asyncCallbackQueue.begin());
- s_asyncCallbackSpinlock.release();
+ s_asyncCallbackSpinlock.unlock();
cb->doCall();
- s_asyncCallbackSpinlock.acquire();
+ s_asyncCallbackSpinlock.lock();
releaseToPool(cb);
- s_asyncCallbackSpinlock.release();
+ s_asyncCallbackSpinlock.unlock();
}

private:
@@ -39,7 +39,7 @@ private:

static CoreinitAsyncCallback* allocateAndInitFromPool(MPTR functionMPTR, uint32 numParameters, uint32 r3, uint32 r4, uint32 r5, uint32 r6, uint32 r7, uint32 r8, uint32 r9, uint32 r10)
{
- cemu_assert_debug(s_asyncCallbackSpinlock.isHolding());
+ cemu_assert_debug(s_asyncCallbackSpinlock.is_locked());
if (s_asyncCallbackPool.empty())
{
CoreinitAsyncCallback* cb = new CoreinitAsyncCallback(functionMPTR, numParameters, r3, r4, r5, r6, r7, r8, r9, r10);
@@ -54,7 +54,7 @@ private:

static void releaseToPool(CoreinitAsyncCallback* cb)
{
- cemu_assert_debug(s_asyncCallbackSpinlock.isHolding());
+ cemu_assert_debug(s_asyncCallbackSpinlock.is_locked());
s_asyncCallbackPool.emplace_back(cb);
}
@@ -6,8 +6,8 @@

// titles that utilize MP task queue: Yoshi's Woolly World, Fast Racing Neo, Tokyo Mirage Sessions, Mii Maker

- #define AcquireMPQLock() s_workaroundSpinlock.acquire()
- #define ReleaseMPQLock() s_workaroundSpinlock.release()
+ #define AcquireMPQLock() s_workaroundSpinlock.lock()
+ #define ReleaseMPQLock() s_workaroundSpinlock.unlock()

namespace coreinit
{
@@ -35,7 +35,7 @@ namespace coreinit

void MPInitTask(MPTask* task, void* func, void* data, uint32 size)
{
- s_workaroundSpinlock.acquire();
+ s_workaroundSpinlock.lock();
task->thisptr = task;

task->coreIndex = PPC_CORE_COUNT;
@@ -48,7 +48,7 @@

task->userdata = nullptr;
task->runtime = 0;
- s_workaroundSpinlock.release();
+ s_workaroundSpinlock.unlock();
}

bool MPTermTask(MPTask* task)
@@ -465,12 +465,12 @@ namespace coreinit

void _OSFastMutex_AcquireContention(OSFastMutex* fastMutex)
{
- g_fastMutexSpinlock.acquire();
+ g_fastMutexSpinlock.lock();
}

void _OSFastMutex_ReleaseContention(OSFastMutex* fastMutex)
{
- g_fastMutexSpinlock.release();
+ g_fastMutexSpinlock.unlock();
}

void OSFastMutex_LockInternal(OSFastMutex* fastMutex)
@@ -778,7 +778,7 @@ namespace snd_core

void AXIst_SyncVPB(AXVPBInternal_t** lastProcessedDSPShadowCopy, AXVPBInternal_t** lastProcessedPPCShadowCopy)
{
- __AXVoiceListSpinlock.acquire();
+ __AXVoiceListSpinlock.lock();

AXVPBInternal_t* previousInternalDSP = nullptr;
AXVPBInternal_t* previousInternalPPC = nullptr;
@@ -869,7 +869,7 @@
else
*lastProcessedPPCShadowCopy = nullptr;
}
- __AXVoiceListSpinlock.release();
+ __AXVoiceListSpinlock.unlock();
}

void AXIst_HandleFrameCallbacks()
@@ -393,7 +393,7 @@
AXVPB* AXAcquireVoiceEx(uint32 priority, MPTR callbackEx, MPTR userParam)
{
cemu_assert(priority != AX_PRIORITY_FREE && priority < AX_PRIORITY_MAX);
- __AXVoiceListSpinlock.acquire();
+ __AXVoiceListSpinlock.lock();
AXVPB* vpb = AXVoiceList_GetFreeVoice();
if (vpb != nullptr)
{
@@ -410,7 +410,7 @@
if (droppedVoice == nullptr)
{
// no voice available
- __AXVoiceListSpinlock.release();
+ __AXVoiceListSpinlock.unlock();
return nullptr;
}
vpb->userParam = userParam;
@@ -418,18 +418,18 @@
vpb->callbackEx = callbackEx;
AXVPB_SetVoiceDefault(vpb);
}
- __AXVoiceListSpinlock.release();
+ __AXVoiceListSpinlock.unlock();
return vpb;
}

void AXFreeVoice(AXVPB* vpb)
{
cemu_assert(vpb != nullptr);
- __AXVoiceListSpinlock.acquire();
+ __AXVoiceListSpinlock.lock();
if (vpb->priority == (uint32be)AX_PRIORITY_FREE)
{
forceLog_printf("AXFreeVoice() called on free voice\n");
- __AXVoiceListSpinlock.release();
+ __AXVoiceListSpinlock.unlock();
return;
}
AXVoiceProtection_Release(vpb);
@@ -442,7 +442,7 @@
vpb->callback = MPTR_NULL;
vpb->callbackEx = MPTR_NULL;
AXVoiceList_AddFreeVoice(vpb);
- __AXVoiceListSpinlock.release();
+ __AXVoiceListSpinlock.unlock();
}

void AXVPBInit()
@@ -45,7 +45,8 @@ struct WindowInfo
{
const std::lock_guard<std::mutex> lock(keycode_mutex);
m_keydown[keycode] = state;
- };
+ }
+
bool get_keystate(uint32 keycode)
{
const std::lock_guard<std::mutex> lock(keycode_mutex);
@@ -54,25 +55,20 @@ struct WindowInfo
return false;
return result->second;
}
- void get_keystates(std::unordered_map<uint32, bool>& buttons_out)
- {
- const std::lock_guard<std::mutex> lock(keycode_mutex);
- for (auto&& button : m_keydown)
- {
- buttons_out[button.first] = button.second;
- }
- }

void set_keystatesdown()
{
const std::lock_guard<std::mutex> lock(keycode_mutex);
std::for_each(m_keydown.begin(), m_keydown.end(), [](std::pair<const uint32, bool>& el){ el.second = false; });
}

+ template <typename fn>
+ void iter_keystates(fn f)
+ {
+ const std::lock_guard<std::mutex> lock(keycode_mutex);
+ std::for_each(m_keydown.cbegin(), m_keydown.cend(), f);
+ }

WindowHandleInfo window_main;
WindowHandleInfo window_pad;
@@ -111,7 +111,7 @@ InputSettings2::InputSettings2(wxWindow* parent)
Bind(wxEVT_TIMER, &InputSettings2::on_timer, this);

m_timer = new wxTimer(this);
- m_timer->Start(100);
+ m_timer->Start(25);

m_controller_changed = EventService::instance().connect<Events::ControllerChanged>(&InputSettings2::on_controller_changed, this);
}
@@ -41,77 +41,69 @@ void InputPanel::on_timer(const EmulatedControllerPtr& emulated_controller, cons
}

static bool s_was_idle = true;
- if (!std::any_of(state.buttons.begin(), state.buttons.end(), [](auto el){ return el.second; })) {
+ if (state.buttons.IsIdle())
+ {
s_was_idle = true;
return;
}

- if (!s_was_idle) {
+ if (!s_was_idle)
+ {
return;
}
- auto get_button_state = [&](uint32 key_id)
- {
- auto result = state.buttons.find(key_id);
- if (result == state.buttons.end())
- return false;
- return result->second;
- };
s_was_idle = false;
- for(auto && button : state.buttons)
+ for(const auto& id : state.buttons.GetButtonList())
{
- if (button.second)
+ if (controller->has_axis())
{
- auto id=button.first;
- if (controller->has_axis()) {
- // test if one axis direction is pressed more than the other
- if ((id == kAxisXP || id == kAxisXN) && (get_button_state(kAxisYP) || get_button_state(kAxisYN)))
- {
- if (std::abs(state.axis.y) > std::abs(state.axis.x))
- continue;
- }
- else if ((id == kAxisYP || id == kAxisYN) && (get_button_state(kAxisXP) || get_button_state(kAxisXN)))
- {
- if (std::abs(state.axis.x) > std::abs(state.axis.y))
- continue;
- }
- else if ((id == kRotationXP || id == kRotationXN) && (get_button_state(kRotationYP) || get_button_state(kRotationYN)))
- {
- if (std::abs(state.rotation.y) > std::abs(state.rotation.x))
- continue;
- }
- else if ((id == kRotationYP || id == kRotationYN) && (get_button_state(kRotationXP) || get_button_state(kRotationXN)))
- {
- if (std::abs(state.rotation.x) > std::abs(state.rotation.y))
- continue;
- }
- else if ((id == kTriggerXP || id == kTriggerXN) && (get_button_state(kTriggerYP) || get_button_state(kTriggerYN)))
- {
- if (std::abs(state.trigger.y) > std::abs(state.trigger.x))
- continue;
- }
- else if ((id == kTriggerYP || id == kTriggerYN) && (get_button_state(kTriggerXP) || get_button_state(kTriggerXN)))
- {
- if (std::abs(state.trigger.x) > std::abs(state.trigger.y))
- continue;
- }
-
- // ignore too low button values on configuration
- if (id >= kButtonAxisStart)
- {
- if (controller->get_axis_value(id) < 0.33f) {
- forceLogDebug_printf("skipping since value too low %f", controller->get_axis_value(id));
- s_was_idle = true;
- return;
- }
- }
+ // test if one axis direction is pressed more than the other
+ if ((id == kAxisXP || id == kAxisXN) && (state.buttons.GetButtonState(kAxisYP) || state.buttons.GetButtonState(kAxisYN)))
+ {
+ if (std::abs(state.axis.y) > std::abs(state.axis.x))
+ continue;
+ }
+ else if ((id == kAxisYP || id == kAxisYN) && (state.buttons.GetButtonState(kAxisXP) || state.buttons.GetButtonState(kAxisXN)))
+ {
+ if (std::abs(state.axis.x) > std::abs(state.axis.y))
+ continue;
+ }
+ else if ((id == kRotationXP || id == kRotationXN) && (state.buttons.GetButtonState(kRotationYP) || state.buttons.GetButtonState(kRotationYN)))
+ {
+ if (std::abs(state.rotation.y) > std::abs(state.rotation.x))
+ continue;
+ }
+ else if ((id == kRotationYP || id == kRotationYN) && (state.buttons.GetButtonState(kRotationXP) || state.buttons.GetButtonState(kRotationXN)))
+ {
+ if (std::abs(state.rotation.x) > std::abs(state.rotation.y))
+ continue;
+ }
+ else if ((id == kTriggerXP || id == kTriggerXN) && (state.buttons.GetButtonState(kTriggerYP) || state.buttons.GetButtonState(kTriggerYN)))
+ {
+ if (std::abs(state.trigger.y) > std::abs(state.trigger.x))
+ continue;
+ }
+ else if ((id == kTriggerYP || id == kTriggerYN) && (state.buttons.GetButtonState(kTriggerXP) || state.buttons.GetButtonState(kTriggerXN)))
+ {
+ if (std::abs(state.trigger.x) > std::abs(state.trigger.y))
+ continue;
+ }

- emulated_controller->set_mapping(mapping, controller, id);
- element->SetValue(controller->get_button_name(id));
- element->SetBackgroundColour(kKeyColourNormalMode);
- m_color_backup[element->GetId()] = kKeyColourNormalMode;
- break;
+ // ignore too low button values on configuration
+ if (id >= kButtonAxisStart)
+ {
+ if (controller->get_axis_value(id) < 0.33f) {
+ forceLogDebug_printf("skipping since value too low %f", controller->get_axis_value(id));
+ s_was_idle = true;
+ return;
+ }
+ }
}

+ emulated_controller->set_mapping(mapping, controller, id);
+ element->SetValue(controller->get_button_name(id));
+ element->SetBackgroundColour(kKeyColourNormalMode);
+ m_color_backup[element->GetId()] = kKeyColourNormalMode;
+ break;
}

if (const auto sibling = get_next_sibling(element))
@@ -15,10 +15,7 @@ const ControllerState& ControllerBase::update_state()
ControllerState result = raw_state();

// ignore default buttons
- for (auto&& el : m_default_state.buttons)
- {
- result.buttons[el.first] = result.buttons[el.first] && !el.second;
- }
+ result.buttons.UnsetButtons(m_default_state.buttons);
// apply deadzone and range and ignore default axis values
apply_axis_setting(result.axis, m_default_state.axis, m_settings.axis);
apply_axis_setting(result.rotation, m_default_state.rotation, m_settings.rotation);
@@ -26,22 +23,22 @@ const ControllerState& ControllerBase::update_state()

#define APPLY_AXIS_BUTTON(_axis_, _flag_) \
if (result._axis_.x < -ControllerState::kAxisThreshold) \
- result.buttons[(_flag_) + (kAxisXN - kAxisXP)]=true; \
+ result.buttons.SetButtonState((_flag_) + (kAxisXN - kAxisXP), true); \
else if (result._axis_.x > ControllerState::kAxisThreshold) \
- result.buttons[(_flag_)]=true; \
+ result.buttons.SetButtonState((_flag_), true); \
if (result._axis_.y < -ControllerState::kAxisThreshold) \
- result.buttons[(_flag_) + 1 + (kAxisXN - kAxisXP)]=true; \
+ result.buttons.SetButtonState((_flag_) + 1 + (kAxisXN - kAxisXP), true); \
else if (result._axis_.y > ControllerState::kAxisThreshold) \
- result.buttons[(_flag_) + 1]=true;
+ result.buttons.SetButtonState((_flag_) + 1, true);

if (result.axis.x < -ControllerState::kAxisThreshold)
- result.buttons[(kAxisXP) + (kAxisXN - kAxisXP)]=true;
+ result.buttons.SetButtonState((kAxisXP) + (kAxisXN - kAxisXP), true);
else if (result.axis.x > ControllerState::kAxisThreshold)
- result.buttons[(kAxisXP)]=true;
+ result.buttons.SetButtonState((kAxisXP), true);
if (result.axis.y < -ControllerState::kAxisThreshold)
- result.buttons[(kAxisXP) + 1 + (kAxisXN - kAxisXP)]=true;
+ result.buttons.SetButtonState((kAxisXP) + 1 + (kAxisXN - kAxisXP), true);
else if (result.axis.y > ControllerState::kAxisThreshold)
- result.buttons[(kAxisXP) + 1]=true;
+ result.buttons.SetButtonState((kAxisXP) + 1, true);
APPLY_AXIS_BUTTON(rotation, kRotationXP);
APPLY_AXIS_BUTTON(trigger, kTriggerXP);

@@ -129,8 +126,7 @@ bool ControllerBase::operator==(const ControllerBase& c) const

float ControllerBase::get_axis_value(uint64 button) const
{
- auto buttonState=m_last_state.buttons.find(button);
- if (buttonState!=m_last_state.buttons.end() && buttonState->second)
+ if (m_last_state.buttons.GetButtonState(button))
{
if (button <= kButtonNoneAxisMAX || !has_axis())
return 1.0f;
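For reference, the APPLY_AXIS_BUTTON macro above maps one analog axis onto four virtual button IDs through the new SetButtonState calls. A hand-expanded sketch for the rotation axis is shown below; the "i.e." comments assume the usual layout where the negative-direction and Y constants directly follow kRotationXP, which is what the `+ 1` and `(kAxisXN - kAxisXP)` offsets rely on and is an inference rather than something stated in this diff:

// Hand-expanded sketch of APPLY_AXIS_BUTTON(rotation, kRotationXP):
if (result.rotation.x < -ControllerState::kAxisThreshold)
	result.buttons.SetButtonState(kRotationXP + (kAxisXN - kAxisXP), true); // i.e. kRotationXN
else if (result.rotation.x > ControllerState::kAxisThreshold)
	result.buttons.SetButtonState(kRotationXP, true);
if (result.rotation.y < -ControllerState::kAxisThreshold)
	result.buttons.SetButtonState(kRotationXP + 1 + (kAxisXN - kAxisXP), true); // i.e. kRotationYN
else if (result.rotation.y > ControllerState::kAxisThreshold)
	result.buttons.SetButtonState(kRotationXP + 1, true); // i.e. kRotationYP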
@@ -1,6 +1,115 @@
#pragma once

#include <glm/vec2.hpp>
+ #include "util/helpers/fspinlock.h"
+
+ // helper class for storing and managing button press states in a thread-safe manner
+ struct ControllerButtonState
+ {
+ ControllerButtonState() = default;
+ ControllerButtonState(const ControllerButtonState& other)
+ {
+ this->m_pressedButtons = other.m_pressedButtons;
+ }
+
+ ControllerButtonState(ControllerButtonState&& other)
+ {
+ this->m_pressedButtons = std::move(other.m_pressedButtons);
+ }
+
+ void SetButtonState(uint32 buttonId, bool isPressed)
+ {
+ std::lock_guard _l(this->m_spinlock);
+ if (isPressed)
+ {
+ if (std::find(m_pressedButtons.cbegin(), m_pressedButtons.cend(), buttonId) != m_pressedButtons.end())
+ return;
+ m_pressedButtons.emplace_back(buttonId);
+ }
+ else
+ {
+ std::erase(m_pressedButtons, buttonId);
+ }
+ }
+
+ // set multiple buttons at once within a single lock interval
+ void SetPressedButtons(std::span<uint32> buttonList)
+ {
+ std::lock_guard _l(this->m_spinlock);
+ for (auto& buttonId : buttonList)
+ {
+ if (std::find(m_pressedButtons.cbegin(), m_pressedButtons.cend(), buttonId) == m_pressedButtons.end())
+ m_pressedButtons.emplace_back(buttonId);
+ }
+ }
+
+ // returns true if pressed
+ bool GetButtonState(uint32 buttonId) const
+ {
+ std::lock_guard _l(this->m_spinlock);
+ bool r = std::find(m_pressedButtons.cbegin(), m_pressedButtons.cend(), buttonId) != m_pressedButtons.cend();
+ return r;
+ }
+
+ // remove pressed state for all pressed buttons in buttonsToUnset
+ void UnsetButtons(const ControllerButtonState& buttonsToUnset)
+ {
+ std::scoped_lock _l(this->m_spinlock, buttonsToUnset.m_spinlock);
+ for (auto it = m_pressedButtons.begin(); it != m_pressedButtons.end();)
+ {
+ if (std::find(buttonsToUnset.m_pressedButtons.cbegin(), buttonsToUnset.m_pressedButtons.cend(), *it) == buttonsToUnset.m_pressedButtons.cend())
+ {
+ ++it;
+ continue;
+ }
+ it = m_pressedButtons.erase(it);
+ }
+ }
+
+ // returns true if no buttons are pressed
+ bool IsIdle() const
+ {
+ std::lock_guard _l(this->m_spinlock);
+ const bool r = m_pressedButtons.empty();
+ return r;
+ }
+
+ std::vector<uint32> GetButtonList() const
+ {
+ std::lock_guard _l(this->m_spinlock);
+ std::vector<uint32> copy = m_pressedButtons;
+ return copy;
+ }
+
+ bool operator==(const ControllerButtonState& other) const
+ {
+ std::scoped_lock _l(this->m_spinlock, other.m_spinlock);
+ auto& otherButtons = other.m_pressedButtons;
+ if (m_pressedButtons.size() != otherButtons.size())
+ {
+ return false;
+ }
+ for (auto& buttonId : m_pressedButtons)
+ {
+ if (std::find(otherButtons.cbegin(), otherButtons.cend(), buttonId) == otherButtons.cend())
+ {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ ControllerButtonState& operator=(ControllerButtonState&& other)
+ {
+ cemu_assert_debug(!other.m_spinlock.is_locked());
+ this->m_pressedButtons = std::move(other.m_pressedButtons);
+ return *this;
+ }
+
+ private:
+ std::vector<uint32> m_pressedButtons; // since only very few buttons are pressed at a time, using a vector with linear scan is more efficient than a set/map
+ mutable FSpinlock m_spinlock;
+ };

struct ControllerState
{
@@ -17,7 +126,7 @@ struct ControllerState
glm::vec2 rotation{ };
glm::vec2 trigger{ };

- std::unordered_map<uint32, bool> buttons{};
+ ControllerButtonState buttons{};

uint64 last_state = 0;
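Taken together, the new ControllerButtonState replaces the old std::unordered_map<uint32, bool> with a small spinlock-guarded vector of pressed button IDs. A short usage sketch, assuming only the member functions added above (the button IDs and the surrounding code are invented for illustration; includes for <array> and Cemu's uint32 typedef are assumed to be in scope):

// Producer side, e.g. a controller backend's raw_state():
ControllerState state{};
state.buttons.SetButtonState(0, true);   // press button 0
state.buttons.SetButtonState(0, false);  // release button 0

std::array<uint32, 2> pressed{ 1, 2 };
state.buttons.SetPressedButtons(pressed); // several presses under a single lock

// Consumer side, e.g. mapping/UI code:
if (!state.buttons.IsIdle())
{
	// GetButtonList() returns a copy, so no lock is held while iterating
	for (const auto id : state.buttons.GetButtonList())
	{
		if (state.buttons.GetButtonState(id))
		{
			// ... feed id into the mapping code ...
		}
	}
}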
@@ -137,7 +137,7 @@ ControllerState DSUController::raw_state()
{
if (HAS_BIT(state.data.state1, i))
{
- result.buttons[bitindex]=true;
+ result.buttons.SetButtonState(bitindex, true);
}
}

@@ -145,12 +145,12 @@ ControllerState DSUController::raw_state()
{
if (HAS_BIT(state.data.state2, i))
{
- result.buttons[bitindex]=true;
+ result.buttons.SetButtonState(bitindex, true);
}
}

if (state.data.touch)
- result.buttons[kButton16]=true;
+ result.buttons.SetButtonState(kButton16, true);

result.axis.x = (float)state.data.lx / std::numeric_limits<uint8>::max();
result.axis.x = (result.axis.x * 2.0f) - 1.0f;
@@ -245,7 +245,6 @@ ControllerState DirectInputController::raw_state()
ControllerState result{};
if (!is_connected())
return result;
-
HRESULT hr = m_device->Poll();
if (FAILED(hr))
{
@@ -277,9 +276,7 @@ ControllerState DirectInputController::raw_state()
for (size_t i = 0; i < std::size(state.rgbButtons); ++i)
{
if (HAS_BIT(state.rgbButtons[i], 7))
- {
- result.buttons[i]=true;
- }
+ result.buttons.SetButtonState(i, true);
}

// axis
@@ -316,19 +313,19 @@ ControllerState DirectInputController::raw_state()
{
switch (pov)
{
- case 0: result.buttons[kButtonUp]=true;
+ case 0: result.buttons.SetButtonState(kButtonUp, true);
break;
- case 4500: result.buttons[kButtonUp]=true; // up + right
- case 9000: result.buttons[kButtonRight]=true;
+ case 4500: result.buttons.SetButtonState(kButtonUp, true); // up + right
+ case 9000: result.buttons.SetButtonState(kButtonRight, true);
break;
- case 13500: result.buttons[kButtonRight] = true; // right + down
- case 18000: result.buttons[kButtonDown] = true;
+ case 13500: result.buttons.SetButtonState(kButtonRight, true); // right + down
+ case 18000: result.buttons.SetButtonState(kButtonDown, true);
break;
- case 22500: result.buttons[kButtonDown] = true; // down + left
- case 27000: result.buttons[kButtonLeft] = true;
+ case 22500: result.buttons.SetButtonState(kButtonDown, true); // down + left
+ case 27000: result.buttons.SetButtonState(kButtonLeft, true);
break;
- case 31500: result.buttons[kButtonLeft] = true; // left + up
- result.buttons[kButtonUp] = true; // left + up
+ case 31500: result.buttons.SetButtonState(kButtonLeft, true); // left + up
+ result.buttons.SetButtonState(kButtonUp, true); // left + up
break;
}
}
@@ -1,5 +1,6 @@
#include "input/api/Keyboard/KeyboardController.h"
+ #include <boost/container/small_vector.hpp>

#include "input/api/Keyboard/KeyboardController.h"
#include "gui/guiWrapper.h"

KeyboardController::KeyboardController()
@@ -51,7 +52,9 @@ ControllerState KeyboardController::raw_state()
ControllerState result{};
if (g_window_info.app_active)
{
- g_window_info.get_keystates(result.buttons);
+ boost::container::small_vector<uint32, 16> pressedKeys;
+ g_window_info.iter_keystates([&pressedKeys](const std::pair<const uint32, bool>& keyState) { if (keyState.second) pressedKeys.emplace_back(keyState.first); });
+ result.buttons.SetPressedButtons(pressedKeys);
}
return result;
}
@@ -146,9 +146,7 @@ ControllerState SDLController::raw_state()
for (int i = 0; i < SDL_CONTROLLER_BUTTON_MAX; ++i)
{
if (m_buttons[i] && SDL_GameControllerGetButton(m_controller, (SDL_GameControllerButton)i))
- {
- result.buttons[i]=true;
- }
+ result.buttons.SetButtonState(i, true);
}

if (m_axis[SDL_CONTROLLER_AXIS_LEFTX])
@@ -207,16 +207,16 @@ ControllerState NativeWiimoteController::raw_state()

const auto state = m_provider->get_state(m_index);
for (int i = 0; i < std::numeric_limits<uint16>::digits; i++)
- result.buttons[i] = state.buttons & (1<<i);
+ result.buttons.SetButtonState(i, (state.buttons & (1 << i)) != 0);

if (std::holds_alternative<NunchuckData>(state.m_extension))
{
const auto nunchuck = std::get<NunchuckData>(state.m_extension);
if (nunchuck.c)
- result.buttons[kWiimoteButton_C]=true;
+ result.buttons.SetButtonState(kWiimoteButton_C, true);

if (nunchuck.z)
- result.buttons[kWiimoteButton_Z]=true;
+ result.buttons.SetButtonState(kWiimoteButton_Z, true);

result.axis = nunchuck.axis;
}
@@ -225,8 +225,11 @@
const auto classic = std::get<ClassicData>(state.m_extension);
uint64 buttons = (uint64)classic.buttons << kHighestWiimote;
for (int i = 0; i < std::numeric_limits<uint64>::digits; i++)
- result.buttons[i] = result.buttons[i] || (buttons & (1 << i));
-
+ {
+ // OR with base buttons
+ if((buttons & (1 << i)))
+ result.buttons.SetButtonState(i, true);
+ }
result.axis = classic.left_axis;
result.rotation = classic.right_axis;
result.trigger = classic.trigger;
@@ -121,7 +121,7 @@ ControllerState XInputController::raw_state()

// Buttons
for(int i=0;i<std::numeric_limits<WORD>::digits;i++)
- result.buttons[i] = state.Gamepad.wButtons & (1<<i);
+ result.buttons.SetButtonState(i, (state.Gamepad.wButtons & (1 << i)) != 0);

if (state.Gamepad.sThumbLX > 0)
result.axis.x = (float)state.Gamepad.sThumbLX / std::numeric_limits<sint16>::max();
@@ -279,13 +279,9 @@ bool EmulatedController::is_mapping_down(uint64 mapping) const
const auto it = m_mappings.find(mapping);
if (it != m_mappings.cend())
{
- if (const auto controller = it->second.controller.lock()) {
- auto& buttons=controller->get_state().buttons;
- auto buttonState=buttons.find(it->second.button);
- return buttonState!=buttons.end() && buttonState->second;
- }
+ if (const auto controller = it->second.controller.lock())
+ return controller->get_state().buttons.GetButtonState(it->second.button);
}

return false;
}
@@ -7,32 +7,33 @@
class FSpinlock
{
public:
- void acquire()
+ bool is_locked() const
{
- while( true )
+ return m_lockBool.load(std::memory_order_relaxed);
+ }
+
+ // implement BasicLockable and Lockable
+ void lock() const
+ {
+ while (true)
{
- if (!m_lockBool.exchange(true, std::memory_order_acquire))
+ if (!m_lockBool.exchange(true, std::memory_order_acquire))
break;
while (m_lockBool.load(std::memory_order_relaxed)) _mm_pause();
}
}

- bool tryAcquire()
+ bool try_lock() const
{
return !m_lockBool.exchange(true, std::memory_order_acquire);
}

- void release()
+ void unlock() const
{
m_lockBool.store(false, std::memory_order_release);
}

- bool isHolding() const
- {
- return m_lockBool.load(std::memory_order_relaxed);
- }
-
private:

- std::atomic<bool> m_lockBool = false;
+ mutable std::atomic<bool> m_lockBool = false;
};
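With FSpinlock now modelling Lockable, the call sites touched by this commit fall into three patterns. A compact sketch of those patterns, with the function and variable names invented for illustration:

#include <mutex>

static FSpinlock s_lock;

void SketchPatterns()
{
	// 1) RAII guard, as used by ControllerButtonState above
	{
		std::lock_guard _l(s_lock);
		// ... critical section ...
	}

	// 2) explicit lock()/unlock() pairs, the direct replacement for acquire()/release()
	s_lock.lock();
	// ... critical section ...
	s_lock.unlock();

	// 3) non-blocking attempt, replacing tryAcquire()
	if (s_lock.try_lock())
	{
		// ... critical section ...
		s_lock.unlock();
	}
}

// is_locked() replaces isHolding() and is used only for debug assertions in this
// commit, e.g. cemu_assert_debug(s_lock.is_locked()) inside helpers that expect
// the caller to already hold the lock.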