Allow providing a callback that's called after every CPU access in GMMU

Required for the planned implementation of GPU-side memory trapping.
Billy Laws 2022-05-31 16:04:27 +01:00
parent 46ee18c3e3
commit cb2b36e3ab
2 changed files with 45 additions and 23 deletions
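
All of the touched entry points gain a trailing `std::function<void(span<u8>)>` parameter that defaults to an empty function, so existing call sites compile unchanged. A caller that wants to observe which physical blocks a virtual access resolves to passes a lambda; a minimal sketch of a call site (the `gmmu` instance and `gpuVa` address here are hypothetical, not part of this diff):

    // Hypothetical call site: `gmmu` is a FlatMemoryManager, `gpuVa` a mapped virtual address
    size_t bytesTouched{};
    u32 value{gmmu.Read<u32>(gpuVa, [&](span<u8> physBlock) {
        bytesTouched += physBlock.size(); // Runs once per contiguous physical block the read touches
    })};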


@@ -116,19 +116,19 @@ namespace skyline {
         /**
          * @return A vector of all physical ranges inside of the given virtual range
          */
-        std::vector<span<u8>> TranslateRange(VaType virt, VaType size);
+        std::vector<span<u8>> TranslateRange(VaType virt, VaType size, std::function<void(span<u8>)> cpuAccessCallback = {});

-        void Read(u8 *destination, VaType virt, VaType size);
+        void Read(u8 *destination, VaType virt, VaType size, std::function<void(span<u8>)> cpuAccessCallback = {});

         template<typename T>
-        void Read(span <T> destination, VaType virt) {
-            Read(reinterpret_cast<u8 *>(destination.data()), virt, destination.size_bytes());
+        void Read(span <T> destination, VaType virt, std::function<void(span<u8>)> cpuAccessCallback = {}) {
+            Read(reinterpret_cast<u8 *>(destination.data()), virt, destination.size_bytes(), cpuAccessCallback);
         }

         template<typename T>
-        T Read(VaType virt) {
+        T Read(VaType virt, std::function<void(span<u8>)> cpuAccessCallback = {}) {
             T obj;
-            Read(reinterpret_cast<u8 *>(&obj), virt, sizeof(T));
+            Read(reinterpret_cast<u8 *>(&obj), virt, sizeof(T), cpuAccessCallback);
             return obj;
         }
@@ -140,7 +140,7 @@ namespace skyline {
          * @note The function will provide no feedback on if the end has been reached or if there was an early exit
          */
         template<typename Function, typename Container>
-        span<u8> ReadTill(Container& destination, VaType virt, Function function) {
+        span<u8> ReadTill(Container& destination, VaType virt, Function function, std::function<void(span<u8>)> cpuAccessCallback = {}) {
             //TRACE_EVENT("containers", "FlatMemoryManager::ReadTill");
             std::scoped_lock lock(this->blockMutex);
@@ -164,7 +164,11 @@ namespace skyline {
                 if (predecessor->extraInfo.sparseMapped) {
                     std::memset(pointer, 0, blockReadSize);
                 } else {
-                    auto end{function(span<u8>(blockPhys, blockReadSize))};
+                    span<u8> cpuBlock{blockPhys, blockReadSize};
+                    if (cpuAccessCallback)
+                        cpuAccessCallback(cpuBlock);
+
+                    auto end{function(cpuBlock)};
                     std::memcpy(pointer, blockPhys, end ? *end : blockReadSize);
                     if (end)
                         return {destination.data(), (destination.size() - remainingSize) + *end};
@@ -184,18 +188,18 @@ namespace skyline {
             return {destination.data(), destination.size()};
         }

-        void Write(VaType virt, u8 *source, VaType size);
+        void Write(VaType virt, u8 *source, VaType size, std::function<void(span<u8>)> cpuAccessCallback = {});

         template<typename T>
-        void Write(VaType virt, span<T> source) {
+        void Write(VaType virt, span<T> source, std::function<void(span<u8>)> cpuAccessCallback = {}) {
             Write(virt, reinterpret_cast<u8 *>(source.data()), source.size_bytes());
         }

-        void Write(VaType virt, util::TrivialObject auto source) {
-            Write(virt, reinterpret_cast<u8 *>(&source), sizeof(source));
+        void Write(VaType virt, util::TrivialObject auto source, std::function<void(span<u8>)> cpuAccessCallback = {}) {
+            Write(virt, reinterpret_cast<u8 *>(&source), sizeof(source), cpuAccessCallback);
         }

-        void Copy(VaType dst, VaType src, VaType size);
+        void Copy(VaType dst, VaType src, VaType size, std::function<void(span<u8>)> cpuAccessCallback = {});
     };

     /**
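
Of note in the header changes above: the callback is taken by value with an empty default, and every invocation is guarded by `if (cpuAccessCallback)`, since a default-constructed `std::function` holds no target and converts to `false`. A standalone sketch of that idiom (generic illustration, not Skyline code):

    #include <cstdio>
    #include <functional>

    // Same shape as the GMMU signatures: an optional observer with an empty default
    void Access(std::function<void(int)> callback = {}) {
        if (callback) // A targetless std::function is falsy, so the default costs a single branch
            callback(42);
        std::printf("access performed\n");
    }

    int main() {
        Access();                                                  // Fast path: branch not taken
        Access([](int block) { std::printf("saw %d\n", block); }); // Observed path
    }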


@@ -233,7 +233,7 @@ namespace skyline {
             munmap(sparseMap, SparseMapSize);
     }

-    MM_MEMBER(std::vector<span<u8>>)::TranslateRange(VaType virt, VaType size) {
+    MM_MEMBER(std::vector<span<u8>>)::TranslateRange(VaType virt, VaType size, std::function<void(span<u8>)> cpuAccessCallback) {
         TRACE_EVENT("containers", "FlatMemoryManager::TranslateRange");

         std::scoped_lock lock(this->blockMutex);
@@ -258,7 +258,11 @@ namespace skyline {
                 blockPhys = sparseMap;
             }

-            ranges.push_back(span(blockPhys, blockSize));
+            span cpuBlock{blockPhys, blockSize};
+            if (cpuAccessCallback)
+                cpuAccessCallback(cpuBlock);
+
+            ranges.push_back(cpuBlock);

             size -= blockSize;
@@ -272,7 +276,7 @@ namespace skyline {
         return ranges;
     }

-    MM_MEMBER(void)::Read(u8 *destination, VaType virt, VaType size) {
+    MM_MEMBER(void)::Read(u8 *destination, VaType virt, VaType size, std::function<void(span<u8>)> cpuAccessCallback) {
         TRACE_EVENT("containers", "FlatMemoryManager::Read");

         std::scoped_lock lock(this->blockMutex);
@@ -291,10 +295,14 @@ namespace skyline {
             if (predecessor->phys == nullptr) {
                 throw exception("Page fault at 0x{:X}", predecessor->virt);
             } else {
-                if (predecessor->extraInfo.sparseMapped) // Sparse mappings read all zeroes
+                if (predecessor->extraInfo.sparseMapped) { // Sparse mappings read all zeroes
                     std::memset(destination, 0, blockReadSize);
-                else
+                } else {
+                    if (cpuAccessCallback)
+                        cpuAccessCallback(span{blockPhys, blockReadSize});
+
                     std::memcpy(destination, blockPhys, blockReadSize);
+                }
             }

             destination += blockReadSize;
@@ -308,7 +316,7 @@ namespace skyline {
         }
     }

-    MM_MEMBER(void)::Write(VaType virt, u8 *source, VaType size) {
+    MM_MEMBER(void)::Write(VaType virt, u8 *source, VaType size, std::function<void(span<u8>)> cpuAccessCallback) {
         TRACE_EVENT("containers", "FlatMemoryManager::Write");

         std::scoped_lock lock(this->blockMutex);
@@ -329,8 +337,12 @@ namespace skyline {
             if (predecessor->phys == nullptr) {
                 throw exception("Page fault at 0x{:X}", predecessor->virt);
             } else {
-                if (!predecessor->extraInfo.sparseMapped) // Sparse mappings ignore writes
+                if (!predecessor->extraInfo.sparseMapped) { // Sparse mappings ignore writes
+                    if (cpuAccessCallback)
+                        cpuAccessCallback(span{blockPhys, blockWriteSize});
+
                     std::memcpy(blockPhys, source, blockWriteSize);
+                }
             }

             source += blockWriteSize;
@@ -344,7 +356,7 @@ namespace skyline {
         }
     }

-    MM_MEMBER(void)::Copy(VaType dst, VaType src, VaType size) {
+    MM_MEMBER(void)::Copy(VaType dst, VaType src, VaType size, std::function<void(span<u8>)> cpuAccessCallback) {
         TRACE_EVENT("containers", "FlatMemoryManager::Copy");

         std::scoped_lock lock(this->blockMutex);
@@ -378,10 +390,16 @@ namespace skyline {
             } else if (dstPredecessor->phys == nullptr) {
                 throw exception("Page fault at 0x{:X}", dstPredecessor->virt);
             } else { [[likely]]
-                if (srcPredecessor->extraInfo.sparseMapped)
+                if (srcPredecessor->extraInfo.sparseMapped) {
                     std::memset(dstBlockPhys, 0, blockCopySize);
-                else [[likely]]
+                } else [[likely]] {
+                    if (cpuAccessCallback) {
+                        cpuAccessCallback(span{dstBlockPhys, blockCopySize});
+                        cpuAccessCallback(span{srcBlockPhys, blockCopySize});
+                    }
+
                     std::memcpy(dstBlockPhys, srcBlockPhys, blockCopySize);
+                }
             }

             dstBlockPhys += blockCopySize;
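
The commit message names the intended consumer: GPU-side memory trapping. One plausible shape for that, sketched purely as an assumption on top of this diff (neither `trapManager` nor `SynchroniseRange` exists in this commit), is a callback that synchronises any trapped guest memory overlapping a block before the engine's copy proceeds:

    // Hypothetical future call site; all names below are assumptions based on the commit message
    gmmu.Read(destination, gpuVa, size, [&](span<u8> physBlock) {
        trapManager.SynchroniseRange(physBlock); // Sync/untrap any protected range overlapping this block
    });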