Add locking to GPU VMM and fix a few codestyle issues

As the VMM can be accessed by nvdrv and the GPFIFO thread at the same
time, locking is needed to prevent races.
Billy Laws 2021-01-21 21:20:00 +00:00, committed by Mark
parent c1aec00ed1
commit c489da610e
2 changed files with 32 additions and 18 deletions
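For context, the sketch below shows the locking pattern this commit applies: a std::shared_mutex guarding the chunk list, taken exclusively (std::unique_lock) by the calls that mutate the mappings and shared (std::shared_lock) by the lookups used on the GPFIFO path. The class, member names and addresses here are simplified stand-ins for illustration, not the actual skyline types.

#include <cstdint>
#include <mutex>
#include <shared_mutex>
#include <vector>

// Simplified stand-in for the GPU VMM: a chunk list guarded by a shared_mutex.
// Mapping calls (driven by nvdrv) take the lock exclusively, lookups on the
// GPFIFO path take it shared so concurrent readers don't serialise each other.
class VmmSketch {
  private:
    struct Chunk {
        uint64_t virtAddr;
        uint64_t size;
    };
    std::vector<Chunk> chunks;
    std::shared_mutex vmmMutex;

  public:
    uint64_t MapAllocate(uint64_t size) {
        std::unique_lock lock(vmmMutex); // writer: mutates the chunk list
        uint64_t virtAddr{chunks.empty() ? 0x100000 : chunks.back().virtAddr + chunks.back().size};
        chunks.push_back({virtAddr, size});
        return virtAddr;
    }

    bool IsMapped(uint64_t virtAddr) {
        std::shared_lock lock(vmmMutex); // reader: many threads may hold this at once
        for (const auto &chunk : chunks)
            if (virtAddr >= chunk.virtAddr && virtAddr < chunk.virtAddr + chunk.size)
                return true;
        return false;
    }
};

Because the locks are scoped RAII guards, early returns and exceptions release them automatically, which is why the diff below can simply drop a lock at the top of each member function.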

View File

@@ -6,17 +6,17 @@
 namespace skyline::gpu::vmm {
     MemoryManager::MemoryManager(const DeviceState &state) : state(state) {
-        constexpr u64 GpuAddressSpaceSize{1ul << 40}; //!< The size of the GPU address space
-        constexpr u64 GpuAddressSpaceBase{0x100000}; //!< The base of the GPU address space - must be non-zero
+        constexpr u64 gpuAddressSpaceSize{1UL << 40}; //!< The size of the GPU address space
+        constexpr u64 gpuAddressSpaceBase{0x100000}; //!< The base of the GPU address space - must be non-zero

         // Create the initial chunk that will be split to create new chunks
-        ChunkDescriptor baseChunk(GpuAddressSpaceBase, GpuAddressSpaceSize, 0, ChunkState::Unmapped);
+        ChunkDescriptor baseChunk(gpuAddressSpaceBase, gpuAddressSpaceSize, nullptr, ChunkState::Unmapped);
         chunks.push_back(baseChunk);
     }

-    std::optional<ChunkDescriptor> MemoryManager::FindChunk(ChunkState state, u64 size, u64 alignment) {
-        auto chunk{std::find_if(chunks.begin(), chunks.end(), [state, size, alignment](const ChunkDescriptor &chunk) -> bool {
-            return (alignment ? util::IsAligned(chunk.virtAddr, alignment) : true) && chunk.size > size && chunk.state == state;
+    std::optional<ChunkDescriptor> MemoryManager::FindChunk(ChunkState desiredState, u64 size, u64 alignment) {
+        auto chunk{std::find_if(chunks.begin(), chunks.end(), [desiredState, size, alignment](const ChunkDescriptor &chunk) -> bool {
+            return (alignment ? util::IsAligned(chunk.virtAddr, alignment) : true) && chunk.size > size && chunk.state == desiredState;
         })};

         if (chunk != chunks.end())
@@ -41,7 +41,7 @@ namespace skyline::gpu::vmm {
                 }

                 if (extension)
-                    chunks.insert(std::next(chunk), ChunkDescriptor(newChunk.virtAddr + newChunk.size, extension, (oldChunk.state == ChunkState::Mapped) ? (oldChunk.cpuPtr + newSize + newChunk.size) : 0, oldChunk.state));
+                    chunks.insert(std::next(chunk), ChunkDescriptor(newChunk.virtAddr + newChunk.size, extension, (oldChunk.state == ChunkState::Mapped) ? (oldChunk.cpuPtr + newSize + newChunk.size) : nullptr, oldChunk.state));

                 return newChunk.virtAddr;
             } else if (chunk->virtAddr + chunk->size > newChunk.virtAddr) {
@@ -83,6 +83,8 @@ namespace skyline::gpu::vmm {
     u64 MemoryManager::ReserveSpace(u64 size, u64 alignment) {
         size = util::AlignUp(size, constant::GpuPageSize);
+        std::unique_lock lock(vmmMutex);
         auto newChunk{FindChunk(ChunkState::Unmapped, size, alignment)};
         if (!newChunk)
             return 0;
@@ -100,11 +102,14 @@ namespace skyline::gpu::vmm {
         size = util::AlignUp(size, constant::GpuPageSize);
+        std::unique_lock lock(vmmMutex);
         return InsertChunk(ChunkDescriptor(virtAddr, size, nullptr, ChunkState::Reserved));
     }

     u64 MemoryManager::MapAllocate(u8 *cpuPtr, u64 size) {
         size = util::AlignUp(size, constant::GpuPageSize);
+        std::unique_lock lock(vmmMutex);
         auto mappedChunk{FindChunk(ChunkState::Unmapped, size)};
         if (!mappedChunk)
             return 0;
@@ -123,6 +128,7 @@ namespace skyline::gpu::vmm {
         size = util::AlignUp(size, constant::GpuPageSize);
+        std::unique_lock lock(vmmMutex);
         return InsertChunk(ChunkDescriptor(virtAddr, size, cpuPtr, ChunkState::Mapped));
     }
@@ -131,7 +137,8 @@ namespace skyline::gpu::vmm {
             return false;

         try {
-            InsertChunk(ChunkDescriptor(virtAddr, size, 0, ChunkState::Unmapped));
+            std::unique_lock lock(vmmMutex);
+            InsertChunk(ChunkDescriptor(virtAddr, size, nullptr, ChunkState::Unmapped));
         } catch (const std::exception &e) {
             return false;
         }
@@ -139,7 +146,9 @@ namespace skyline::gpu::vmm {
         return true;
     }

-    void MemoryManager::Read(u8 *destination, u64 virtAddr, u64 size) const {
+    void MemoryManager::Read(u8 *destination, u64 virtAddr, u64 size) {
+        std::shared_lock lock(vmmMutex);
         auto chunk{std::upper_bound(chunks.begin(), chunks.end(), virtAddr, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
             return address < chunk.virtAddr;
         })};
@@ -169,7 +178,9 @@ namespace skyline::gpu::vmm {
         }
     }

-    void MemoryManager::Write(u8 *source, u64 virtAddr, u64 size) const {
+    void MemoryManager::Write(u8 *source, u64 virtAddr, u64 size) {
+        std::shared_lock lock(vmmMutex);
         auto chunk{std::upper_bound(chunks.begin(), chunks.end(), virtAddr, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
             return address < chunk.virtAddr;
         })};

View File

@@ -40,18 +40,21 @@ namespace skyline {
           private:
             const DeviceState &state;
             std::vector<ChunkDescriptor> chunks;
+            std::shared_mutex vmmMutex;

             /**
              * @brief Finds a chunk in the virtual address space that is larger than meets the given requirements
-             * @param state The state of the chunk to find
+             * @note vmmMutex MUST be locked when calling this
+             * @param desiredState The state of the chunk to find
              * @param size The minimum size of the chunk to find
              * @param alignment The minimum alignment of the chunk to find
              * @return The first applicable chunk
              */
-            std::optional<ChunkDescriptor> FindChunk(ChunkState state, u64 size, u64 alignment = 0);
+            std::optional<ChunkDescriptor> FindChunk(ChunkState desiredState, u64 size, u64 alignment = 0);

             /**
              * @brief Inserts a chunk into the chunk list, resizing and splitting as necessary
+             * @note vmmMutex MUST be locked when calling this
              * @param newChunk The chunk to insert
              * @return The base virtual address of the inserted chunk
              */
@@ -99,13 +102,13 @@ namespace skyline {
              */
             bool Unmap(u64 virtAddr, u64 size);

-            void Read(u8 *destination, u64 virtAddr, u64 size) const;
+            void Read(u8 *destination, u64 virtAddr, u64 size);

             /**
              * @brief Reads in a span from a region of the virtual address space
              */
             template<typename T>
-            void Read(span <T> destination, u64 virtAddr) const {
+            void Read(span <T> destination, u64 virtAddr) {
                 Read(reinterpret_cast<u8 *>(destination.data()), virtAddr, destination.size_bytes());
             }
@@ -114,19 +117,19 @@ namespace skyline {
              * @tparam T The type of object to return
              */
             template<typename T>
-            T Read(u64 virtAddr) const {
+            T Read(u64 virtAddr) {
                 T obj;
                 Read(reinterpret_cast<u8 *>(&obj), virtAddr, sizeof(T));
                 return obj;
             }

-            void Write(u8 *source, u64 virtAddr, u64 size) const;
+            void Write(u8 *source, u64 virtAddr, u64 size);

             /**
              * @brief Writes out a span to a region of the virtual address space
              */
             template<typename T>
-            void Write(span <T> source, u64 virtAddr) const {
+            void Write(span <T> source, u64 virtAddr) {
                 Write(reinterpret_cast<u8 *>(source.data()), virtAddr, source.size_bytes());
             }
@@ -134,7 +137,7 @@ namespace skyline {
              * @brief Writes out an object to a region of the virtual address space
              */
             template<typename T>
-            void Write(T source, u64 virtAddr) const {
+            void Write(T source, u64 virtAddr) {
                 Write(reinterpret_cast<u8 *>(&source), virtAddr, sizeof(T));
             }
         };
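A side note on why Read and Write lose their const qualifiers above: locking a non-mutable member mutex counts as modifying the object, so it is not allowed from a const member function. A hypothetical alternative that would have kept the const signatures is to declare the mutex mutable, as in the sketch below (an illustration only, not what the commit does):

#include <shared_mutex>

class Example {
  private:
    mutable std::shared_mutex vmmMutex; // mutable: lockable from const member functions

  public:
    void Read() const {
        std::shared_lock lock(vmmMutex); // permitted even though Read() is const
        // ... copy data out under the shared lock ...
    }
};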