Implement nvmap handle pinning/unpinning

nvmap allows mapping handles into the SMMU address space through 'pins'. These are refcounted mappings that are lazily freed when required due to AS space running out. Nvidia implements this by using several MRU lists of handles in order to choose which ones to free and which ones to keep; however, since we are unlikely to ever run into the case of not having enough address space, a naive queue approach works fine. This pin infrastructure is used by nvdrv's host1x channel implementation in order to handle the mapping of both command and data buffers for submit.
This commit is contained in:
Billy Laws 2021-10-25 22:56:22 +01:00 committed by PixelyIon
parent a0c57256cc
commit debab7c9c7
4 changed files with 137 additions and 14 deletions

View File

@ -159,7 +159,7 @@ namespace skyline {
public:
VaType vaStart; //!< The base VA of the allocator, no allocations will be below this
FlatAllocator(VaType vaStart, VaType vaLimit);
FlatAllocator(VaType vaStart, VaType vaLimit = Base::VaMaximum);
/**
* @brief Allocates a region in the AS of the given size and returns its address

View File

@ -1,8 +1,15 @@
// SPDX-License-Identifier: MIT OR MPL-2.0
// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)
#include <common/address_space.inc>
#include <soc.h>
#include "nvmap.h"
namespace skyline {
// Explicit instantiations of the 32-bit flat address space map/allocator used for the SMMU;
// the member definitions come from address_space.inc included above
template class FlatAddressSpaceMap<u32, 0, bool, false, false, 32>;
template class FlatAllocator<u32, 0, 32>;
}
namespace skyline::service::nvdrv::core {
// Constructs a handle of the given size; alignedSize and origSize both start equal to the
// requested size (presumably adjusted later during allocation — confirm against Alloc)
NvMap::Handle::Handle(u64 size, Id id) : size(size), alignedSize(size), origSize(size), id(id) {}
@ -51,7 +58,7 @@ namespace skyline::service::nvdrv::core {
return PosixResult::Success;
}
NvMap::NvMap(const DeviceState &state) : state(state) {}
NvMap::NvMap(const DeviceState &state) : state(state), smmuAllocator(PAGE_SIZE) {}
void NvMap::AddHandle(std::shared_ptr<Handle> handleDesc) {
std::scoped_lock lock(handlesLock);
@ -59,12 +66,26 @@ namespace skyline::service::nvdrv::core {
handles.emplace(handleDesc->id, std::move(handleDesc));
}
bool NvMap::TryRemoveHandle(const std::shared_ptr<Handle> &handleDesc) {
void NvMap::UnmapHandle(Handle &handleDesc) {
    // If the handle is still queued for a lazy unmap, drop that entry first so the queue
    // never refers to a handle without a live mapping
    if (handleDesc.unmapQueueEntry) {
        unmapQueue.erase(*handleDesc.unmapQueueEntry);
        handleDesc.unmapQueueEntry.reset();
    }

    // Tear down the SMMU mapping, then return the VA region to the allocator
    auto mappedSize{static_cast<u32>(handleDesc.alignedSize)};
    state.soc->smmu.Unmap(handleDesc.pinVirtAddress, mappedSize);
    smmuAllocator.Free(handleDesc.pinVirtAddress, mappedSize);
    handleDesc.pinVirtAddress = 0;
}
bool NvMap::TryRemoveHandle(const Handle &handleDesc) {
// No dupes left, we can remove from handle map
if (handleDesc->dupes == 0 && handleDesc->internalDupes == 0) {
if (handleDesc.dupes == 0 && handleDesc.internalDupes == 0) {
std::scoped_lock lock(handlesLock);
auto it{handles.find(handleDesc->id)};
auto it{handles.find(handleDesc.id)};
if (it != handles.end())
handles.erase(it);
@ -95,15 +116,72 @@ namespace skyline::service::nvdrv::core {
}
}
u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
    // Maps a handle into the SMMU AS (refcounted); returns 0 for an invalid handle ID
    auto handleDesc{GetHandle(handle)};
    if (!handleDesc) [[unlikely]]
        return 0;

    std::scoped_lock lock(handleDesc->mutex);
    if (!handleDesc->pins) {
        // If we're in the unmap queue we can just remove ourselves and return since we're already mapped
        {
            // Lock now to prevent our queue entry from being removed for allocation in-between the following check and erase
            std::scoped_lock queueLock(unmapQueueLock);
            if (handleDesc->unmapQueueEntry) {
                unmapQueue.erase(*handleDesc->unmapQueueEntry);
                handleDesc->unmapQueueEntry.reset();

                handleDesc->pins++;
                return handleDesc->pinVirtAddress;
            }
        }

        // If not then allocate some space and map it
        u32 address{};
        while (!(address = smmuAllocator.Allocate(static_cast<u32>(handleDesc->alignedSize)))) {
            // Free handles until the allocation succeeds
            std::scoped_lock queueLock(unmapQueueLock);

            // Calling front() on an empty list is UB, so check emptiness explicitly before evicting
            if (unmapQueue.empty())
                throw exception("Ran out of SMMU address space!");

            // Copy the shared_ptr so the victim stays alive while we hold its mutex, even after UnmapHandle erases it from the queue
            auto freeHandleDesc{unmapQueue.front()};

            // Handles in the unmap queue are guaranteed not to be pinned so don't bother checking if they are before unmapping
            std::scoped_lock freeLock(freeHandleDesc->mutex);

            // Check the *victim's* address (not our own, which is always 0 on this path): it may have
            // already been unmapped by another thread between dropping and reacquiring the queue lock
            if (freeHandleDesc->pinVirtAddress)
                UnmapHandle(*freeHandleDesc);
        }

        state.soc->smmu.Map(address, handleDesc->GetPointer(), static_cast<u32>(handleDesc->alignedSize));
        handleDesc->pinVirtAddress = address;
    }

    handleDesc->pins++;
    return handleDesc->pinVirtAddress;
}
void NvMap::UnpinHandle(Handle::Id handle) {
    // Drops one pin from the handle; a missing handle ID is silently ignored
    auto handleDesc{GetHandle(handle)};
    if (!handleDesc)
        return;

    std::scoped_lock lock(handleDesc->mutex);
    auto remaining{--handleDesc->pins};
    if (remaining < 0) {
        state.logger->Warn("Pin count imbalance detected!");
    } else if (remaining == 0) {
        // The last pin is gone: queue the handle so its SMMU region can be reclaimed if AS runs out
        std::scoped_lock queueLock(unmapQueueLock);
        handleDesc->unmapQueueEntry = unmapQueue.insert(unmapQueue.end(), handleDesc);
    }
}
std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internalSession) {
std::weak_ptr<Handle> hWeak{GetHandle(handle)};
FreeInfo freeInfo;
// We use a weak ptr here so we can tell when the handle has been freed and report that back to guest
if (auto handleDesc = hWeak.lock()) {
if (!handleDesc) [[unlikely]]
return std::nullopt;
std::scoped_lock lock(handleDesc->mutex);
if (internalSession) {
@ -113,13 +191,19 @@ namespace skyline::service::nvdrv::core {
if (--handleDesc->dupes < 0) {
state.logger->Warn("User duplicate count imbalance detected!");
} else if (handleDesc->dupes == 0) {
// TODO: unpin
// Force unmap the handle
if (handleDesc->pinVirtAddress) {
std::scoped_lock queueLock(unmapQueueLock);
UnmapHandle(*handleDesc);
}
handleDesc->pins = 0;
}
}
// Try to remove the shared ptr to the handle from the map, if nothing else is using the handle
// then it will now be freed when `h` goes out of scope
if (TryRemoveHandle(handleDesc))
// then it will now be freed when `handleDesc` goes out of scope
if (TryRemoveHandle(*handleDesc))
state.logger->Debug("Removed nvmap handle: {}", handle);
else
state.logger->Debug("Tried to free nvmap handle: {} but didn't as it still has duplicates", handle);

View File

@ -3,7 +3,9 @@
#pragma once
#include <queue>
#include <common.h>
#include <common/address_space.h>
#include <services/common/result.h>
namespace skyline::service::nvdrv::core {
@ -29,6 +31,10 @@ namespace skyline::service::nvdrv::core {
using Id = u32;
Id id; //!< A globally unique identifier for this handle
i32 pins{};
u32 pinVirtAddress{};
std::optional<typeof(std::list<std::shared_ptr<Handle>>::iterator)> unmapQueueEntry{};
struct Flags {
bool mapUncached : 1; //!< If the handle should be mapped as uncached
bool _pad0_ : 1;
@ -42,7 +48,6 @@ namespace skyline::service::nvdrv::core {
u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to, this can also be in the nvdrv tmem
bool isSharedMemMapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC call
u8 kind{}; //!< Used for memory compression
bool allocated{}; //!< If the handle has been allocated with `Alloc`
@ -73,6 +78,10 @@ namespace skyline::service::nvdrv::core {
private:
const DeviceState &state;
FlatAllocator<u32, 0, 32> smmuAllocator;
std::list<std::shared_ptr<Handle>> unmapQueue;
std::mutex unmapQueueLock; //!< Protects access to `unmapQueue`
std::unordered_map<Handle::Id, std::shared_ptr<Handle>> handles; //!< Main owning map of handles
std::mutex handlesLock; //!< Protects access to `handles`
@ -81,12 +90,18 @@ namespace skyline::service::nvdrv::core {
void AddHandle(std::shared_ptr<Handle> handle);
/**
* @brief Unmaps and frees the SMMU memory region a handle is mapped to
* @note Both `unmapQueueLock` and `handleDesc.mutex` MUST be locked when calling this
*/
void UnmapHandle(Handle &handleDesc);
/**
* @brief Removes a handle from the map taking its dupes into account
* @note h->mutex MUST be locked when calling this
* @note handleDesc.mutex MUST be locked when calling this
* @return If the handle was removed from the map
*/
bool TryRemoveHandle(const std::shared_ptr<Handle> &h);
bool TryRemoveHandle(const Handle &handleDesc);
public:
/**
@ -107,6 +122,18 @@ namespace skyline::service::nvdrv::core {
std::shared_ptr<Handle> GetHandle(Handle::Id handle);
/**
* @brief Maps a handle into the SMMU address space
* @note This operation is refcounted, the number of calls to this must eventually match the number of calls to `UnpinHandle`
* @return The SMMU virtual address that the handle has been mapped to
*/
u32 PinHandle(Handle::Id handle);
/**
* @brief When this has been called an equal number of times to `PinHandle` for the supplied handle it will be added to a list of handles to be freed when necessary
*/
void UnpinHandle(Handle::Id handle);
/**
* @brief Tries to free a handle and remove a single dupe
* @note If a handle has no dupes left and has no other users a FreeInfo struct will be returned describing the prior state of the handle

View File

@ -56,11 +56,23 @@ namespace skyline::service::nvdrv::device::nvhost {
PosixResult Host1XChannel::MapBuffer(u8 compressed, span<BufferHandle> handles) {
    state.logger->Debug("compressed: {}", compressed);

    // Pin each requested nvmap handle into the SMMU and report the resulting address back through the entry
    for (auto &entry : handles) {
        entry.address = core.nvMap.PinHandle(entry.handle);
        state.logger->Debug("handle: {}, address: 0x{:X}", entry.handle, entry.address);
    }

    return PosixResult::Success;
}
PosixResult Host1XChannel::UnmapBuffer(u8 compressed, span<BufferHandle> handles) {
    state.logger->Debug("compressed: {}", compressed);

    // Drop one pin per supplied handle, allowing its mapping to be lazily reclaimed once fully unpinned
    for (auto &entry : handles) {
        core.nvMap.UnpinHandle(entry.handle);
        state.logger->Debug("handle: {}", entry.handle);
    }

    return PosixResult::Success;
}
#include <services/nvdrv/devices/deserialisation/macro_def.inc>