mirror of https://github.com/skyline-emu/skyline.git
Rework GPU VMM variable naming

commit c7e5202042
parent 4c9d453008
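At a glance, the renames in this commit: ChunkDescriptor::address becomes virtAddr and ChunkDescriptor::pointer becomes cpuPtr (function parameters follow suit), NvMapObject::pointer and AddressSpaceRegion::cpuPtr become ptr, and ChunkState becomes a scoped enum class. Doc comments are reworded from "GPU address space" to "virtual address space" throughout.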
@@ -16,7 +16,7 @@ namespace skyline::gpu::vmm {
     std::optional<ChunkDescriptor> MemoryManager::FindChunk(ChunkState state, u64 size, u64 alignment) {
         auto chunk{std::find_if(chunks.begin(), chunks.end(), [state, size, alignment](const ChunkDescriptor &chunk) -> bool {
-            return (alignment ? util::IsAligned(chunk.address, alignment) : true) && chunk.size > size && chunk.state == state;
+            return (alignment ? util::IsAligned(chunk.virtAddr, alignment) : true) && chunk.size > size && chunk.state == state;
         })};

         if (chunk != chunks.end())
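For context, a minimal standalone sketch of the first-fit search that FindChunk performs, with simplified types; IsAligned here is an assumed power-of-two alignment check standing in for skyline's util::IsAligned:

#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

using u64 = std::uint64_t;

enum class ChunkState { Unmapped, Reserved, Mapped };

struct Chunk {
    u64 virtAddr, size;
    ChunkState state;
};

// Assumed behaviour of util::IsAligned for power-of-two alignments
constexpr bool IsAligned(u64 value, u64 align) {
    return (value & (align - 1)) == 0;
}

// First-fit: the first chunk in the requested state that is large enough
// and (when an alignment is requested) sufficiently aligned
std::optional<Chunk> FindChunk(const std::vector<Chunk> &chunks, ChunkState state, u64 size, u64 alignment = 0) {
    auto it{std::find_if(chunks.begin(), chunks.end(), [&](const Chunk &c) {
        return (alignment ? IsAligned(c.virtAddr, alignment) : true) && c.size > size && c.state == state;
    })};
    return (it != chunks.end()) ? std::optional<Chunk>{*it} : std::nullopt;
}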
@@ -30,7 +30,7 @@ namespace skyline::gpu::vmm {
         for (auto chunk{chunks.begin()}; chunk != chunkEnd; chunk++) {
             if (chunk->CanContain(newChunk)) {
                 auto oldChunk{*chunk};
-                u64 newSize{newChunk.address - chunk->address};
+                u64 newSize{newChunk.virtAddr - chunk->virtAddr};
                 u64 extension{chunk->size - newSize - newChunk.size};

                 if (newSize == 0) {
@@ -41,16 +41,16 @@ namespace skyline::gpu::vmm {
                 }

                 if (extension)
-                    chunks.insert(std::next(chunk), ChunkDescriptor(newChunk.address + newChunk.size, extension, (oldChunk.state == ChunkState::Mapped) ? (oldChunk.pointer + newSize + newChunk.size) : 0, oldChunk.state));
+                    chunks.insert(std::next(chunk), ChunkDescriptor(newChunk.virtAddr + newChunk.size, extension, (oldChunk.state == ChunkState::Mapped) ? (oldChunk.cpuPtr + newSize + newChunk.size) : 0, oldChunk.state));

-                return newChunk.address;
-            } else if (chunk->address + chunk->size > newChunk.address) {
-                chunk->size = newChunk.address - chunk->address;
+                return newChunk.virtAddr;
+            } else if (chunk->virtAddr + chunk->size > newChunk.virtAddr) {
+                chunk->size = newChunk.virtAddr - chunk->virtAddr;

                 // Deletes all chunks that are within the chunk being inserted and splits the final one
                 auto tailChunk{std::next(chunk)};
                 while (tailChunk != chunkEnd) {
-                    if (tailChunk->address + tailChunk->size >= newChunk.address + newChunk.size)
+                    if (tailChunk->virtAddr + tailChunk->size >= newChunk.virtAddr + newChunk.size)
                         break;

                     tailChunk = chunks.erase(tailChunk);
@@ -61,11 +61,11 @@ namespace skyline::gpu::vmm {
                 if (tailChunk == chunkEnd)
                     break;

-                u64 chunkSliceOffset{newChunk.address + newChunk.size - tailChunk->address};
-                tailChunk->address += chunkSliceOffset;
+                u64 chunkSliceOffset{newChunk.virtAddr + newChunk.size - tailChunk->virtAddr};
+                tailChunk->virtAddr += chunkSliceOffset;
                 tailChunk->size -= chunkSliceOffset;
                 if (tailChunk->state == ChunkState::Mapped)
-                    tailChunk->pointer += chunkSliceOffset;
+                    tailChunk->cpuPtr += chunkSliceOffset;

                 // If the size of the head chunk is zero then we can directly replace it with our new one rather than inserting it
                 auto headChunk{std::prev(tailChunk)};
@@ -74,7 +74,7 @@ namespace skyline::gpu::vmm {
                 else
                     chunks.insert(std::next(headChunk), newChunk);

-                return newChunk.address;
+                return newChunk.virtAddr;
             }
         }

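The first branch above carves the new chunk out of the middle of an existing one; a simplified sketch of that head/extension split using the post-rename field names (the container choice here is illustrative; skyline keeps the chunks in a std::vector):

#include <cstdint>
#include <list>

using u64 = std::uint64_t;
using u8 = std::uint8_t;

enum class ChunkState { Unmapped, Reserved, Mapped };

struct Chunk {
    u64 virtAddr;
    u64 size;
    u8 *cpuPtr;
    ChunkState state;
};

// Carve `newChunk` out of the middle of `*it`: `*it` is shrunk to the head region,
// `newChunk` is inserted after it, and a descriptor for the leftover tail
// ("extension") keeps the old state; a mapped tail gets an offset CPU pointer.
// The headSize == 0 case (where the head is replaced outright) is omitted here.
void SplitAround(std::list<Chunk> &chunks, std::list<Chunk>::iterator it, const Chunk &newChunk) {
    Chunk old{*it};
    u64 headSize{newChunk.virtAddr - it->virtAddr};
    u64 extension{old.size - headSize - newChunk.size};

    it->size = headSize;
    auto inserted{chunks.insert(std::next(it), newChunk)};
    if (extension)
        chunks.insert(std::next(inserted), Chunk{newChunk.virtAddr + newChunk.size, extension,
                                                 (old.state == ChunkState::Mapped) ? old.cpuPtr + headSize + newChunk.size : nullptr,
                                                 old.state});
}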
@@ -94,44 +94,44 @@ namespace skyline::gpu::vmm {
         return InsertChunk(chunk);
     }

-    u64 MemoryManager::ReserveFixed(u64 address, u64 size) {
-        if (!util::IsAligned(address, constant::GpuPageSize))
+    u64 MemoryManager::ReserveFixed(u64 virtAddr, u64 size) {
+        if (!util::IsAligned(virtAddr, constant::GpuPageSize))
             return 0;

         size = util::AlignUp(size, constant::GpuPageSize);

-        return InsertChunk(ChunkDescriptor(address, size, 0, ChunkState::Reserved));
+        return InsertChunk(ChunkDescriptor(virtAddr, size, nullptr, ChunkState::Reserved));
     }

-    u64 MemoryManager::MapAllocate(u8 *pointer, u64 size) {
+    u64 MemoryManager::MapAllocate(u8 *cpuPtr, u64 size) {
         size = util::AlignUp(size, constant::GpuPageSize);
         auto mappedChunk{FindChunk(ChunkState::Unmapped, size)};
         if (!mappedChunk)
             return 0;

         auto chunk{*mappedChunk};
-        chunk.pointer = pointer;
+        chunk.cpuPtr = cpuPtr;
         chunk.size = size;
         chunk.state = ChunkState::Mapped;

         return InsertChunk(chunk);
     }

-    u64 MemoryManager::MapFixed(u64 address, u8 *pointer, u64 size) {
-        if (!util::IsAligned(address, constant::GpuPageSize))
+    u64 MemoryManager::MapFixed(u64 virtAddr, u8 *cpuPtr, u64 size) {
+        if (!util::IsAligned(virtAddr, constant::GpuPageSize))
             return false;

         size = util::AlignUp(size, constant::GpuPageSize);

-        return InsertChunk(ChunkDescriptor(address, size, pointer, ChunkState::Mapped));
+        return InsertChunk(ChunkDescriptor(virtAddr, size, cpuPtr, ChunkState::Mapped));
     }

-    bool MemoryManager::Unmap(u64 address, u64 size) {
-        if (!util::IsAligned(address, constant::GpuPageSize))
+    bool MemoryManager::Unmap(u64 virtAddr, u64 size) {
+        if (!util::IsAligned(virtAddr, constant::GpuPageSize))
             return false;

         try {
-            InsertChunk(ChunkDescriptor(address, size, 0, ChunkState::Unmapped));
+            InsertChunk(ChunkDescriptor(virtAddr, size, 0, ChunkState::Unmapped));
         } catch (const std::exception &e) {
             return false;
         }
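A hedged usage sketch of the renamed mapping API; MM stands in for MemoryManager (whose real constructor takes skyline's DeviceState), and the call pattern mirrors the functions above:

#include <cstdint>

using u64 = std::uint64_t;
using u8 = std::uint8_t;

// Illustrative only: maps a CPU-side region at an automatically chosen
// virtual address, then unmaps it again
template<typename MM>
bool MapThenUnmap(MM &memoryManager, u8 *cpuPtr, u64 size) {
    // MapAllocate picks a free virtual region; 0 signals failure
    u64 virtAddr{memoryManager.MapAllocate(cpuPtr, size)};
    if (!virtAddr)
        return false;

    // MapFixed would target an explicit address instead:
    // memoryManager.MapFixed(virtAddr, cpuPtr, size);

    return memoryManager.Unmap(virtAddr, size); // false on a misaligned virtAddr
}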
@@ -139,19 +139,19 @@ namespace skyline::gpu::vmm {
         return true;
     }

-    void MemoryManager::Read(u8 *destination, u64 address, u64 size) const {
-        auto chunk{std::upper_bound(chunks.begin(), chunks.end(), address, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
-            return address < chunk.address;
+    void MemoryManager::Read(u8 *destination, u64 virtAddr, u64 size) const {
+        auto chunk{std::upper_bound(chunks.begin(), chunks.end(), virtAddr, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
+            return address < chunk.virtAddr;
         })};

         if (chunk == chunks.end() || chunk->state != ChunkState::Mapped)
-            throw exception("Failed to read region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);
+            throw exception("Failed to read region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", virtAddr, size);

         chunk--;

         u64 initialSize{size};
-        u64 chunkOffset{address - chunk->address};
-        u8 *source{chunk->pointer + chunkOffset};
+        u64 chunkOffset{virtAddr - chunk->virtAddr};
+        u8 *source{chunk->cpuPtr + chunkOffset};
         u64 sourceSize{std::min(chunk->size - chunkOffset, size)};

         // A continuous region in the GPU address space may be made up of several discontinuous regions in physical memory so we have to iterate over all chunks
@@ -161,27 +161,27 @@ namespace skyline::gpu::vmm {
             size -= sourceSize;
             if (size) {
                 if (++chunk == chunks.end() || chunk->state != ChunkState::Mapped)
-                    throw exception("Failed to read region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);
+                    throw exception("Failed to read region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", virtAddr, size);

-                source = chunk->pointer;
+                source = chunk->cpuPtr;
                 sourceSize = std::min(chunk->size, size);
             }
         }
     }

-    void MemoryManager::Write(u8 *source, u64 address, u64 size) const {
-        auto chunk{std::upper_bound(chunks.begin(), chunks.end(), address, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
-            return address < chunk.address;
+    void MemoryManager::Write(u8 *source, u64 virtAddr, u64 size) const {
+        auto chunk{std::upper_bound(chunks.begin(), chunks.end(), virtAddr, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
+            return address < chunk.virtAddr;
         })};

         if (chunk == chunks.end() || chunk->state != ChunkState::Mapped)
-            throw exception("Failed to write region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);
+            throw exception("Failed to write region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", virtAddr, size);

         chunk--;

         u64 initialSize{size};
-        u64 chunkOffset{address - chunk->address};
-        u8 *destination{chunk->pointer + chunkOffset};
+        u64 chunkOffset{virtAddr - chunk->virtAddr};
+        u8 *destination{chunk->cpuPtr + chunkOffset};
         u64 destinationSize{std::min(chunk->size - chunkOffset, size)};

         // A continuous region in the GPU address space may be made up of several discontinuous regions in physical memory so we have to iterate over all chunks
@@ -191,9 +191,9 @@ namespace skyline::gpu::vmm {
             size -= destinationSize;
             if (size) {
                 if (++chunk == chunks.end() || chunk->state != ChunkState::Mapped)
-                    throw exception("Failed to write region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);
+                    throw exception("Failed to write region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", virtAddr, size);

-                destination = chunk->pointer;
+                destination = chunk->cpuPtr;
                 destinationSize = std::min(chunk->size, size);
             }
         }
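The comment shared by Read and Write is the crux: one contiguous virtual range may map to several discontiguous CPU-side regions. A standalone sketch of that chunk-by-chunk copy loop, with simplified types and the assumption that the chunks are sorted by virtAddr and fully cover the requested range:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

using u64 = std::uint64_t;
using u8 = std::uint8_t;

struct MappedChunk {
    u64 virtAddr, size;
    u8 *cpuPtr;
};

// Mirrors the loop in Read above: copy from the chunk containing virtAddr,
// then advance into the following chunks until `size` bytes are done
void ReadRange(const std::vector<MappedChunk> &chunks, std::size_t index, u64 virtAddr, u8 *destination, u64 size) {
    u64 chunkOffset{virtAddr - chunks[index].virtAddr};
    u8 *source{chunks[index].cpuPtr + chunkOffset};
    u64 sourceSize{std::min(chunks[index].size - chunkOffset, size)};

    while (size) {
        std::memcpy(destination, source, sourceSize);
        destination += sourceSize;
        size -= sourceSize;
        if (size) { // continue into the next mapped chunk
            ++index;
            source = chunks[index].cpuPtr;
            sourceSize = std::min(chunks[index].size, size);
        }
    }
}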
@@ -11,30 +11,30 @@ namespace skyline {
    }

    namespace gpu::vmm {
-        enum ChunkState {
+        enum class ChunkState {
            Unmapped, //!< The chunk is unmapped
            Reserved, //!< The chunk is reserved
            Mapped //!< The chunk is mapped and a CPU side address is present
        };

        struct ChunkDescriptor {
-            u64 address; //!< The address of the chunk in the GPU address space
+            u64 virtAddr; //!< The address of the chunk in the virtual address space
            u64 size; //!< The size of the chunk in bytes
-            u8 *pointer; //!< A pointer to the chunk in the CPU address space (if mapped)
+            u8 *cpuPtr; //!< A pointer to the chunk in the application's address space (if mapped)
            ChunkState state;

-            ChunkDescriptor(u64 address, u64 size, u8 *pointer, ChunkState state) : address(address), size(size), pointer(pointer), state(state) {}
+            ChunkDescriptor(u64 virtAddr, u64 size, u8 *cpuPtr, ChunkState state) : virtAddr(virtAddr), size(size), cpuPtr(cpuPtr), state(state) {}

            /**
             * @return If the given chunk can be contained wholly within this chunk
             */
            inline bool CanContain(const ChunkDescriptor &chunk) {
-                return (chunk.address >= this->address) && ((this->size + this->address) >= (chunk.size + chunk.address));
+                return (chunk.virtAddr >= this->virtAddr) && ((this->size + this->virtAddr) >= (chunk.size + chunk.virtAddr));
            }
        };

        /**
-         * @brief The MemoryManager class handles the mapping of the GPU address space
+         * @brief The MemoryManager class handles mapping between a virtual address space and an application's address space
         */
        class MemoryManager {
          private:
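One behavioural consequence of the enum class change is worth noting: the enumerators become scoped and lose implicit integer conversion, so any unqualified uses must be updated. A minimal illustration:

enum class ChunkState { Unmapped, Reserved, Mapped };

int main() {
    // ChunkState state{Mapped};          // no longer compiles: enumerator is scoped
    ChunkState state{ChunkState::Mapped}; // must be qualified
    // int raw{state};                    // no longer compiles: no implicit conversion
    int raw{static_cast<int>(state)};     // explicit conversion required
    return raw == 2 ? 0 : 1;
}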
@@ -42,18 +42,18 @@ namespace skyline {
            std::vector<ChunkDescriptor> chunks;

            /**
-             * @brief Finds a chunk of the specified type in the GPU address space that is larger than the given size
+             * @brief Finds a chunk in the virtual address space that meets the given requirements
             * @param state The state of the chunk to find
             * @param size The minimum size of the chunk to find
-             * @param alignment The alignment of the chunk to find
-             * @return The first unmapped chunk in the GPU address space that fulfils the requested conditions
+             * @param alignment The minimum alignment of the chunk to find
+             * @return The first applicable chunk
             */
            std::optional<ChunkDescriptor> FindChunk(ChunkState state, u64 size, u64 alignment = 0);

            /**
             * @brief Inserts a chunk into the chunk list, resizing and splitting as necessary
             * @param newChunk The chunk to insert
-             * @return The base virtual GPU address of the inserted chunk
+             * @return The base virtual address of the inserted chunk
             */
            u64 InsertChunk(const ChunkDescriptor &newChunk);
@@ -61,81 +61,82 @@ namespace skyline {
            MemoryManager(const DeviceState &state);

            /**
-             * @brief Reserves a region of the GPU address space so it will not be chosen automatically when mapping
+             * @brief Reserves a region of the virtual address space so it will not be chosen automatically when mapping
             * @param size The size of the region to reserve
             * @param alignment The alignment of the region to reserve
-             * @return The virtual GPU base address of the region base
+             * @return The base virtual address of the reserved region
             */
            u64 ReserveSpace(u64 size, u64 alignment);

            /**
-             * @brief Reserves a fixed region of the GPU address space so it will not be chosen automatically when mapping
-             * @param address The virtual base address of the region to allocate
+             * @brief Reserves a fixed region of the virtual address space so it will not be chosen automatically when mapping
+             * @param virtAddr The virtual base address of the region to allocate
             * @param size The size of the region to allocate
-             * @return The virtual address of the region base
+             * @return The base virtual address of the reserved region
             */
-            u64 ReserveFixed(u64 address, u64 size);
+            u64 ReserveFixed(u64 virtAddr, u64 size);

            /**
-             * @brief Maps a physical CPU memory region to an automatically chosen virtual memory region
-             * @param pointer A pointer to the region to be mapped into the GPU's address space
+             * @brief Maps a CPU memory region into an automatically chosen region of the virtual address space
+             * @param cpuPtr A pointer to the region to be mapped into the virtual address space
             * @param size The size of the region to map
-             * @return The virtual address of the region base
+             * @return The base virtual address of the mapped region
             */
-            u64 MapAllocate(u8 *pointer, u64 size);
+            u64 MapAllocate(u8 *cpuPtr, u64 size);

            /**
-             * @brief Maps a physical CPU memory region to a fixed virtual memory region
-             * @param address The target virtual address of the region
-             * @param pointer A pointer to the region to be mapped into the GPU's address space
+             * @brief Maps a CPU memory region to a fixed region in the virtual address space
+             * @param virtAddr The target virtual address of the region
+             * @param cpuPtr A pointer to the region to be mapped into the virtual address space
             * @param size The size of the region to map
-             * @return The virtual address of the region base
+             * @return The base virtual address of the mapped region
             */
-            u64 MapFixed(u64 address, u8 *pointer, u64 size);
+            u64 MapFixed(u64 virtAddr, u8 *cpuPtr, u64 size);

            /**
-             * @brief Unmaps all chunks in the given region from the GPU address space
+             * @brief Unmaps all chunks in the given region from the virtual address space
             * @return Whether the operation succeeded
             */
-            bool Unmap(u64 address, u64 size);
+            bool Unmap(u64 virtAddr, u64 size);

-            void Read(u8 *destination, u64 address, u64 size) const;
+            void Read(u8 *destination, u64 virtAddr, u64 size) const;

            /**
-             * @brief Reads in a span from a region of the GPU virtual address space
+             * @brief Reads in a span from a region of the virtual address space
             */
            template<typename T>
-            void Read(span<T> destination, u64 address) const {
-                Read(reinterpret_cast<u8 *>(destination.data()), address, destination.size_bytes());
+            void Read(span<T> destination, u64 virtAddr) const {
+                Read(reinterpret_cast<u8 *>(destination.data()), virtAddr, destination.size_bytes());
            }

            /**
-             * @brief Reads in an object from a region of the GPU virtual address space
+             * @brief Reads in an object from a region of the virtual address space
             * @tparam T The type of object to return
             */
            template<typename T>
-            T Read(u64 address) const {
+            T Read(u64 virtAddr) const {
                T obj;
-                Read(reinterpret_cast<u8 *>(&obj), address, sizeof(T));
+                Read(reinterpret_cast<u8 *>(&obj), virtAddr, sizeof(T));
                return obj;
            }

-            void Write(u8 *source, u64 address, u64 size) const;
+            void Write(u8 *source, u64 virtAddr, u64 size) const;

            /**
-             * @brief Writes out a span to a region of the GPU virtual address space
+             * @brief Writes out a span to a region of the virtual address space
             */
            template<typename T>
-            void Write(span<T> source, u64 address) const {
-                Write(reinterpret_cast<u8 *>(source.data()), address, source.size_bytes());
+            void Write(span<T> source, u64 virtAddr) const {
+                Write(reinterpret_cast<u8 *>(source.data()), virtAddr, source.size_bytes());
            }

            /**
-             * @brief Reads in an object from a region of the GPU virtual address space
+             * @brief Writes out an object to a region of the virtual address space
             */
            template<typename T>
-            void Write(T source, u64 address) const {
-                Write(reinterpret_cast<u8 *>(&source), address, sizeof(T));
+            void Write(T source, u64 virtAddr) const {
+                Write(reinterpret_cast<u8 *>(&source), virtAddr, sizeof(T));
            }
        };
    }
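A usage sketch of the templated accessors declared above; MM again stands in for MemoryManager, and the u32 payload is illustrative:

#include <cstdint>

using u64 = std::uint64_t;
using u32 = std::uint32_t;

// Read-modify-write of a 32-bit value at a virtual address via the
// object overloads, which wrap the raw byte interface internally
template<typename MM>
u32 ReadModifyWrite(MM &memoryManager, u64 virtAddr) {
    auto value{memoryManager.template Read<u32>(virtAddr)}; // copies sizeof(u32) bytes out
    value |= 1;
    memoryManager.Write(value, virtAddr);                   // copies sizeof(u32) bytes back in
    return value;
}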
@@ -148,7 +148,7 @@ namespace skyline::service::hosbinder {
                throw exception("Unknown pixel format used for FB");
        }

-        auto texture{std::make_shared<gpu::GuestTexture>(state, nvBuffer->pointer + gbpBuffer.offset, gpu::texture::Dimensions(gbpBuffer.width, gbpBuffer.height), format, gpu::texture::TileMode::Block, gpu::texture::TileConfig{.surfaceWidth = static_cast<u16>(gbpBuffer.stride), .blockHeight = static_cast<u8>(1U << gbpBuffer.blockHeightLog2), .blockDepth = 1})};
+        auto texture{std::make_shared<gpu::GuestTexture>(state, nvBuffer->ptr + gbpBuffer.offset, gpu::texture::Dimensions(gbpBuffer.width, gbpBuffer.height), format, gpu::texture::TileMode::Block, gpu::texture::TileConfig{.surfaceWidth = static_cast<u16>(gbpBuffer.stride), .blockHeight = static_cast<u8>(1U << gbpBuffer.blockHeightLog2), .blockDepth = 1})};

        queue[data.slot] = std::make_shared<Buffer>(gbpBuffer, texture->InitializeTexture());
        state.gpu->presentation.bufferEvent->Signal();
@@ -98,7 +98,7 @@ namespace skyline::service::nvdrv::device {
            }

            u64 gpuAddress{data.offset + data.bufferOffset};
-            u8 *cpuPtr{region->second.cpuPtr + data.bufferOffset};
+            u8 *cpuPtr{region->second.ptr + data.bufferOffset};

            if (state.gpu->memoryManager.MapFixed(gpuAddress, cpuPtr, data.mappingSize)) {
                state.logger->Warn("Failed to remap GPU address space region: 0x{:X}", gpuAddress);
@@ -108,20 +108,20 @@ namespace skyline::service::nvdrv::device {
                return NvStatus::Success;
            }

-            u8 *mapPointer{data.bufferOffset + mapping->pointer};
-            u64 mapSize{data.mappingSize ? data.mappingSize : mapping->size};
+            u8 *cpuPtr{data.bufferOffset + mapping->ptr};
+            u64 size{data.mappingSize ? data.mappingSize : mapping->size};

            if (data.flags.fixed)
-                data.offset = state.gpu->memoryManager.MapFixed(data.offset, mapPointer, mapSize);
+                data.offset = state.gpu->memoryManager.MapFixed(data.offset, cpuPtr, size);
            else
-                data.offset = state.gpu->memoryManager.MapAllocate(mapPointer, mapSize);
+                data.offset = state.gpu->memoryManager.MapAllocate(cpuPtr, size);

            if (data.offset == 0) {
                state.logger->Warn("Failed to map GPU address space region!");
                return NvStatus::BadParameter;
            }

-            regionMap[data.offset] = {mapPointer, mapSize, data.flags.fixed};
+            regionMap[data.offset] = {cpuPtr, size, data.flags.fixed};

            return NvStatus::Success;
        } catch (const std::out_of_range &) {
@@ -176,17 +176,17 @@ namespace skyline::service::nvdrv::device {
            constexpr u32 MinAlignmentShift{0x10}; // This shift is applied to all addresses passed to Remap

            auto entries{buffer.cast<Entry>()};
-            for (auto entry : entries) {
+            for (const auto &entry : entries) {
                try {
                    auto driver{nvdrv::driver.lock()};
                    auto nvmap{driver->nvMap.lock()};
                    auto mapping{nvmap->GetObject(entry.nvmapHandle)};

-                    u64 mapAddress{static_cast<u64>(entry.gpuOffset) << MinAlignmentShift};
-                    u8 *mapPointer{mapping->pointer + (static_cast<u64>(entry.mapOffset) << MinAlignmentShift)};
-                    u64 mapSize{static_cast<u64>(entry.pages) << MinAlignmentShift};
+                    u64 virtAddr{static_cast<u64>(entry.gpuOffset) << MinAlignmentShift};
+                    u8 *cpuPtr{mapping->ptr + (static_cast<u64>(entry.mapOffset) << MinAlignmentShift)};
+                    u64 size{static_cast<u64>(entry.pages) << MinAlignmentShift};

-                    state.gpu->memoryManager.MapFixed(mapAddress, mapPointer, mapSize);
+                    state.gpu->memoryManager.MapFixed(virtAddr, cpuPtr, size);
                } catch (const std::out_of_range &) {
                    state.logger->Warn("Invalid NvMap handle: 0x{:X}", entry.nvmapHandle);
                    return NvStatus::BadParameter;
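Remap entries pack addresses in units of 1 << MinAlignmentShift (0x10, i.e. 64 KiB granules); a worked example of the decoding arithmetic, with illustrative entry values:

#include <cstdint>

using u64 = std::uint64_t;
using u32 = std::uint32_t;

constexpr u32 MinAlignmentShift{0x10}; // as in the hunk above

int main() {
    u32 gpuOffset{0x3};                // illustrative entry values
    u32 pages{0x2};
    u64 virtAddr{static_cast<u64>(gpuOffset) << MinAlignmentShift}; // 0x30000
    u64 size{static_cast<u64>(pages) << MinAlignmentShift};         // 0x20000 (two 64 KiB units)
    return (virtAddr == 0x30000 && size == 0x20000) ? 0 : 1;
}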
@@ -13,7 +13,7 @@ namespace skyline::service::nvdrv::device {
    class NvHostAsGpu : public NvDevice {
      private:
        struct AddressSpaceRegion {
-            u8 *cpuPtr;
+            u8 *ptr;
            u64 size;
            bool fixed;
        };
@@ -49,7 +49,7 @@ namespace skyline::service::nvdrv::device {
                u32 align; // In
                u8 kind; // In
                u8 _pad0_[7];
-                u8 *pointer; // InOut
+                u8 *ptr; // InOut
            } &data = buffer.as<Data>();

            try {
@@ -58,10 +58,10 @@ namespace skyline::service::nvdrv::device {
                object->flags = data.flags;
                object->align = data.align;
                object->kind = data.kind;
-                object->pointer = data.pointer;
+                object->ptr = data.ptr;
                object->status = NvMapObject::Status::Allocated;

-                state.logger->Debug("Handle: 0x{:X}, HeapMask: 0x{:X}, Flags: {}, Align: 0x{:X}, Kind: {}, Pointer: 0x{:X}", data.handle, data.heapMask, data.flags, data.align, data.kind, data.pointer);
+                state.logger->Debug("Handle: 0x{:X}, HeapMask: 0x{:X}, Flags: {}, Align: 0x{:X}, Kind: {}, Pointer: 0x{:X}", data.handle, data.heapMask, data.flags, data.align, data.kind, data.ptr);
                return NvStatus::Success;
            } catch (const std::out_of_range &) {
                state.logger->Warn("Invalid NvMap handle: 0x{:X}", data.handle);
@@ -73,7 +73,7 @@ namespace skyline::service::nvdrv::device {
            struct Data {
                u32 handle; // In
                u32 _pad0_;
-                u8 *pointer; // Out
+                u8 *ptr; // Out
                u32 size; // Out
                u32 flags; // Out
            } &data = buffer.as<Data>();
@@ -82,17 +82,17 @@ namespace skyline::service::nvdrv::device {
            try {
                auto &object{maps.at(data.handle - 1)};
                if (object.use_count() > 1) {
-                    data.pointer = object->pointer;
+                    data.ptr = object->ptr;
                    data.flags = 0x0;
                } else {
-                    data.pointer = nullptr;
+                    data.ptr = nullptr;
                    data.flags = 0x1; // Not free yet
                }

                data.size = object->size;
                object = nullptr;

-                state.logger->Debug("Handle: 0x{:X} -> Pointer: 0x{:X}, Size: 0x{:X}, Flags: 0x{:X}", data.handle, data.pointer, data.size, data.flags);
+                state.logger->Debug("Handle: 0x{:X} -> Pointer: 0x{:X}, Size: 0x{:X}, Flags: 0x{:X}", data.handle, data.ptr, data.size, data.flags);
                return NvStatus::Success;
            } catch (const std::out_of_range &) {
                state.logger->Warn("Invalid NvMap handle: 0x{:X}", data.handle);
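The Free path above decides what to report from std::shared_ptr::use_count(): more than one owner means a mapping still references the buffer. A minimal standalone illustration of that pattern:

#include <memory>

struct NvMapObjectLike { int size{0x1000}; };

int main() {
    auto object{std::make_shared<NvMapObjectLike>()};
    auto alias{object}; // a second owner, e.g. a mapping still holding the object

    // With more than one owner the buffer is still in use, so the caller
    // gets the pointer back and flags = 0x0; otherwise flags = 0x1 ("not free yet")
    bool stillReferenced{object.use_count() > 1};

    object = nullptr; // drop this reference; `alias` keeps the object alive
    return stillReferenced ? 0 : 1;
}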
@@ -18,7 +18,7 @@ namespace skyline::service::nvdrv::device {
        struct NvMapObject {
            u32 id;
            u32 size;
-            u8 *pointer{};
+            u8 *ptr{};
            u32 flags{}; //!< The flags of the memory (0 = Read Only, 1 = Read-Write)
            u32 align{};
            u32 heapMask{}; //!< This is set during Alloc and returned during Param