Mirror of https://github.com/skyline-emu/skyline.git (synced 2024-11-16 04:19:15 +01:00)

Restructure the memory manager

Changes and improves the accuracy of various features of the memory manager; reduces stuttering in certain games.

Parent: 35fb874a42
Commit: dac180d7c1
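For orientation, the central change visible in the hunks below is that chunks are now tracked in an address-keyed std::map instead of a sorted std::vector. A minimal standalone sketch of that bookkeeping (not part of the commit; ChunkDescriptor reduced to a size field, values illustrative):

// Standalone sketch of the address-keyed chunk bookkeeping used below; the real
// ChunkDescriptor also carries permission/state/attributes, this one only a size.
#include <cstddef>
#include <cstdint>
#include <map>

struct ChunkDescriptor {
    std::size_t size{};
};

int main() {
    std::uint8_t backing[0x4000]{};
    std::map<std::uint8_t *, ChunkDescriptor> chunks;
    chunks[backing] = ChunkDescriptor{sizeof(backing)}; // one chunk covering the whole range

    // Containing-chunk lookup: first chunk based above the address, then step back one.
    std::uint8_t *addr{backing + 0x1000};
    auto it{chunks.upper_bound(addr)};
    if (it != chunks.begin())
        --it;
    bool contains{addr >= it->first && addr < it->first + it->second.size};
    return contains ? 0 : 1;
}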
@@ -175,7 +175,6 @@ add_library(skyline SHARED
${source_DIR}/skyline/kernel/types/KProcess.cpp
${source_DIR}/skyline/kernel/types/KThread.cpp
${source_DIR}/skyline/kernel/types/KSharedMemory.cpp
${source_DIR}/skyline/kernel/types/KPrivateMemory.cpp
${source_DIR}/skyline/kernel/types/KSyncObject.cpp
${source_DIR}/skyline/audio.cpp
${source_DIR}/skyline/gpu.cpp
@@ -7,13 +7,165 @@
#include "types/KProcess.h"

namespace skyline::kernel {
MemoryManager::MemoryManager(const DeviceState &state) : state(state) {}
MemoryManager::MemoryManager(const DeviceState &state) noexcept : state(state), setHeapSize(), chunks() {}

MemoryManager::~MemoryManager() {
MemoryManager::~MemoryManager() noexcept {
if (base.valid() && !base.empty())
munmap(reinterpret_cast<void *>(base.data()), base.size());
}

std::map<u8 *,ChunkDescriptor>::iterator MemoryManager::upper_bound(u8 *address) {
std::map<u8 *,ChunkDescriptor>::iterator result{chunks.begin()};

if (chunks.size() != 1) [[likely]]
while (result->first <= address) {
++result;
if (result->first + result->second.size == addressSpace.end().base())
break;
}

return result;
}
void MemoryManager::MapInternal(std::pair<u8 *, ChunkDescriptor> *newDesc) {
// The chunk that contains / precedes the new chunk base address
auto firstChunkBase{upper_bound(newDesc->first)};
while (newDesc->first <= firstChunkBase->first)
--firstChunkBase;

// The chunk that contains / follows the end address of the new chunk
auto lastChunkBase{upper_bound(newDesc->first + newDesc->second.size)};
while ((newDesc->first + newDesc->second.size) < lastChunkBase->first)
--lastChunkBase;

ChunkDescriptor firstChunk{firstChunkBase->second};
ChunkDescriptor lastChunk{lastChunkBase->second};

bool needsReprotection{false};
bool isUnmapping{newDesc->second.state == memory::states::Unmapped};

// We cut a hole in a single chunk
if (firstChunkBase->first == lastChunkBase->first) {
if (firstChunk.IsCompatible(newDesc->second)) [[unlikely]]
// No editing necessary
return;

if ((firstChunk.state == memory::states::Unmapped) != isUnmapping)
needsReprotection = true;

// We edit the chunk's first half
firstChunk.size = static_cast<size_t>(newDesc->first - firstChunkBase->first);
chunks[firstChunkBase->first] = firstChunk;

// We create the chunk's second half
lastChunk.size = static_cast<size_t>((lastChunkBase->first + lastChunk.size) - (newDesc->first + newDesc->second.size));
chunks[newDesc->first + newDesc->second.size] = lastChunk;

// Insert new chunk in between
chunks[newDesc->first] = newDesc->second;
} else {
// If there are descriptors between first and last chunk, delete them
if ((firstChunkBase->first + firstChunk.size) != lastChunkBase->first) {
auto tempChunkBase{firstChunkBase};

++tempChunkBase;
while (tempChunkBase->first != lastChunkBase->first) {
auto tmp{tempChunkBase++};
if ((tmp->second.state == memory::states::Unmapped) != isUnmapping)
needsReprotection = true;
chunks.erase(tmp);
}
}

bool shouldInsert{true};

if (firstChunk.IsCompatible(newDesc->second)) {
shouldInsert = false;

firstChunk.size = static_cast<size_t>((newDesc->first + newDesc->second.size) - firstChunkBase->first);
chunks[firstChunkBase->first] = firstChunk;
} else if ((firstChunkBase->first + firstChunk.size) != newDesc->first) {
firstChunk.size = static_cast<size_t>(newDesc->first - firstChunkBase->first);

chunks[firstChunkBase->first] = firstChunk;

if ((firstChunk.state == memory::states::Unmapped) != isUnmapping)
needsReprotection = true;
}

if (lastChunk.IsCompatible(newDesc->second)) {
u8 *oldBase{lastChunkBase->first};
chunks.erase(lastChunkBase);

if (shouldInsert) {
shouldInsert = false;

lastChunk.size = static_cast<size_t>((lastChunk.size + oldBase) - (newDesc->first));

chunks[newDesc->first] = lastChunk;
} else {
firstChunk.size = static_cast<size_t>((lastChunk.size + oldBase) - firstChunkBase->first);
chunks[firstChunkBase->first] = firstChunk;
}
} else if ((newDesc->first + newDesc->second.size) != lastChunkBase->first) {
lastChunk.size = static_cast<size_t>((lastChunk.size + lastChunkBase->first) - (newDesc->first + newDesc->second.size));

chunks.erase(lastChunkBase);
chunks[newDesc->first + newDesc->second.size] = lastChunk;

if ((lastChunk.state == memory::states::Unmapped) != isUnmapping)
needsReprotection = true;
}

// Insert if not merged
if (shouldInsert)
chunks[newDesc->first] = newDesc->second;
}

if (needsReprotection)
if (mprotect(newDesc->first, newDesc->second.size, !isUnmapping ? PROT_READ | PROT_WRITE | PROT_EXEC : PROT_NONE)) [[unlikely]]
Logger::Warn("Reprotection failed: {}", strerror(errno));
}
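A worked example of the single-chunk ("cut a hole") path above, with illustrative numbers rather than anything taken from the commit:

// Carving [0x3000, 0x5000) out of a single chunk based at 0x0 with size 0x8000,
// mirroring the firstChunk.size / lastChunk.size assignments in MapInternal.
#include <cassert>
#include <cstdint>

int main() {
    std::uintptr_t chunkBase{0x0}, chunkSize{0x8000};
    std::uintptr_t newBase{0x3000}, newSize{0x2000};

    std::uintptr_t firstHalfSize{newBase - chunkBase};                       // 0x3000, keyed at chunkBase
    std::uintptr_t secondHalfBase{newBase + newSize};                        // 0x5000
    std::uintptr_t secondHalfSize{(chunkBase + chunkSize) - secondHalfBase}; // 0x3000

    assert(firstHalfSize + newSize + secondHalfSize == chunkSize);
    return 0;
}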
void MemoryManager::ForeachChunkinRange(span<skyline::u8> memory, auto editCallback) {
auto chunkBase{upper_bound(memory.data())};
if (memory.data() < chunkBase->first)
--chunkBase;

ChunkDescriptor resultChunk{chunkBase->second};

size_t sizeLeft{memory.size()};

if (chunkBase->first < memory.data()) {
size_t copySize{std::min<size_t>(resultChunk.size - (static_cast<size_t>(memory.data() - chunkBase->first)), memory.size())};

std::pair<u8 *, ChunkDescriptor> temp(memory.data(), resultChunk);
temp.second.size = copySize;
editCallback(temp);

++chunkBase;
resultChunk = chunkBase->second;
sizeLeft -= copySize;
}

while (sizeLeft) {
if (sizeLeft < resultChunk.size) {
std::pair<u8 *, ChunkDescriptor> temp(chunkBase->first, resultChunk);
temp.second.size = sizeLeft;
editCallback(temp);
break;
} else [[likely]] {
std::pair<u8 *, ChunkDescriptor> temp(chunkBase->first, resultChunk);

editCallback(temp);

sizeLeft = sizeLeft - resultChunk.size;
++chunkBase;
resultChunk = chunkBase->second;
}
}
}

constexpr size_t RegionAlignment{1ULL << 21}; //!< The minimum alignment of a HOS memory region
constexpr size_t CodeRegionSize{4ULL * 1024 * 1024 * 1024}; //!< The assumed maximum size of the code region (4GiB)
@@ -44,11 +196,11 @@ namespace skyline::kernel {
break;
} while ((line = maps.find_first_of('\n', line)) != std::string::npos && line++);

if (!region.valid())
if (!region.valid()) [[unlikely]]
throw exception("Allocation failed");

auto result{mmap(reinterpret_cast<void *>(region.data()), size, PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_SHARED, -1, 0)};
if (result == MAP_FAILED)
if (result == MAP_FAILED) [[unlikely]]
throw exception("Failed to mmap guest address space: {}", strerror(errno));

return region;

@@ -64,8 +216,9 @@
throw exception("32-bit address spaces are not supported");

case memory::AddressSpaceType::AddressSpace36Bit: {
addressSpace = span<u8>{reinterpret_cast<u8 *>(0x8000000), (1ULL << 39) - 0x8000000};
baseSize = 0x180000000 + 0x78000000 + 0x180000000;
addressSpace = span<u8>{reinterpret_cast<u8 *>(0), (1ULL << 36)};
baseSize = 0x180000000 + 0x180000000;
break;
}

case memory::AddressSpaceType::AddressSpace39Bit: {
@@ -83,68 +236,50 @@
if (type != memory::AddressSpaceType::AddressSpace36Bit) {
base = AllocateMappedRange(baseSize, RegionAlignment, KgslReservedRegionSize, addressSpace.size(), false);

chunks = {
ChunkDescriptor{
.ptr = addressSpace.data(),
.size = static_cast<size_t>(base.data() - addressSpace.data()),
.state = memory::states::Reserved,
},
ChunkDescriptor{
.ptr = base.data(),
.size = base.size(),
.state = memory::states::Unmapped,
},
ChunkDescriptor{
.ptr = base.end().base(),
.size = addressSpace.size() - reinterpret_cast<u64>(base.end().base()),
.state = memory::states::Reserved,
}};
chunks[addressSpace.data()] = ChunkDescriptor{
.size = addressSpace.size(),
.state = memory::states::Unmapped,
};

code = base;

} else {
base = AllocateMappedRange(baseSize, 1ULL << 36, KgslReservedRegionSize, addressSpace.size(), false);
codeBase36Bit = AllocateMappedRange(0x32000000, RegionAlignment, 0xC000000, 0x78000000ULL + reinterpret_cast<size_t>(addressSpace.data()), true);
codeBase36Bit = AllocateMappedRange(0x78000000, RegionAlignment, 0x8000000, KgslReservedRegionSize, false);
base = AllocateMappedRange(baseSize, RegionAlignment, KgslReservedRegionSize, addressSpace.size(), false);

if ((reinterpret_cast<u64>(base.data()) + baseSize) > (1ULL << 36)) {
Logger::Warn("Couldn't fit regions into AS! Resizing AS instead!");
addressSpace = span<u8>{reinterpret_cast<u8 *>(0), 1ULL << 39};
}

chunks[addressSpace.data()] = ChunkDescriptor{
.size = addressSpace.size(),
.state = memory::states::Unmapped,
};

chunks = {
ChunkDescriptor{
.ptr = addressSpace.data(),
.size = static_cast<size_t>(codeBase36Bit.data() - addressSpace.data()),
.state = memory::states::Heap, // We can't use reserved here as rtld uses it to know when to halt memory walking
},
ChunkDescriptor{
.ptr = codeBase36Bit.data(),
.size = codeBase36Bit.size(),
.state = memory::states::Unmapped,
},
ChunkDescriptor{
.ptr = codeBase36Bit.end().base(),
.size = static_cast<u64>(base.data() - codeBase36Bit.end().base()),
.state = memory::states::Heap,
},
ChunkDescriptor{
.ptr = base.data(),
.size = base.size(),
.state = memory::states::Unmapped,
},
ChunkDescriptor{
.ptr = base.end().base(),
.size = addressSpace.size() - reinterpret_cast<u64>(base.end().base()),
.state = memory::states::Reserved,
}};
code = codeBase36Bit;
}
}
void MemoryManager::InitializeRegions(span<u8> codeRegion) {
if (!util::IsAligned(codeRegion.data(), RegionAlignment))
if (!util::IsAligned(codeRegion.data(), RegionAlignment)) [[unlikely]]
throw exception("Non-aligned code region was used to initialize regions: 0x{:X} - 0x{:X}", codeRegion.data(), codeRegion.end().base());

switch (addressSpaceType) {
case memory::AddressSpaceType::AddressSpace36Bit: {
// Place code, stack and TLS/IO in the lower 36-bits of the host AS and heap past that
code = span<u8>{codeBase36Bit.data(), util::AlignUp(codeRegion.size(), RegionAlignment)};
stack = span<u8>{code.end().base(), codeBase36Bit.size() - code.size()};

// As a workaround if we can't place the code region at the base of the AS we mark it as inaccessible heap so rtld doesn't crash
if (codeBase36Bit.data() != reinterpret_cast<u8 *>(0x8000000)) {
std::pair<u8 *, ChunkDescriptor> tmp(reinterpret_cast<u8 *>(0x8000000), ChunkDescriptor{
.size = reinterpret_cast<size_t>(codeBase36Bit.data() - 0x8000000),
.state = memory::states::Heap,
});
MapInternal(&tmp);
}

// Place code, stack and TLS/IO in the lower 36-bits of the host AS and heap and alias past that
code = span<u8>{codeBase36Bit.data(), codeBase36Bit.data() + 0x70000000};
stack = span<u8>{codeBase36Bit.data(), codeBase36Bit.data() + 0x78000000};
tlsIo = stack; //!< TLS/IO is shared with Stack on 36-bit
alias = span<u8>{base.data(), 0x180000000};
heap = span<u8>{alias.end().base(), 0x180000000};

@@ -157,6 +292,15 @@
heap = span<u8>{alias.end().base(), 0x180000000};
stack = span<u8>{heap.end().base(), 0x80000000};
tlsIo = span<u8>{stack.end().base(), 0x1000000000};

u64 newSize{code.size() + alias.size() + stack.size() + heap.size() + tlsIo.size()};

if (newSize > base.size()) [[unlikely]]
throw exception("Guest VMM size has exceeded host carveout size: 0x{:X}/0x{:X} (Code: 0x{:X}/0x{:X})", newSize, base.size(), code.size(), CodeRegionSize);

if (newSize != base.size())
munmap(base.end().base(), newSize - base.size());

break;
}

@@ -164,31 +308,25 @@
throw exception("Regions initialized without VMM initialization");
}

auto newSize{code.size() + alias.size() + stack.size() + heap.size() + ((addressSpaceType == memory::AddressSpaceType::AddressSpace39Bit) ? tlsIo.size() : 0)};
if (newSize > base.size())
throw exception("Guest VMM size has exceeded host carveout size: 0x{:X}/0x{:X} (Code: 0x{:X}/0x{:X})", newSize, base.size(), code.size(), CodeRegionSize);
if (newSize != base.size())
munmap(base.end().base(), newSize - base.size());

if (codeRegion.size() > code.size())
if (codeRegion.size() > code.size()) [[unlikely]]
throw exception("Code region ({}) is smaller than mapped code size ({})", code.size(), codeRegion.size());

Logger::Debug("Region Map:\nVMM Base: 0x{:X}\nCode Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nAlias Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nHeap Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nStack Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nTLS/IO Region: 0x{:X} - 0x{:X} (Size: 0x{:X})", base.data(), code.data(), code.end().base(), code.size(), alias.data(), alias.end().base(), alias.size(), heap.data(), heap.end().base(), heap.size(), stack.data(), stack.end().base(), stack.size(), tlsIo.data(), tlsIo.end().base(), tlsIo.size());
Logger::Debug("Region Map:\nVMM Base: 0x{:X}\nCode Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nAlias Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nHeap Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nStack Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nTLS/IO Region: 0x{:X} - 0x{:X} (Size: 0x{:X})", code.data(), code.data(), code.end().base(), code.size(), alias.data(), alias.end().base(), alias.size(), heap.data(), heap.end().base(), heap.size(), stack.data(), stack.end().base(), stack.size(), tlsIo.data(), tlsIo.end().base(), tlsIo.size());
}
span<u8> MemoryManager::CreateMirror(span<u8> mapping) {
if (!base.contains(mapping))
if (!base.contains(mapping)) [[unlikely]]
throw exception("Mapping is outside of VMM base: 0x{:X} - 0x{:X}", mapping.data(), mapping.end().base());

auto offset{static_cast<size_t>(mapping.data() - base.data())};
if (!util::IsPageAligned(offset) || !util::IsPageAligned(mapping.size()))
if (!util::IsPageAligned(offset) || !util::IsPageAligned(mapping.size())) [[unlikely]]
throw exception("Mapping is not aligned to a page: 0x{:X}-0x{:X} (0x{:X})", mapping.data(), mapping.end().base(), offset);

auto mirror{mremap(mapping.data(), 0, mapping.size(), MREMAP_MAYMOVE)};
if (mirror == MAP_FAILED)
if (mirror == MAP_FAILED) [[unlikely]]
throw exception("Failed to create mirror mapping at 0x{:X}-0x{:X} (0x{:X}): {}", mapping.data(), mapping.end().base(), offset, strerror(errno));

mprotect(mirror, mapping.size(), PROT_READ | PROT_WRITE | PROT_EXEC);
mprotect(mirror, mapping.size(), PROT_READ | PROT_WRITE);

return span<u8>{reinterpret_cast<u8 *>(mirror), mapping.size()};
}

@@ -199,113 +337,226 @@
totalSize += region.size();

auto mirrorBase{mmap(nullptr, totalSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)}; // Reserve address space for all mirrors
if (mirrorBase == MAP_FAILED)
if (mirrorBase == MAP_FAILED) [[unlikely]]
throw exception("Failed to create mirror base: {} (0x{:X} bytes)", strerror(errno), totalSize);

size_t mirrorOffset{};
for (const auto &region : regions) {
if (!base.contains(region))
if (!base.contains(region)) [[unlikely]]
throw exception("Mapping is outside of VMM base: 0x{:X} - 0x{:X}", region.data(), region.end().base());

auto offset{static_cast<size_t>(region.data() - base.data())};
if (!util::IsPageAligned(offset) || !util::IsPageAligned(region.size()))
if (!util::IsPageAligned(offset) || !util::IsPageAligned(region.size())) [[unlikely]]
throw exception("Mapping is not aligned to a page: 0x{:X}-0x{:X} (0x{:X})", region.data(), region.end().base(), offset);

auto mirror{mremap(region.data(), 0, region.size(), MREMAP_FIXED | MREMAP_MAYMOVE, reinterpret_cast<u8 *>(mirrorBase) + mirrorOffset)};
if (mirror == MAP_FAILED)
if (mirror == MAP_FAILED) [[unlikely]]
throw exception("Failed to create mirror mapping at 0x{:X}-0x{:X} (0x{:X}): {}", region.data(), region.end().base(), offset, strerror(errno));

mprotect(mirror, region.size(), PROT_READ | PROT_WRITE | PROT_EXEC);
mprotect(mirror, region.size(), PROT_READ | PROT_WRITE);

mirrorOffset += region.size();
}

if (mirrorOffset != totalSize)
if (mirrorOffset != totalSize) [[unlikely]]
throw exception("Mirror size mismatch: 0x{:X} != 0x{:X}", mirrorOffset, totalSize);

return span<u8>{reinterpret_cast<u8 *>(mirrorBase), totalSize};
}
void MemoryManager::FreeMemory(span<u8> memory) {
void MemoryManager::SetLockOnChunks(span<u8> memory, bool value) {
std::unique_lock lock(mutex);

ForeachChunkinRange(memory, [&](std::pair<u8 *, ChunkDescriptor> &desc) __attribute__((always_inline)) {
desc.second.attributes.isBorrowed = value;
MapInternal(&desc);
});
}

void MemoryManager::SetCPUCachingOnChunks(span<u8> memory, bool value) {
std::unique_lock lock(mutex);

ForeachChunkinRange(memory, [&](std::pair<u8 *, ChunkDescriptor> &desc) __attribute__((always_inline)) {
desc.second.attributes.isUncached = value;
MapInternal(&desc);
});
}

void MemoryManager::SetChunkPermission(span<u8> memory, memory::Permission permission) {
std::unique_lock lock(mutex);

ForeachChunkinRange(memory, [&](std::pair<u8 *, ChunkDescriptor> &desc) __attribute__((always_inline)) {
desc.second.permission = permission;
MapInternal(&desc);
});
}

std::optional<std::pair<u8 *, ChunkDescriptor>> MemoryManager::GetChunk(u8 *addr) {
std::shared_lock lock(mutex);

if (!addressSpace.contains(addr)) [[unlikely]]
return std::nullopt;

auto chunkBase = upper_bound(addr);
if (addr < chunkBase->first) [[likely]]
--chunkBase;

return std::make_optional(*chunkBase);
}
__attribute__((always_inline)) void MemoryManager::MapCodeMemory(span<u8> memory, memory::Permission permission) {
std::unique_lock lock(mutex);

std::pair<u8 *, ChunkDescriptor> temp(
memory.data(),
ChunkDescriptor{
.size = memory.size(),
.permission = permission,
.state = memory::states::Code});

MapInternal(&temp);
}

__attribute__((always_inline)) void MemoryManager::MapMutableCodeMemory(span<u8> memory) {
std::unique_lock lock(mutex);

std::pair<u8 *, ChunkDescriptor> temp(
memory.data(),
ChunkDescriptor{
.size = memory.size(),
.permission = {true, true, false},
.state = memory::states::CodeMutable});

MapInternal(&temp);
}

__attribute__((always_inline)) void MemoryManager::MapStackMemory(span<u8> memory) {
std::unique_lock lock(mutex);

std::pair<u8 *, ChunkDescriptor> temp(
memory.data(),
ChunkDescriptor{
.size = memory.size(),
.permission = {true, true, false},
.state = memory::states::Stack,
.isSrcMergeDisallowed = true});

MapInternal(&temp);
}

__attribute__((always_inline)) void MemoryManager::MapHeapMemory(span<u8> memory) {
std::unique_lock lock(mutex);

std::pair<u8 *, ChunkDescriptor> temp(
memory.data(),
ChunkDescriptor{
.size = memory.size(),
.permission = {true, true, false},
.state = memory::states::Heap});

MapInternal(&temp);
}
__attribute__((always_inline)) void MemoryManager::MapSharedMemory(span<u8> memory, memory::Permission permission) {
std::unique_lock lock(mutex);

std::pair<u8 *, ChunkDescriptor> temp(
memory.data(),
ChunkDescriptor{
.size = memory.size(),
.permission = permission,
.state = memory::states::SharedMemory,
.isSrcMergeDisallowed = true});

MapInternal(&temp);
}

__attribute__((always_inline)) void MemoryManager::MapTransferMemory(span<u8> memory, memory::Permission permission) {
std::unique_lock lock(mutex);

std::pair<u8 *, ChunkDescriptor> temp(
memory.data(),
ChunkDescriptor{
.size = memory.size(),
.permission = permission,
.state = permission.raw ? memory::states::TransferMemory : memory::states::TransferMemoryIsolated,
.isSrcMergeDisallowed = true});

MapInternal(&temp);
}

__attribute__((always_inline)) void MemoryManager::MapThreadLocalMemory(span<u8> memory) {
std::unique_lock lock(mutex);

std::pair<u8 *, ChunkDescriptor> temp(
memory.data(),
ChunkDescriptor{
.size = memory.size(),
.permission = {true, true, false},
.state = memory::states::ThreadLocal});
MapInternal(&temp);
}

__attribute__((always_inline)) void MemoryManager::Reserve(span<u8> memory) {
std::unique_lock lock(mutex);

std::pair<u8 *, ChunkDescriptor> temp(
memory.data(),
ChunkDescriptor{
.size = memory.size(),
.permission = {false, false, false},
.state = memory::states::Reserved});
MapInternal(&temp);
}
__attribute__((always_inline)) void MemoryManager::UnmapMemory(span<u8> memory) {
std::unique_lock lock(mutex);

ForeachChunkinRange(memory, [&](std::pair<u8 *, ChunkDescriptor> &desc) {
if (desc.second.state != memory::states::Unmapped)
FreeMemory(span<u8>((u8 *)desc.first, desc.second.size));
});

std::pair<u8 *, ChunkDescriptor> temp(
memory.data(),
ChunkDescriptor{
.size = memory.size(),
.permission = {false, false, false},
.state = memory::states::Unmapped});
MapInternal(&temp);
}

__attribute__((always_inline)) void MemoryManager::FreeMemory(span<u8> memory) {
u8 *alignedStart{util::AlignUp(memory.data(), constant::PageSize)};
u8 *alignedEnd{util::AlignDown(memory.end().base(), constant::PageSize)};

if (alignedStart < alignedEnd)
if (madvise(alignedStart, static_cast<size_t>(alignedEnd - alignedStart), MADV_REMOVE) == -1)
throw exception("Failed to free memory: {}", strerror(errno)) ;
if (alignedStart < alignedEnd) [[likely]]
if (madvise(alignedStart, static_cast<size_t>(alignedEnd - alignedStart), MADV_REMOVE) == -1) [[unlikely]]
Logger::Error("Failed to free memory: {}", strerror(errno));
}
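A small illustration of the page clamping FreeMemory performs above: only whole pages strictly inside the span are released, and the 0x1000 page size here is assumed for the example (the code uses constant::PageSize):

#include <cassert>
#include <cstdint>

constexpr std::uintptr_t PageSize{0x1000};

constexpr std::uintptr_t AlignUp(std::uintptr_t value) {
    return (value + PageSize - 1) & ~(PageSize - 1);
}

constexpr std::uintptr_t AlignDown(std::uintptr_t value) {
    return value & ~(PageSize - 1);
}

int main() {
    std::uintptr_t start{0x10800}, end{0x13400};
    // The partial pages at both edges are kept; madvise(MADV_REMOVE) would cover 0x11000 - 0x13000.
    assert(AlignUp(start) == 0x11000 && AlignDown(end) == 0x13000);
    return 0;
}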
void MemoryManager::InsertChunk(const ChunkDescriptor &chunk) {
std::unique_lock lock(mutex);

auto upper{std::upper_bound(chunks.begin(), chunks.end(), chunk.ptr, [](const u8 *ptr, const ChunkDescriptor &chunk) -> bool { return ptr < chunk.ptr; })};
if (upper == chunks.begin())
throw exception("InsertChunk: Chunk inserted outside address space: 0x{:X} - 0x{:X} and 0x{:X} - 0x{:X}", upper->ptr, upper->ptr + upper->size, chunk.ptr, chunk.ptr + chunk.size);

upper = chunks.erase(upper, std::upper_bound(upper, chunks.end(), chunk.ptr + chunk.size, [](const u8 *ptr, const ChunkDescriptor &chunk) -> bool { return ptr < chunk.ptr + chunk.size; }));
if (upper != chunks.end() && upper->ptr < chunk.ptr + chunk.size) {
auto end{upper->ptr + upper->size};
upper->ptr = chunk.ptr + chunk.size;
upper->size = static_cast<size_t>(end - upper->ptr);
}

auto lower{std::prev(upper)};
if (lower->ptr == chunk.ptr && lower->size == chunk.size) {
lower->state = chunk.state;
lower->permission = chunk.permission;
lower->attributes = chunk.attributes;
lower->memory = chunk.memory;
} else if (lower->ptr + lower->size > chunk.ptr + chunk.size) {
auto lowerExtension{*lower};
lowerExtension.ptr = chunk.ptr + chunk.size;
lowerExtension.size = static_cast<size_t>((lower->ptr + lower->size) - lowerExtension.ptr);

lower->size = static_cast<size_t>(chunk.ptr - lower->ptr);
if (lower->size) {
upper = chunks.insert(upper, lowerExtension);
chunks.insert(upper, chunk);
} else {
auto lower2{std::prev(lower)};
if (chunk.IsCompatible(*lower2) && lower2->ptr + lower2->size >= chunk.ptr) {
lower2->size = static_cast<size_t>(chunk.ptr + chunk.size - lower2->ptr);
upper = chunks.erase(lower);
} else {
*lower = chunk;
}
upper = chunks.insert(upper, lowerExtension);
}
} else if (chunk.IsCompatible(*lower) && lower->ptr + lower->size >= chunk.ptr) {
lower->size = static_cast<size_t>(chunk.ptr + chunk.size - lower->ptr);
} else {
if (lower->ptr + lower->size > chunk.ptr)
lower->size = static_cast<size_t>(chunk.ptr - lower->ptr);
if (upper != chunks.end() && chunk.IsCompatible(*upper) && chunk.ptr + chunk.size >= upper->ptr) {
upper->ptr = chunk.ptr;
upper->size = chunk.size + upper->size;
} else {
chunks.insert(upper, chunk);
}
}
void MemoryManager::AddRef(const std::shared_ptr<type::KMemory> &ptr) {
memRefs.push_back(ptr);
}

std::optional<ChunkDescriptor> MemoryManager::Get(void *ptr) {
std::shared_lock lock(mutex);
void MemoryManager::RemoveRef(const std::shared_ptr<type::KMemory> &ptr) {
std::vector<std::shared_ptr<type::KMemory>>::iterator i{std::find(memRefs.begin(), memRefs.end(), ptr)};

auto chunk{std::upper_bound(chunks.begin(), chunks.end(), reinterpret_cast<u8 *>(ptr), [](const u8 *ptr, const ChunkDescriptor &chunk) -> bool { return ptr < chunk.ptr; })};
if (chunk-- != chunks.begin())
if ((chunk->ptr + chunk->size) > ptr)
return std::make_optional(*chunk);

return std::nullopt;
if (*i == ptr) {
memRefs.erase(i);
}
}

size_t MemoryManager::GetUserMemoryUsage() {
std::shared_lock lock(mutex);
size_t size{};
for (const auto &chunk : chunks)
if (chunk.state == memory::states::Heap)
size += chunk.size;
return size + code.size() + state.process->mainThreadStack->guest.size();

for (auto &chunk : chunks) {
if (chunk.second.state == memory::states::Heap)
size += chunk.second.size;
}

return size + code.size() + state.process->mainThreadStack.size();
}

size_t MemoryManager::GetSystemResourceUsage() {
@@ -6,6 +6,7 @@
#include <sys/mman.h>
#include <common.h>
#include <common/file_descriptor.h>
#include <map>

namespace skyline {
namespace kernel::type {

@@ -17,7 +18,7 @@ namespace skyline {
/**
* @brief Initializes all permissions to false
*/
constexpr Permission() : r(), w(), x() {}
constexpr Permission() : raw() {}

/**
* @brief Initializes permissions where the first three bits correspond to RWX
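The switch from ": r(), w(), x()" to ": raw()", together with the later use of permission.raw, suggests Permission overlays its RWX bitfields with a raw integer view; a hedged sketch of that shape (field layout assumed, not taken from the header):

// Sketch only: ": raw()" zero-initializes every flag at once through the raw view.
#include <cstdint>

union Permission {
    struct {
        bool r : 1;
        bool w : 1;
        bool x : 1;
    };
    std::uint8_t raw;

    constexpr Permission() : raw() {}
    constexpr Permission(bool read, bool write, bool execute) : r(read), w(write), x(execute) {}
};

int main() {
    Permission rw{true, true, false};
    return rw.raw != 0 ? 0 : 1; // any set bit makes the raw view non-zero
}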
@@ -93,26 +94,28 @@ namespace skyline {
enum class MemoryType : u8 {
Unmapped = 0x0,
Io = 0x1,
Normal = 0x2,
CodeStatic = 0x3,
Static = 0x2,
Code = 0x3,
CodeMutable = 0x4,
Heap = 0x5,
SharedMemory = 0x6,
Alias = 0x7,
ModuleCodeStatic = 0x8,
ModuleCodeMutable = 0x9,
AliasCode = 0x8,
AliasCodeData = 0x9,
Ipc = 0xA,
Stack = 0xB,
ThreadLocal = 0xC,
TransferMemoryIsolated = 0xD,
TransferMemory = 0xE,
ProcessMemory = 0xF,
SharedCode = 0xF,
Reserved = 0x10,
NonSecureIpc = 0x11,
NonDeviceIpc = 0x12,
KernelStack = 0x13,
CodeReadOnly = 0x14,
CodeWritable = 0x15,
CodeGenerated = 0x14,
CodeExternal = 0x15,
Coverage = 0x16,
InsecureMemory = 0x17
};

/**
@@ -138,7 +141,7 @@
bool ipcSendAllowed : 1; //!< If this block is allowed to be sent as an IPC buffer with flags=0
bool nonDeviceIpcSendAllowed : 1; //!< If this block is allowed to be sent as an IPC buffer with flags=3
bool nonSecureIpcSendAllowed : 1; //!< If this block is allowed to be sent as an IPC buffer with flags=1
bool _pad0_ : 1;
bool isMappedInKernel : 1; //!< If this block is mapped in kernel
bool processPermissionChangeAllowed : 1; //!< If the application can use svcSetProcessMemoryPermission on this block
bool mapAllowed : 1; //!< If the application can use svcMapMemory on this block
bool unmapProcessCodeMemoryAllowed : 1; //!< If the application can use svcUnmapProcessCodeMemory on this block

@@ -151,6 +154,7 @@
bool mapProcessAllowed : 1; //!< If the application can use svcMapProcessMemory on this block
bool attributeChangeAllowed : 1; //!< If the application can use svcSetMemoryAttribute on this block
bool codeMemoryAllowed : 1; //!< If the application can use svcCreateCodeMemory on this block
bool isLinearMapped : 1; //!< If this block is mapped linearly
};
u32 value{};
};

@@ -162,26 +166,29 @@
*/
namespace states {
constexpr MemoryState Unmapped{0x00000000};
constexpr MemoryState Io{0x00002001};
constexpr MemoryState CodeStatic{0x00DC7E03};
constexpr MemoryState CodeMutable{0x03FEBD04};
constexpr MemoryState Heap{0x037EBD05};
constexpr MemoryState SharedMemory{0x00402006};
constexpr MemoryState Alias{0x00482907};
constexpr MemoryState AliasCode{0x00DD7E08};
constexpr MemoryState AliasCodeData{0x03FFBD09};
constexpr MemoryState Ipc{0x005C3C0A};
constexpr MemoryState Stack{0x005C3C0B};
constexpr MemoryState ThreadLocal{0x0040200C};
constexpr MemoryState TransferMemoryIsolated{0x015C3C0D};
constexpr MemoryState TransferMemory{0x005C380E};
constexpr MemoryState SharedCode{0x0040380F};
constexpr MemoryState Io{0x00182001};
constexpr MemoryState Static{0x00042002};
constexpr MemoryState Code{0x04DC7E03};
constexpr MemoryState CodeMutable{0x07FEBD04};
constexpr MemoryState Heap{0x077EBD05};
constexpr MemoryState SharedMemory{0x04402006};
constexpr MemoryState AliasCode{0x04DD7E08};
constexpr MemoryState AliasCodeData{0x07FFBD09};
constexpr MemoryState Ipc{0x045C3C0A};
constexpr MemoryState Stack{0x045C3C0B};
constexpr MemoryState ThreadLocal{0x0400200C};
constexpr MemoryState TransferMemoryIsolated{0x055C3C0D};
constexpr MemoryState TransferMemory{0x045C380E};
constexpr MemoryState SharedCode{0x0440380F};
constexpr MemoryState Reserved{0x00000010};
constexpr MemoryState NonSecureIpc{0x005C3811};
constexpr MemoryState NonDeviceIpc{0x004C2812};
constexpr MemoryState NonSecureIpc{0x045C3811};
constexpr MemoryState NonDeviceIpc{0x044C2812};
constexpr MemoryState KernelStack{0x00002013};
constexpr MemoryState CodeReadOnly{0x00402214};
constexpr MemoryState CodeWritable{0x00402015};
constexpr MemoryState CodeGenerated{0x04402214};
constexpr MemoryState CodeExternal{0x04402015};
constexpr MemoryState Coverage{0x00002016};
constexpr MemoryState InsecureMemory{0x05583817};
}
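An observation about the constants above that is easy to verify by eye: the low byte of each MemoryState value matches its MemoryType ordinal (for example Code{0x04DC7E03} carries type 0x3), with the higher bits acting as capability flags. A standalone check under that reading:

#include <cstdint>

enum class MemoryType : std::uint8_t { Code = 0x3 };

constexpr std::uint32_t CodeState{0x04DC7E03};

static_assert(static_cast<MemoryType>(CodeState & 0xFF) == MemoryType::Code);

int main() { return 0; }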
|
||||
enum class AddressSpaceType : u8 {

@@ -194,15 +201,14 @@
namespace kernel {
struct ChunkDescriptor {
u8 *ptr;
bool isSrcMergeDisallowed;
size_t size;
memory::Permission permission;
memory::MemoryState state;
memory::MemoryAttribute attributes;
kernel::type::KMemory *memory{};

constexpr bool IsCompatible(const ChunkDescriptor &chunk) const {
return chunk.permission == permission && chunk.state.value == state.value && chunk.attributes.value == attributes.value && chunk.memory == memory;
constexpr bool IsCompatible(const ChunkDescriptor &chunk) const noexcept {
return chunk.permission == permission && chunk.state.value == state.value && chunk.attributes.value == attributes.value && !isSrcMergeDisallowed;
}
};
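A standalone sketch of the compatibility test above with the fields reduced to plain ints: descriptors with equal permission/state/attributes may merge, unless isSrcMergeDisallowed is set (as it is for stack, shared and transfer mappings in this commit):

#include <cassert>

struct Chunk {
    bool isSrcMergeDisallowed{};
    int permission{}, state{}, attributes{};

    constexpr bool IsCompatible(const Chunk &chunk) const noexcept {
        return chunk.permission == permission && chunk.state == state && chunk.attributes == attributes && !isSrcMergeDisallowed;
    }
};

int main() {
    Chunk heapA{}, heapB{}, stack{.isSrcMergeDisallowed = true};
    assert(heapA.IsCompatible(heapB));  // identical attributes: may merge
    assert(!stack.IsCompatible(heapB)); // merge-disallowed chunk never merges
    return 0;
}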
|
||||
@ -212,7 +218,16 @@ namespace skyline {
|
||||
class MemoryManager {
private:
const DeviceState &state;
std::vector<ChunkDescriptor> chunks;
std::map<u8 *, ChunkDescriptor> chunks;

std::vector<std::shared_ptr<type::KMemory>> memRefs;

// Workaround for broken std implementation
std::map<u8 *, ChunkDescriptor>::iterator upper_bound(u8 *address);

void MapInternal(std::pair<u8 *, ChunkDescriptor> *newDesc);

void ForeachChunkinRange(span<u8> memory, auto editCallback);

public:
memory::AddressSpaceType addressSpaceType{};

@@ -225,11 +240,13 @@
span<u8> stack{};
span<u8> tlsIo{}; //!< TLS/IO

size_t setHeapSize; //!< For use by svcSetHeapSize

std::shared_mutex mutex; //!< Synchronizes any operations done on the VMM, it's locked in shared mode by readers and exclusive mode by writers

MemoryManager(const DeviceState &state);
MemoryManager(const DeviceState &state) noexcept;

~MemoryManager();
~MemoryManager() noexcept;

/**
* @note This should be called before any mappings in the VMM or calls to InitalizeRegions are done

@@ -255,14 +272,57 @@
span<u8> CreateMirrors(const std::vector<span<u8>> &regions);

/**
* @brief Frees the underlying physical memory for all full pages in the contained mapping
* @note All subsequent accesses to freed memory will return 0s
* @brief Sets the attributes for chunks within a certain range
*/
void SetLockOnChunks(span<u8> memory, bool value);

void SetCPUCachingOnChunks(span<u8> memory, bool value);

/**
* @brief Sets the permission for chunks within a certain range
* @note The permissions set here are not accurate to the actual permissions set on the chunk and are only for the guest
*/
void SetChunkPermission(span<u8> memory, memory::Permission permission);

/**
* @brief Gets the highest chunk's descriptor that contains this address
*/
std::optional<std::pair<u8 *, ChunkDescriptor>> GetChunk(u8 *addr);

/**
* Various mapping functions for use by the guest
* @note UnmapMemory frees the underlying memory as well
*/
void MapCodeMemory(span<u8> memory, memory::Permission permission);

void MapMutableCodeMemory(span<u8> memory);

void MapStackMemory(span<u8> memory);

void MapHeapMemory(span<u8> memory);

void MapSharedMemory(span<u8> memory, memory::Permission permission);

void MapTransferMemory(span<u8> memory, memory::Permission permission);

void MapThreadLocalMemory(span<u8> memory);

void Reserve(span<u8> memory);

void UnmapMemory(span<u8> memory);

/**
* Frees the underlying memory
* @note Memory that's not aligned to page boundaries at the edges of the span will not be freed
*/
void FreeMemory(span<u8> memory);

void InsertChunk(const ChunkDescriptor &chunk);
/**
* Allows you to add/remove references to shared/transfer memory
*/
void AddRef(const std::shared_ptr<type::KMemory> &ptr);

std::optional<ChunkDescriptor> Get(void *ptr);
void RemoveRef(const std::shared_ptr<type::KMemory> &ptr);

/**
* @return The cumulative size of all heap (Physical Memory + Process Heap) memory mappings, the code region and the main thread stack in bytes
@@ -11,37 +11,56 @@
namespace skyline::kernel::svc {
void SetHeapSize(const DeviceState &state) {
u32 size{state.ctx->gpr.w1};
u64 size{state.ctx->gpr.w1};

if (!util::IsAligned(size, 0x200000)) {
if (!util::IsAligned(size, 0x200000)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
state.ctx->gpr.x1 = 0;

Logger::Warn("'size' not divisible by 2MB: {}", size);
Logger::Warn("'size' not divisible by 2MB: 0x{:X}", size);
return;
} else if (state.process->memory.heap.size() < size) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
state.ctx->gpr.x1 = 0;

Logger::Warn("'size' exceeded size of heap region: 0x{:X}", size);
return;
}

auto &heap{state.process->heap};
heap->Resize(size);
size_t heapCurrSize{state.process->memory.setHeapSize};
u8 *heapBaseAddr{state.process->memory.heap.data()};

if (heapCurrSize < size)
state.process->memory.MapHeapMemory(span<u8>{heapBaseAddr + heapCurrSize, size - heapCurrSize});
else if (size < heapCurrSize)
state.process->memory.UnmapMemory(span<u8>{heapBaseAddr + size, heapCurrSize - size});

state.process->memory.setHeapSize = size;

state.ctx->gpr.w0 = Result{};
state.ctx->gpr.x1 = reinterpret_cast<u64>(heap->guest.data());
state.ctx->gpr.x1 = reinterpret_cast<u64>(heapBaseAddr);

Logger::Debug("Allocated at 0x{:X} - 0x{:X} (0x{:X} bytes)", heap->guest.data(), heap->guest.end().base(), heap->guest.size());
Logger::Debug("Heap size changed to 0x{:X} bytes (0x{:X} - 0x{:X})", size, heapBaseAddr, heapBaseAddr + size);
}
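Illustrative numbers (not from the commit) for the grow/shrink paths above: the svc only maps or unmaps the delta between the currently set heap size and the requested one.

#include <cassert>
#include <cstddef>

int main() {
    std::size_t heapCurrSize{0x400000};

    std::size_t grow{0x600000};   // larger request: map [heapBase + 0x400000, heapBase + 0x600000)
    assert(grow - heapCurrSize == 0x200000);

    std::size_t shrink{0x200000}; // smaller request: unmap [heapBase + 0x200000, heapBase + 0x400000)
    assert(heapCurrSize - shrink == 0x200000);
    return 0;
}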
void SetMemoryAttribute(const DeviceState &state) {
auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
if (!util::IsPageAligned(pointer)) {
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
if (!util::IsPageAligned(address)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("'pointer' not page aligned: 0x{:X}", pointer);
Logger::Warn("'address' not page aligned: 0x{:X}", address);
return;
}

size_t size{state.ctx->gpr.x1};
if (!util::IsPageAligned(size)) {
u64 size{state.ctx->gpr.x1};
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
Logger::Warn("'size' {}: 0x{:X}", size ? "not page aligned" : "is zero", size);
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
return;
}

if (address >= (address + size) || !state.process->memory.AddressSpaceContains(span<u8>{address, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidCurrentMemory;
Logger::Warn("Invalid address and size combination: 'address': 0x{:X}, 'size': 0x{:X} ", address, size);
return;
}
@@ -49,171 +68,152 @@ namespace skyline::kernel::svc {
memory::MemoryAttribute value{.value = state.ctx->gpr.w3};

auto maskedValue{mask.value | value.value};
if (maskedValue != mask.value || !mask.isUncached || mask.isDeviceShared || mask.isBorrowed || mask.isIpcLocked) {
if (maskedValue != mask.value || !mask.isUncached || mask.isDeviceShared || mask.isBorrowed || mask.isIpcLocked) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidCombination;
Logger::Warn("'mask' invalid: 0x{:X}, 0x{:X}", mask.value, value.value);
return;
}

auto chunk{state.process->memory.Get(pointer)};
if (!chunk) {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("Cannot find memory region: 0x{:X}", pointer);
return;
}
auto chunk{state.process->memory.GetChunk(address)};

if (!chunk->state.attributeChangeAllowed) {
// We only check the first found chunk for whatever reason.
if (!chunk->second.state.attributeChangeAllowed) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidState;
Logger::Warn("Attribute change not allowed for chunk: 0x{:X}", pointer);
Logger::Warn("Attribute change not allowed for chunk: 0x{:X}", chunk->first);
return;
}

auto newChunk{*chunk};
newChunk.ptr = pointer;
newChunk.size = size;
newChunk.attributes.isUncached = value.isUncached;
state.process->memory.InsertChunk(newChunk);
state.process->memory.SetCPUCachingOnChunks(span<u8>{address, size}, value.isUncached);

Logger::Debug("Set CPU caching to {} at 0x{:X} - 0x{:X} (0x{:X} bytes)", !static_cast<bool>(value.isUncached), pointer, pointer + size, size);
Logger::Debug("Set CPU caching to {} at 0x{:X} - 0x{:X} (0x{:X} bytes)", static_cast<bool>(value.isUncached), address, address + size, size);
state.ctx->gpr.w0 = Result{};
}
void MapMemory(const DeviceState &state) {
auto destination{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
auto source{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
u8 *destination{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
u8 *source{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
size_t size{state.ctx->gpr.x2};

if (!util::IsPageAligned(destination) || !util::IsPageAligned(source)) {
if (!util::IsPageAligned(destination) || !util::IsPageAligned(source)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("Addresses not page aligned: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
Logger::Warn("Addresses not page aligned: 'source': 0x{:X}, 'destination': 0x{:X}, 'size': 0x{:X} bytes", source, destination, size);
return;
}

if (!util::IsPageAligned(size)) {
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
Logger::Warn("'size' {}: 0x{:X}", size ? "not page aligned" : "is zero", size);
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
return;
}

auto stack{state.process->memory.stack};
if (!stack.contains(span<u8>{destination, size})) {
if (destination >= (destination + size) || !state.process->memory.AddressSpaceContains(span<u8>{destination, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidCurrentMemory;
Logger::Warn("Invalid address and size combination: 'destination': 0x{:X}, 'size': 0x{:X} bytes", destination, size);
return;
}

if (source >= (source + size) || !state.process->memory.AddressSpaceContains(span<u8>{source, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidCurrentMemory;
Logger::Warn("Invalid address and size combination: 'source': 0x{:X}, 'size': 0x{:X} bytes", source, size);
return;
}

if (!state.process->memory.stack.contains(span<u8>{destination, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidMemoryRegion;
Logger::Warn("Destination not within stack region: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
Logger::Warn("Destination not within stack region: 'source': 0x{:X}, 'destination': 0x{:X}, 'size': 0x{:X} bytes", source, destination, size);
return;
}

auto chunk{state.process->memory.Get(source)};
if (!chunk) {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("Source has no descriptor: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
return;
}
if (!chunk->state.mapAllowed) {
auto chunk{state.process->memory.GetChunk(source)};
if (!chunk->second.state.mapAllowed) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidState;
Logger::Warn("Source doesn't allow usage of svcMapMemory: Source: 0x{:X}, Destination: 0x{:X}, Size: 0x{:X}, MemoryState: 0x{:X}", source, destination, size, chunk->state.value);
Logger::Warn("Source doesn't allow usage of svcMapMemory: 'source': 0x{:X}, 'size': 0x{:X}, MemoryState: 0x{:X}", source, size, chunk->second.state.value);
return;
}

state.process->NewHandle<type::KPrivateMemory>(span<u8>{destination, size}, chunk->permission, memory::states::Stack);
state.process->memory.MapStackMemory(span<u8>{destination, size});
std::memcpy(destination, source, size);

auto object{state.process->GetMemoryObject(source)};
if (!object)
throw exception("svcMapMemory: Cannot find memory object in handle table for address 0x{:X}", source);
object->item->UpdatePermission(span<u8>{source, size}, {false, false, false});
state.process->memory.SetChunkPermission(span<u8>{source, size}, {false, false, false});
state.process->memory.SetLockOnChunks(span<u8>{source, size}, true);

Logger::Debug("Mapped range 0x{:X} - 0x{:X} to 0x{:X} - 0x{:X} (Size: 0x{:X} bytes)", source, source + size, destination, destination + size, size);
state.ctx->gpr.w0 = Result{};
}
void UnmapMemory(const DeviceState &state) {
auto source{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
auto destination{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
u8 *destination{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
u8 *source{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
size_t size{state.ctx->gpr.x2};

if (!util::IsPageAligned(destination) || !util::IsPageAligned(source)) {
if (!util::IsPageAligned(destination) || !util::IsPageAligned(source)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("Addresses not page aligned: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
Logger::Warn("Addresses not page aligned: 'source': 0x{:X}, 'destination': 0x{:X}, 'size': {} bytes", source, destination, size);
return;
}

if (!util::IsPageAligned(size)) {
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
Logger::Warn("'size' {}: 0x{:X}", size ? "not page aligned" : "is zero", size);
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
return;
}

auto stack{state.process->memory.stack};
if (!stack.contains(span<u8>{source, size})) {
if (!state.process->memory.stack.contains(span<u8>{destination, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidMemoryRegion;
Logger::Warn("Source not within stack region: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
Logger::Warn("Source not within stack region: 'source': 0x{:X}, 'destination': 0x{:X}, 'size': 0x{:X} bytes", source, destination, size);
return;
}

auto sourceChunk{state.process->memory.Get(source)};
auto destChunk{state.process->memory.Get(destination)};
if (!sourceChunk || !destChunk) {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("Addresses have no descriptor: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
return;
auto dstChunk{state.process->memory.GetChunk(destination)};
while (dstChunk->second.state.value == memory::states::Unmapped)
dstChunk = state.process->memory.GetChunk(dstChunk->first + dstChunk->second.size);

if ((destination + size) > dstChunk->first) [[likely]] {
state.process->memory.SetChunkPermission(span<u8>{source + (dstChunk->first - destination), dstChunk->second.size}, dstChunk->second.permission);
state.process->memory.SetLockOnChunks(span<u8>{source + (dstChunk->first - destination), dstChunk->second.size}, false);

std::memcpy(source + (dstChunk->first - destination), dstChunk->first, dstChunk->second.size);

state.process->memory.UnmapMemory(span<u8>{destination, size});
}

if (!destChunk->state.mapAllowed) {
state.ctx->gpr.w0 = result::InvalidState;
Logger::Warn("Destination doesn't allow usage of svcMapMemory: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes) 0x{:X}", source, destination, size, destChunk->state.value);
return;
}

auto destObject{state.process->GetMemoryObject(destination)};
if (!destObject)
throw exception("svcUnmapMemory: Cannot find destination memory object in handle table for address 0x{:X}", destination);

destObject->item->UpdatePermission(span<u8>{destination, size}, sourceChunk->permission);

std::memcpy(source, destination, size);

auto sourceObject{state.process->GetMemoryObject(source)};
if (!sourceObject)
throw exception("svcUnmapMemory: Cannot find source memory object in handle table for address 0x{:X}", source);

state.process->memory.FreeMemory(std::span<u8>(source, size));
state.process->CloseHandle(sourceObject->handle);

Logger::Debug("Unmapped range 0x{:X} - 0x{:X} to 0x{:X} - 0x{:X} (Size: 0x{:X} bytes)", source, source + size, destination, destination + size, size);
Logger::Debug("Unmapped range 0x{:X} - 0x{:X} to 0x{:X} - 0x{:X} (Size: 0x{:X} bytes)", destination, destination + size, source, source + size, size);
state.ctx->gpr.w0 = Result{};
}
void QueryMemory(const DeviceState &state) {
memory::MemoryInfo memInfo{};

auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x2)};
auto chunk{state.process->memory.Get(pointer)};
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x2)};
auto chunk{state.process->memory.GetChunk(address)};

if (chunk) {
memInfo = {
.address = reinterpret_cast<u64>(chunk->ptr),
.size = chunk->size,
.type = static_cast<u32>(chunk->state.type),
.attributes = chunk->attributes.value,
.permissions = static_cast<u32>(chunk->permission.Get()),
.address = reinterpret_cast<u64>(chunk->first),
.size = chunk->second.size,
.type = static_cast<u32>(chunk->second.state.type),
.attributes = chunk->second.attributes.value,
.permissions = static_cast<u32>(chunk->second.permission.Get()),
.deviceRefCount = 0,
.ipcRefCount = 0,
};

Logger::Debug("Address: 0x{:X}, Region Start: 0x{:X}, Size: 0x{:X}, Type: 0x{:X}, Is Uncached: {}, Permissions: {}{}{}", pointer, memInfo.address, memInfo.size, memInfo.type, static_cast<bool>(chunk->attributes.isUncached), chunk->permission.r ? 'R' : '-', chunk->permission.w ? 'W' : '-', chunk->permission.x ? 'X' : '-');
Logger::Debug("Address: 0x{:X}, Region Start: 0x{:X}, Size: 0x{:X}, Type: 0x{:X}, Attributes: 0x{:X}, Permissions: {}{}{}", address, memInfo.address, memInfo.size, memInfo.type, memInfo.attributes, chunk->second.permission.r ? 'R' : '-', chunk->second.permission.w ? 'W' : '-', chunk->second.permission.x ? 'X' : '-');
} else {
auto addressSpaceEnd{reinterpret_cast<u64>(state.process->memory.addressSpace.end().base())};
u64 addressSpaceEnd{reinterpret_cast<u64>(state.process->memory.addressSpace.end().base())};

memInfo = {
.address = addressSpaceEnd,
.size = ~addressSpaceEnd + 1,
.size = 0 - addressSpaceEnd,
.type = static_cast<u32>(memory::MemoryType::Reserved),
};

Logger::Debug("Trying to query memory outside of the application's address space: 0x{:X}", pointer);
Logger::Debug("Trying to query memory outside of the application's address space: 0x{:X}", address);
}

*reinterpret_cast<memory::MemoryInfo *>(state.ctx->gpr.x0) = memInfo;
// The page info, which is always 0
state.ctx->gpr.x1 = 0;

state.ctx->gpr.w0 = Result{};
}
@@ -247,10 +247,6 @@
return;
}

auto stack{state.process->GetMemoryObject(stackTop)};
if (!stack)
throw exception("svcCreateThread: Cannot find memory object in handle table for thread stack: 0x{:X}", stackTop);

auto thread{state.process->CreateThread(entry, entryArgument, stackTop, priority, static_cast<u8>(idealCore))};
if (thread) {
Logger::Debug("Created thread #{} with handle 0x{:X} (Entry Point: 0x{:X}, Argument: 0x{:X}, Stack Pointer: 0x{:X}, Priority: {}, Ideal Core: {})", thread->id, thread->handle, entry, entryArgument, stackTop, priority, idealCore);
@ -476,31 +472,38 @@ namespace skyline::kernel::svc {
|
||||
try {
|
||||
KHandle handle{state.ctx->gpr.w0};
|
||||
auto object{state.process->GetHandle<type::KSharedMemory>(handle)};
|
||||
auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
|
||||
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
|
||||
|
||||
if (!util::IsPageAligned(pointer)) {
|
||||
if (!util::IsPageAligned(address)) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidAddress;
|
||||
Logger::Warn("'pointer' not page aligned: 0x{:X}", pointer);
|
||||
Logger::Warn("'address' not page aligned: 0x{:X}", address);
|
||||
return;
|
||||
}
|
||||
|
||||
size_t size{state.ctx->gpr.x2};
|
||||
if (!util::IsPageAligned(size)) {
|
||||
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidSize;
|
||||
Logger::Warn("'size' {}: 0x{:X}", size ? "not page aligned" : "is zero", size);
|
||||
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
|
||||
return;
|
||||
}
|
||||
|
||||
if (address >= (address + size) || !state.process->memory.AddressSpaceContains(span<u8>{address, size})) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidCurrentMemory;
|
||||
Logger::Warn("Invalid address and size combination: 'address': 0x{:X}, 'size': 0x{:X}", address, size);
|
||||
return;
|
||||
}
|
||||
|
||||
memory::Permission permission(static_cast<u8>(state.ctx->gpr.w3));
|
||||
if ((permission.w && !permission.r) || (permission.x && !permission.r)) {
|
||||
Logger::Warn("'permission' invalid: {}{}{}", permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
|
||||
if ((!permission.r && !permission.w && !permission.x) || (permission.w && !permission.r) || permission.x) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidNewMemoryPermission;
|
||||
Logger::Warn("'permission' invalid: {}{}{}", permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
|
||||
return;
|
||||
}
|
||||
|
||||
Logger::Debug("Mapping shared memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes) ({}{}{})", handle, pointer, pointer + size, size, permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
|
||||
Logger::Debug("Mapping shared memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes), with permissions: ({}{}{})", handle, address, address + size, size, permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
|
||||
|
||||
object->Map(span<u8>{pointer, size}, permission);
|
||||
object->Map(span<u8>{address, size}, permission);
|
||||
state.process->memory.AddRef(object);
|
||||
|
||||
state.ctx->gpr.w0 = Result{};
|
||||
} catch (const std::out_of_range &) {
|
||||
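The reworked handler above settles on a fixed validation order before it touches the mapping: page alignment of the address, a non-zero page-aligned size, an overflow and address-space containment check on the range, then the requested permission. A condensed sketch of that ordering (standalone; the helper callbacks and result names stand in for the emulator's own):

#include <cstdint>
#include <cstddef>
#include <functional>

enum class SvcResult { Success, InvalidAddress, InvalidSize, InvalidCurrentMemory, InvalidNewMemoryPermission };
struct Perm { bool r, w, x; };

SvcResult ValidateMapArgs(uint64_t address, size_t size, Perm perm,
                          const std::function<bool(uint64_t)> &isPageAligned,
                          const std::function<bool(uint64_t, size_t)> &addressSpaceContains) {
    if (!isPageAligned(address))
        return SvcResult::InvalidAddress;             // address must be page aligned
    if (!size || !isPageAligned(size))
        return SvcResult::InvalidSize;                // size must be non-zero and page aligned
    if (address >= address + size || !addressSpaceContains(address, size))
        return SvcResult::InvalidCurrentMemory;       // range must not wrap and must stay inside the address space
    if ((!perm.r && !perm.w && !perm.x) || (perm.w && !perm.r) || perm.x)
        return SvcResult::InvalidNewMemoryPermission; // W needs R, X is rejected for shared memory
    return SvcResult::Success;
}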
@ -513,24 +516,31 @@ namespace skyline::kernel::svc {
|
||||
try {
|
||||
KHandle handle{state.ctx->gpr.w0};
|
||||
auto object{state.process->GetHandle<type::KSharedMemory>(handle)};
|
||||
auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
|
||||
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
|
||||
|
||||
if (!util::IsPageAligned(pointer)) {
|
||||
if (!util::IsPageAligned(address)) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidAddress;
|
||||
Logger::Warn("'pointer' not page aligned: 0x{:X}", pointer);
|
||||
Logger::Warn("'address' not page aligned: 0x{:X}", address);
|
||||
return;
|
||||
}
|
||||
|
||||
size_t size{state.ctx->gpr.x2};
|
||||
if (!util::IsPageAligned(size)) {
|
||||
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidSize;
|
||||
Logger::Warn("'size' {}: 0x{:X}", size ? "not page aligned" : "is zero", size);
|
||||
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
|
||||
return;
|
||||
}
|
||||
|
||||
Logger::Debug("Unmapping shared memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes)", handle, pointer, pointer + size, size);
|
||||
if (address >= (address + size) || !state.process->memory.AddressSpaceContains(span<u8>{address, size})) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidCurrentMemory;
|
||||
Logger::Warn("Invalid address and size combination: 'address': 0x{:X}, 'size': 0x{:X}", address, size);
|
||||
return;
|
||||
}
|
||||
|
||||
object->Unmap(span<u8>{pointer, size});
|
||||
Logger::Debug("Unmapping shared memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes)", handle, address, address + size, size);
|
||||
|
||||
object->Unmap(span<u8>{address, size});
|
||||
state.process->memory.RemoveRef(object);
|
||||
|
||||
state.ctx->gpr.w0 = Result{};
|
||||
} catch (const std::out_of_range &) {
|
||||
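svcUnmapSharedMemory now runs the same range check before touching the mapping; the `address >= (address + size)` comparison rejects ranges that wrap past the top of the 64-bit space, which unsigned addition would otherwise silently fold back to a low address. A tiny standalone demonstration (the values are made up):

#include <cstdint>
#include <cstdio>

int main() {
    uint64_t address{0xFFFFFFFFFFFF0000ULL};
    uint64_t size{0x20000};                 // the end would lie past 2^64
    bool wraps{address >= address + size};  // the unsigned sum wraps, so this is true
    std::printf("wraps: %s\n", wraps ? "true" : "false");
}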
@ -540,29 +550,37 @@ namespace skyline::kernel::svc {
|
||||
}
|
||||
|
||||
void CreateTransferMemory(const DeviceState &state) {
|
||||
auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
|
||||
if (!util::IsPageAligned(pointer)) {
|
||||
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
|
||||
if (!util::IsPageAligned(address)) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidAddress;
|
||||
Logger::Warn("'pointer' not page aligned: 0x{:X}", pointer);
|
||||
Logger::Warn("'address' not page aligned: 0x{:X}", address);
|
||||
return;
|
||||
}
|
||||
|
||||
size_t size{state.ctx->gpr.x2};
|
||||
if (!util::IsPageAligned(size)) {
|
||||
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidSize;
|
||||
Logger::Warn("'size' {}: 0x{:X}", size ? "not page aligned" : "is zero", size);
|
||||
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
|
||||
return;
|
||||
}
|
||||
|
||||
if (address >= (address + size) || !state.process->memory.AddressSpaceContains(span<u8>{address, size})) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidCurrentMemory;
|
||||
Logger::Warn("Invalid address and size combination: 'address': 0x{:X}, 'size': 0x{:X}", address, size);
|
||||
return;
|
||||
}
|
||||
|
||||
memory::Permission permission(static_cast<u8>(state.ctx->gpr.w3));
|
||||
if ((permission.w && !permission.r) || (permission.x && !permission.r)) {
|
||||
if ((permission.w && !permission.r) || permission.x) [[unlikely]] {
|
||||
Logger::Warn("'permission' invalid: {}{}{}", permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
|
||||
state.ctx->gpr.w0 = result::InvalidNewMemoryPermission;
|
||||
return;
|
||||
}
|
||||
|
||||
auto tmem{state.process->NewHandle<type::KTransferMemory>(pointer, size, permission, permission.raw ? memory::states::TransferMemory : memory::states::TransferMemoryIsolated)};
|
||||
Logger::Debug("Creating transfer memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes) ({}{}{})", tmem.handle, pointer, pointer + size, size, permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
|
||||
auto tmem{state.process->NewHandle<kernel::type::KTransferMemory>(address, size, permission)};
|
||||
state.process->memory.AddRef(tmem.item);
|
||||
|
||||
Logger::Debug("Creating transfer memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes) ({}{}{})", tmem.handle, address, address + size, size, permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
|
||||
|
||||
state.ctx->gpr.w0 = Result{};
|
||||
state.ctx->gpr.w1 = tmem.handle;
|
||||
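The permission check for transfer memory also became stricter: the old condition only rejected write-without-read and execute-without-read combinations, while the new one rejects any executable permission outright. A side-by-side sketch of the two predicates (illustrative, not the emulator's code):

struct Perm { bool r, w, x; };

// Old: X was acceptable as long as R was also set
bool OldCheckRejects(Perm p) { return (p.w && !p.r) || (p.x && !p.r); }
// New: any X is rejected, W still requires R
bool NewCheckRejects(Perm p) { return (p.w && !p.r) || p.x; }

// Example: R-X passed the old check but fails the new one
// Perm rx{true, false, true}; OldCheckRejects(rx) == false, NewCheckRejects(rx) == true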
@ -896,8 +914,8 @@ namespace skyline::kernel::svc {
IdleTickCount = 10,
RandomEntropy = 11,
// 2.0.0+
AddressSpaceBaseAddr = 12,
AddressSpaceSize = 13,
AslrRegionBaseAddr = 12,
AslrRegionSize = 13,
StackRegionBaseAddr = 14,
StackRegionSize = 15,
// 3.0.0+
@ -965,11 +983,11 @@ namespace skyline::kernel::svc {
out = util::GetTimeTicks();
break;

case InfoState::AddressSpaceBaseAddr:
case InfoState::AslrRegionBaseAddr:
out = reinterpret_cast<u64>(state.process->memory.base.data());
break;

case InfoState::AddressSpaceSize:
case InfoState::AslrRegionSize:
out = state.process->memory.base.size();
break;

@ -1019,93 +1037,64 @@ namespace skyline::kernel::svc {
|
||||
}
|
||||
|
||||
void MapPhysicalMemory(const DeviceState &state) {
|
||||
auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
|
||||
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
|
||||
size_t size{state.ctx->gpr.x1};
|
||||
|
||||
if (!util::IsPageAligned(pointer)) {
|
||||
Logger::Warn("Pointer 0x{:X} is not page aligned", pointer);
|
||||
if (!util::IsPageAligned(address)) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidAddress;
|
||||
Logger::Warn("'address' not page aligned: 0x{:X}", address);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!size || !util::IsPageAligned(size)) {
|
||||
Logger::Warn("Size 0x{:X} is not page aligned", size);
|
||||
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidSize;
|
||||
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!state.process->memory.alias.contains(span<u8>{pointer, size})) {
|
||||
Logger::Warn("Memory region 0x{:X} - 0x{:X} (0x{:X}) is invalid", pointer, pointer + size, size);
|
||||
if (address >= (address + size)) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidMemoryRegion;
|
||||
Logger::Warn("Invalid address and size combination: 'address': 0x{:X}, 'size': 0x{:X}", address, size);
|
||||
return;
|
||||
}
|
||||
|
||||
state.process->NewHandle<type::KPrivateMemory>(span<u8>{pointer, size}, memory::Permission{true, true, false}, memory::states::Heap);
|
||||
if (!state.process->memory.alias.contains(span<u8>{address, size})) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidMemoryRegion;
|
||||
Logger::Warn("Tried to map physical memory outside of alias region: 0x{:X} - 0x{:X} (0x{:X} bytes)", address, address + size, size);
|
||||
return;
|
||||
}
|
||||
|
||||
Logger::Debug("Mapped physical memory at 0x{:X} - 0x{:X} (0x{:X})", pointer, pointer + size, size);
|
||||
state.process->memory.MapHeapMemory(span<u8>{address, size});
|
||||
|
||||
Logger::Debug("Mapped physical memory at 0x{:X} - 0x{:X} (0x{:X} bytes)", address, address + size, size);
|
||||
state.ctx->gpr.w0 = Result{};
|
||||
}
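Once the checks pass, the handler maps heap backing over the requested range with a single `MapHeapMemory` call; the range itself must sit entirely inside the alias region. A toy model of that containment test (the region bounds are made up, `contains` mirrors the span check in the diff):

#include <cstdint>
#include <cstdio>

struct Region {
    uint64_t base, size;
    bool contains(uint64_t address, uint64_t length) const {
        return address >= base && address + length <= base + size;
    }
};

int main() {
    Region alias{0x1000000000ULL, 0x1000000000ULL}; // made-up alias region bounds
    std::printf("%d\n", alias.contains(0x1000001000ULL, 0x2000)); // 1: fully inside
    std::printf("%d\n", alias.contains(0x0FFFFFF000ULL, 0x2000)); // 0: straddles the base
}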
|
||||
|
||||
void UnmapPhysicalMemory(const DeviceState &state) {
|
||||
auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
|
||||
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
|
||||
size_t size{state.ctx->gpr.x1};
|
||||
|
||||
if (!util::IsPageAligned(pointer)) {
|
||||
Logger::Warn("Pointer 0x{:X} is not page aligned", pointer);
|
||||
if (!util::IsPageAligned(address)) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidAddress;
|
||||
Logger::Warn("'address' not page aligned: 0x{:X}", address);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!size || !util::IsPageAligned(size)) {
|
||||
Logger::Warn("Size 0x{:X} is not page aligned", size);
|
||||
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidSize;
|
||||
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!state.process->memory.alias.contains(span<u8>{pointer, size})) {
|
||||
Logger::Warn("Memory region 0x{:X} - 0x{:X} (0x{:X}) is invalid", pointer, pointer + size, size);
|
||||
if (!state.process->memory.alias.contains(span<u8>{address, size})) [[unlikely]] {
|
||||
state.ctx->gpr.w0 = result::InvalidMemoryRegion;
|
||||
Logger::Warn("Tried to unmap physical memory outside of alias region: 0x{:X} - 0x{:X} (0x{:X} bytes)", address, address + size, size);
|
||||
return;
|
||||
}
|
||||
|
||||
Logger::Debug("Unmapped physical memory at 0x{:X} - 0x{:X} (0x{:X})", pointer, pointer + size, size);
|
||||
|
||||
auto end{pointer + size};
|
||||
while (pointer < end) {
|
||||
auto chunk{state.process->memory.Get(pointer)};
|
||||
if (chunk && chunk->memory) {
|
||||
if (chunk->memory->objectType != type::KType::KPrivateMemory)
|
||||
throw exception("Trying to unmap non-private memory");
|
||||
|
||||
auto memory{static_cast<type::KPrivateMemory *>(chunk->memory)};
|
||||
auto initialSize{memory->guest.size()};
|
||||
if (memory->memoryState == memory::states::Heap) {
|
||||
if (memory->guest.data() >= pointer) {
|
||||
if (memory->guest.size() <= size) {
|
||||
memory->Resize(0);
|
||||
state.process->CloseHandle(memory->handle);
|
||||
} else {
|
||||
memory->Remap(span<u8>{pointer + size, static_cast<size_t>((pointer + memory->guest.size() - memory->guest.data())) - size});
|
||||
}
|
||||
} else if (memory->guest.data() < pointer) {
|
||||
memory->Resize(static_cast<size_t>(pointer - memory->guest.data()));
|
||||
|
||||
if (memory->guest.data() + initialSize > end)
|
||||
state.process->NewHandle<type::KPrivateMemory>(span<u8>{end, static_cast<size_t>(memory->guest.data() + initialSize - end)}, memory::Permission{true, true, false}, memory::states::Heap);
|
||||
}
|
||||
}
|
||||
pointer += initialSize;
|
||||
size -= initialSize;
|
||||
} else {
|
||||
auto block{*state.process->memory.Get(pointer)};
|
||||
pointer += block.size;
|
||||
size -= block.size;
|
||||
}
|
||||
}
|
||||
|
||||
state.process->memory.FreeMemory(std::span<u8>(pointer, size));
|
||||
state.process->memory.UnmapMemory(span<u8>{address, size});
|
||||
|
||||
Logger::Debug("Unmapped physical memory at 0x{:X} - 0x{:X} (0x{:X} bytes)", address, address + size, size);
|
||||
state.ctx->gpr.w0 = Result{};
|
||||
}
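The old unmap path walked the chunk list and resized, split or closed each KPrivateMemory handle it found; the new code hands the whole range to `UnmapMemory` and lets the chunk map absorb the hole. A toy model of what such a range operation does to a chunk map (an assumption about the shape of the real implementation, not a copy of it):

#include <cstdint>
#include <map>

enum class State { Unmapped, Heap };
struct Chunk { uint64_t size; State state; };
using ChunkMap = std::map<uint64_t, Chunk>; // keyed by chunk base address

void UnmapRange(ChunkMap &chunks, uint64_t base, uint64_t size) {
    uint64_t end{base + size};
    ChunkMap leftovers; // pieces of overlapping chunks that stick out of [base, end)
    for (auto it{chunks.begin()}; it != chunks.end();) {
        uint64_t chunkEnd{it->first + it->second.size};
        if (chunkEnd <= base || it->first >= end) { ++it; continue; } // no overlap
        if (it->first < base)
            leftovers[it->first] = {base - it->first, it->second.state};
        if (chunkEnd > end)
            leftovers[end] = {chunkEnd - end, it->second.state};
        it = chunks.erase(it);
    }
    chunks.insert(leftovers.begin(), leftovers.end());
    chunks[base] = {size, State::Unmapped}; // the freed range becomes one Unmapped chunk
}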
|
||||
|
||||
|
@ -17,15 +17,7 @@ namespace skyline::kernel::type {
/**
* @return A span representing the memory object on the guest
*/
span <u8> guest;

/**
* @brief Updates the permissions of a block of mapped memory
* @param ptr The starting address to change the permissions at
* @param size The size of the partition to change the permissions of
* @param permission The new permissions to be set for the memory
*/
virtual void UpdatePermission(span <u8> map, memory::Permission permission) = 0;
span<u8> guest;

virtual ~KMemory() = default;
};

@ -14,7 +14,6 @@ namespace skyline::kernel::type {
KProcess,
KSharedMemory,
KTransferMemory,
KPrivateMemory,
KSession,
KEvent,
};
@ -1,96 +0,0 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
// Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
|
||||
|
||||
#include <android/sharedmem.h>
|
||||
#include <asm/unistd.h>
|
||||
#include <unistd.h>
|
||||
#include "KPrivateMemory.h"
|
||||
#include "KProcess.h"
|
||||
|
||||
namespace skyline::kernel::type {
|
||||
KPrivateMemory::KPrivateMemory(const DeviceState &state, KHandle handle, span<u8> guest, memory::Permission permission, memory::MemoryState memState)
|
||||
: permission(permission),
|
||||
memoryState(memState),
|
||||
handle(handle),
|
||||
KMemory(state, KType::KPrivateMemory, guest) {
|
||||
if (!state.process->memory.AddressSpaceContains(guest))
|
||||
throw exception("KPrivateMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", guest.data(), guest.data() + guest.size());
|
||||
if (!util::IsPageAligned(guest.data()) || !util::IsPageAligned(guest.size()))
|
||||
throw exception("KPrivateMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", guest.data(), guest.data() + guest.size(), guest.size());
|
||||
|
||||
if (mprotect(guest.data(), guest.size(), PROT_READ | PROT_WRITE | PROT_EXEC) < 0) // We only need to reprotect as the allocation has already been reserved by the MemoryManager
|
||||
throw exception("An occurred while mapping private memory: {} with 0x{:X} @ 0x{:X}", strerror(errno), guest.data(), guest.size());
|
||||
|
||||
state.process->memory.InsertChunk(ChunkDescriptor{
|
||||
.ptr = guest.data(),
|
||||
.size = guest.size(),
|
||||
.permission = permission,
|
||||
.state = memState,
|
||||
.memory = this,
|
||||
});
|
||||
}
|
||||
|
||||
void KPrivateMemory::Resize(size_t nSize) {
|
||||
if (mprotect(guest.data(), nSize, PROT_READ | PROT_WRITE | PROT_EXEC) < 0)
|
||||
throw exception("An occurred while resizing private memory: {}", strerror(errno));
|
||||
|
||||
if (nSize < guest.size()) {
|
||||
state.process->memory.InsertChunk(ChunkDescriptor{
|
||||
.ptr = guest.data() + nSize,
|
||||
.size = guest.size() - nSize,
|
||||
.state = memory::states::Unmapped,
|
||||
});
|
||||
} else if (guest.size() < nSize) {
|
||||
state.process->memory.InsertChunk(ChunkDescriptor{
|
||||
.ptr = guest.data() + guest.size(),
|
||||
.size = nSize - guest.size(),
|
||||
.permission = permission,
|
||||
.state = memoryState,
|
||||
.memory = this,
|
||||
});
|
||||
}
|
||||
guest = span<u8>{guest.data(), nSize};
|
||||
}
|
||||
|
||||
void KPrivateMemory::Remap(span<u8> map) {
|
||||
if (!state.process->memory.AddressSpaceContains(map))
|
||||
throw exception("KPrivateMemory remapping isn't inside guest address space: 0x{:X} - 0x{:X}", map.data(), map.end().base());
|
||||
if (!util::IsPageAligned(map.data()) || !util::IsPageAligned(map.size()))
|
||||
throw exception("KPrivateMemory remapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size());
|
||||
|
||||
if (mprotect(guest.data(), guest.size(), PROT_NONE) < 0)
|
||||
throw exception("An occurred while remapping private memory: {}", strerror(errno));
|
||||
|
||||
if (mprotect(map.data(), map.size(), PROT_READ | PROT_WRITE | PROT_EXEC) < 0)
|
||||
throw exception("An occurred while remapping private memory: {}", strerror(errno));
|
||||
}
|
||||
|
||||
void KPrivateMemory::UpdatePermission(span<u8> map, memory::Permission pPermission) {
|
||||
auto ptr{std::clamp(map.data(), guest.data(), guest.end().base())};
|
||||
auto size{std::min(map.size(), static_cast<size_t>((guest.end().base()) - ptr))};
|
||||
|
||||
if (ptr && !util::IsPageAligned(ptr))
|
||||
throw exception("KPrivateMemory permission updated with a non-page-aligned address: 0x{:X}", ptr);
|
||||
|
||||
// If a static code region has been mapped as writable it needs to be changed to mutable
|
||||
if (memoryState == memory::states::CodeStatic && pPermission.w)
|
||||
memoryState = memory::states::CodeMutable;
|
||||
|
||||
state.process->memory.InsertChunk(ChunkDescriptor{
|
||||
.ptr = ptr,
|
||||
.size = size,
|
||||
.permission = pPermission,
|
||||
.state = memoryState,
|
||||
.memory = this,
|
||||
});
|
||||
}
|
||||
|
||||
KPrivateMemory::~KPrivateMemory() {
|
||||
mprotect(guest.data(), guest.size(), PROT_NONE);
|
||||
state.process->memory.InsertChunk(ChunkDescriptor{
|
||||
.ptr = guest.data(),
|
||||
.size = guest.size(),
|
||||
.state = memory::states::Unmapped,
|
||||
});
|
||||
}
|
||||
}
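With the file above deleted, the two things KPrivateMemory used to do on construction, reprotecting the already-reserved host pages and recording a ChunkDescriptor for the range, now happen inside the MemoryManager's typed map functions. A minimal sketch of that shape, assuming a MapHeapMemory-style entry point (only mprotect is a real API here, the rest is illustrative):

#include <sys/mman.h>
#include <cerrno>
#include <cstring>
#include <cstdint>
#include <stdexcept>

void MapHeapLike(uint8_t *ptr, size_t size) {
    // The backing was already reserved by the memory manager, so only the
    // protection has to change for the range to become usable
    if (mprotect(ptr, size, PROT_READ | PROT_WRITE) < 0)
        throw std::runtime_error(std::strerror(errno));
    // ...followed by a MapInternal-style update of the chunk map, recording the
    // new permission and memory state for [ptr, ptr + size)...
}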
|
@ -1,43 +0,0 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
// Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "KMemory.h"
|
||||
|
||||
namespace skyline::kernel::type {
|
||||
/**
|
||||
* @brief KPrivateMemory is used to map memory local to the guest process
|
||||
* @note This does not reflect a kernel object in Horizon OS, it is an abstraction which makes things simpler to manage in Skyline instead
|
||||
*/
|
||||
class KPrivateMemory : public KMemory {
|
||||
public:
|
||||
memory::Permission permission;
|
||||
memory::MemoryState memoryState;
|
||||
KHandle handle;
|
||||
|
||||
/**
|
||||
* @param permission The permissions for the allocated memory (As reported to the application, host memory permissions aren't reflected by this)
|
||||
* @note 'ptr' needs to be in guest-reserved address space
|
||||
*/
|
||||
KPrivateMemory(const DeviceState &state, KHandle handle, span<u8> guest, memory::Permission permission, memory::MemoryState memState);
|
||||
|
||||
/**
|
||||
* @note There is no check regarding if any expansions will cause the memory mapping to leak into other mappings
|
||||
* @note Any extensions will have the same permissions and memory state as the initial mapping as opposed to extending the end
|
||||
*/
|
||||
void Resize(size_t size);
|
||||
|
||||
/**
|
||||
* @note This does not copy over anything, only contents of any overlapping regions will be retained
|
||||
*/
|
||||
void Remap(span<u8> map);
|
||||
|
||||
void UpdatePermission(span<u8> map, memory::Permission pPermission) override;
|
||||
|
||||
/**
|
||||
* @brief The destructor of private memory, it deallocates the memory
|
||||
*/
|
||||
~KPrivateMemory();
|
||||
};
|
||||
}
|
@ -8,12 +8,12 @@
#include "KProcess.h"

namespace skyline::kernel::type {
KProcess::TlsPage::TlsPage(std::shared_ptr<KPrivateMemory> memory) : memory(std::move(memory)) {}
KProcess::TlsPage::TlsPage(u8 *memory) : memory(memory) {}

u8 *KProcess::TlsPage::ReserveSlot() {
if (index == constant::TlsSlots)
return nullptr;
return memory->guest.data() + (constant::TlsSlotSize * index++);
return memory + (constant::TlsSlotSize * index++);
}

KProcess::KProcess(const DeviceState &state) : memory(state), KSyncObject(state, KType::KProcess) {}
@ -26,7 +26,7 @@ namespace skyline::kernel::type {
}

void KProcess::Kill(bool join, bool all, bool disableCreation) {
Logger::Warn("Killing {}{}KProcess{}", join ? "and joining " : "", all ? "all threads in " : "HOS-0 in ", disableCreation ? " with new thread creation disabled" : "");
Logger::Warn("Killing {}{}KProcess{}", join ? "and joining " : "", all ? "all threads in " : "HOS-1 in ", disableCreation ? " with new thread creation disabled" : "");
Logger::EmulationContext.Flush();

bool expected{false};
@ -49,8 +49,8 @@ namespace skyline::kernel::type {

void KProcess::InitializeHeapTls() {
constexpr size_t DefaultHeapSize{0x200000};
heap = std::make_shared<KPrivateMemory>(state, 0, span<u8>{state.process->memory.heap.data(), DefaultHeapSize}, memory::Permission{true, true, false}, memory::states::Heap);
InsertItem(heap); // Insert it into the handle table so GetMemoryObject will contain it
memory.MapHeapMemory(span<u8>{state.process->memory.heap.data(), DefaultHeapSize});
memory.setHeapSize = DefaultHeapSize;
tlsExceptionContext = AllocateTlsSlot();
}

@ -61,8 +61,26 @@ namespace skyline::kernel::type {
|
||||
if ((slot = tlsPage->ReserveSlot()))
|
||||
return slot;
|
||||
|
||||
slot = tlsPages.empty() ? reinterpret_cast<u8 *>(memory.tlsIo.data()) : ((*(tlsPages.end() - 1))->memory->guest.data() + constant::PageSize);
|
||||
auto tlsPage{std::make_shared<TlsPage>(std::make_shared<KPrivateMemory>(state, 0, span<u8>{slot, constant::PageSize}, memory::Permission(true, true, false), memory::states::ThreadLocal))};
|
||||
bool isAllocated{};
|
||||
|
||||
u8 *pageCandidate{state.process->memory.tlsIo.data()};
|
||||
std::pair<u8 *, ChunkDescriptor> chunk;
|
||||
while (state.process->memory.tlsIo.contains(span<u8>(pageCandidate, constant::PageSize))) {
|
||||
chunk = memory.GetChunk(pageCandidate).value();
|
||||
|
||||
if (chunk.second.state == memory::states::Unmapped) {
|
||||
memory.MapThreadLocalMemory(span<u8>{pageCandidate, constant::PageSize});
|
||||
isAllocated = true;
|
||||
break;
|
||||
} else {
|
||||
pageCandidate = chunk.first + chunk.second.size;
|
||||
}
|
||||
}
|
||||
|
||||
if (!isAllocated) [[unlikely]]
|
||||
throw exception("Failed to find free memory for a tls slot!");
|
||||
|
||||
auto tlsPage{std::make_shared<TlsPage>(pageCandidate)};
|
||||
tlsPages.push_back(tlsPage);
|
||||
return tlsPage->ReserveSlot();
|
||||
}
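TLS pages (and, further down, the main thread stack) are no longer backed by KPrivateMemory objects; instead the process walks the chunk map of the target region until it finds an unmapped gap large enough and maps there. A toy first-fit scan with the same shape as the loop above (the map layout and types are assumptions):

#include <cstdint>
#include <map>
#include <optional>

enum class State { Unmapped, Mapped };
struct Chunk { uint64_t size; State state; };

std::optional<uint64_t> FindFree(const std::map<uint64_t, Chunk> &chunks,
                                 uint64_t regionBase, uint64_t regionSize, uint64_t wanted) {
    uint64_t candidate{regionBase};
    while (candidate + wanted <= regionBase + regionSize) {
        auto it{chunks.upper_bound(candidate)};
        if (it == chunks.begin())
            break;
        --it; // the chunk containing `candidate`, akin to GetChunk() in the diff
        if (it->second.state == State::Unmapped && it->first + it->second.size >= candidate + wanted)
            return candidate;                    // a large enough unmapped hole: map here
        candidate = it->first + it->second.size; // otherwise skip past the occupied chunk
    }
    return std::nullopt;                         // the diff throws when no hole is found
}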
|
||||
@ -72,8 +90,27 @@ namespace skyline::kernel::type {
|
||||
if (disableThreadCreation)
|
||||
return nullptr;
|
||||
if (!stackTop && threads.empty()) { //!< Main thread stack is created by the kernel and owned by the process
|
||||
mainThreadStack = std::make_shared<KPrivateMemory>(state, 0, span<u8>{state.process->memory.stack.data(), state.process->npdm.meta.mainThreadStackSize}, memory::Permission{true, true, false}, memory::states::Stack);
|
||||
stackTop = mainThreadStack->guest.end().base();
|
||||
bool isAllocated{};
|
||||
|
||||
u8 *pageCandidate{memory.stack.data()};
|
||||
std::pair<u8 *, ChunkDescriptor> chunk;
|
||||
while (state.process->memory.stack.contains(span<u8>(pageCandidate, state.process->npdm.meta.mainThreadStackSize))) {
|
||||
chunk = memory.GetChunk(pageCandidate).value();
|
||||
|
||||
if (chunk.second.state == memory::states::Unmapped && chunk.second.size >= state.process->npdm.meta.mainThreadStackSize) {
|
||||
memory.MapStackMemory(span<u8>{pageCandidate, state.process->npdm.meta.mainThreadStackSize});
|
||||
isAllocated = true;
|
||||
break;
|
||||
} else {
|
||||
pageCandidate = chunk.first + chunk.second.size;
|
||||
}
|
||||
}
|
||||
|
||||
if (!isAllocated) [[unlikely]]
|
||||
throw exception("Failed to map main thread stack!");
|
||||
|
||||
stackTop = pageCandidate + state.process->npdm.meta.mainThreadStackSize;
|
||||
mainThreadStack = span<u8>(pageCandidate, state.process->npdm.meta.mainThreadStackSize);
|
||||
}
|
||||
size_t tid{threads.size() + 1}; //!< The first thread is HOS-1 rather than HOS-0, this is to match the HOS kernel's behaviour
|
||||
auto thread{NewHandle<KThread>(this, tid, entry, argument, stackTop, priority ? *priority : state.process->npdm.meta.mainThreadPriority, idealCore ? *idealCore : state.process->npdm.meta.idealCore).item};
|
||||
@ -81,29 +118,6 @@ namespace skyline::kernel::type {
|
||||
return thread;
|
||||
}
|
||||
|
||||
std::optional<KProcess::HandleOut<KMemory>> KProcess::GetMemoryObject(u8 *ptr) {
|
||||
std::shared_lock lock(handleMutex);
|
||||
|
||||
for (KHandle index{}; index < handles.size(); index++) {
|
||||
auto &object{handles[index]};
|
||||
if (object) {
|
||||
switch (object->objectType) {
|
||||
case type::KType::KPrivateMemory:
|
||||
case type::KType::KSharedMemory:
|
||||
case type::KType::KTransferMemory: {
|
||||
auto mem{std::static_pointer_cast<type::KMemory>(object)};
|
||||
if (mem->guest.contains(ptr))
|
||||
return std::make_optional<KProcess::HandleOut<KMemory>>({mem, constant::BaseHandleIndex + index});
|
||||
}
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
void KProcess::ClearHandleTable() {
|
||||
std::shared_lock lock(handleMutex);
|
||||
handles.clear();
|
||||
|
@ -42,9 +42,9 @@ namespace skyline {
|
||||
*/
|
||||
struct TlsPage {
|
||||
u8 index{}; //!< The slots are assigned sequentially, this holds the index of the last TLS slot reserved
|
||||
std::shared_ptr<KPrivateMemory> memory; //!< A single page sized memory allocation for this TLS page
|
||||
u8 *memory; //!< A single page sized memory allocation for this TLS page
|
||||
|
||||
TlsPage(std::shared_ptr<KPrivateMemory> memory);
|
||||
TlsPage(u8 *memory);
|
||||
|
||||
/**
|
||||
* @return A non-null pointer to a TLS page slot on success, a nullptr will be returned if this page is full
|
||||
@ -57,10 +57,8 @@ namespace skyline {
|
||||
u8 *tlsExceptionContext{}; //!< A pointer to the TLS exception handling context slot
|
||||
std::mutex tlsMutex; //!< A mutex to synchronize allocation of TLS pages to prevent extra pages from being created
|
||||
std::vector<std::shared_ptr<TlsPage>> tlsPages; //!< All TLS pages allocated by this process
|
||||
std::shared_ptr<KPrivateMemory> mainThreadStack; //!< The stack memory of the main thread stack is owned by the KProcess itself
|
||||
std::shared_ptr<KPrivateMemory> heap;
|
||||
vfs::NPDM npdm;
|
||||
|
||||
span<u8> mainThreadStack;
|
||||
private:
|
||||
std::shared_mutex handleMutex;
|
||||
std::vector<std::shared_ptr<KObject>> handles;
|
||||
@ -117,7 +115,7 @@ namespace skyline {
|
||||
std::unique_lock lock(handleMutex);
|
||||
|
||||
std::shared_ptr<objectClass> item;
|
||||
if constexpr (std::is_same<objectClass, KThread>() || std::is_same<objectClass, KPrivateMemory>())
|
||||
if constexpr (std::is_same<objectClass, KThread>())
|
||||
item = std::make_shared<objectClass>(state, constant::BaseHandleIndex + handles.size(), args...);
|
||||
else
|
||||
item = std::make_shared<objectClass>(state, args...);
|
||||
@ -156,8 +154,6 @@ namespace skyline {
|
||||
objectType = KType::KSharedMemory;
|
||||
} else if constexpr(std::is_same<objectClass, KTransferMemory>()) {
|
||||
objectType = KType::KTransferMemory;
|
||||
} else if constexpr(std::is_same<objectClass, KPrivateMemory>()) {
|
||||
objectType = KType::KPrivateMemory;
|
||||
} else if constexpr(std::is_same<objectClass, KSession>()) {
|
||||
objectType = KType::KSession;
|
||||
} else if constexpr(std::is_same<objectClass, KEvent>()) {
|
||||
@ -188,13 +184,6 @@ namespace skyline {
|
||||
throw std::out_of_range(fmt::format("GetHandle was called with a deleted handle: 0x{:X}", handle));
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Retrieves a kernel memory object that owns the specified address
|
||||
* @param address The address to look for
|
||||
* @return A shared pointer to the corresponding KMemory object
|
||||
*/
|
||||
std::optional<HandleOut<KMemory>> GetMemoryObject(u8 *ptr);
|
||||
|
||||
/**
|
||||
* @brief Closes a handle in the handle table
|
||||
*/
|
||||
|
@ -8,122 +8,66 @@
|
||||
#include "KProcess.h"
|
||||
|
||||
namespace skyline::kernel::type {
|
||||
KSharedMemory::KSharedMemory(const DeviceState &state, size_t size, memory::MemoryState memState, KType type)
|
||||
: memoryState(memState),
|
||||
KMemory(state, type, span<u8>{}) {
|
||||
KSharedMemory::KSharedMemory(const DeviceState &state, size_t size, KType type)
|
||||
: KMemory(state, type, span<u8>{}) {
|
||||
fd = ASharedMemory_create(type == KType::KSharedMemory ? "HOS-KSharedMemory" : "HOS-KTransferMemory", size);
|
||||
if (fd < 0)
|
||||
if (fd < 0) [[unlikely]]
|
||||
throw exception("An error occurred while creating shared memory: {}", fd);
|
||||
|
||||
auto hostPtr{static_cast<u8 *>(mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED, fd, 0))};
|
||||
if (hostPtr == MAP_FAILED)
|
||||
u8 *hostPtr{static_cast<u8 *>(mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0))};
|
||||
if (hostPtr == MAP_FAILED) [[unlikely]]
|
||||
throw exception("An occurred while mapping shared memory: {}", strerror(errno));
|
||||
|
||||
host = span<u8>{hostPtr, size};
|
||||
}
|
||||
|
||||
u8 *KSharedMemory::Map(span<u8> map, memory::Permission permission) {
|
||||
if (!state.process->memory.AddressSpaceContains(map))
|
||||
throw exception("KPrivateMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", map.data(), map.end().base());
|
||||
if (!util::IsPageAligned(map.data()) || !util::IsPageAligned(map.size()))
|
||||
if (!state.process->memory.AddressSpaceContains(map)) [[unlikely]]
|
||||
throw exception("KSharedMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", map.data(), map.end().base());
|
||||
if (!util::IsPageAligned(map.data()) || !util::IsPageAligned(map.size())) [[unlikely]]
|
||||
throw exception("KSharedMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size());
|
||||
if (guest.valid())
|
||||
if (guest.valid()) [[unlikely]]
|
||||
throw exception("Mapping KSharedMemory multiple times on guest is not supported: Requested Mapping: 0x{:X} - 0x{:X} (0x{:X}), Current Mapping: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size(), guest.data(), guest.end().base(), guest.size());
|
||||
|
||||
auto guestPtr{static_cast<u8 *>(mmap(map.data(), map.size(), permission.Get(), MAP_SHARED | (map.data() ? MAP_FIXED : 0), fd, 0))};
|
||||
if (guestPtr == MAP_FAILED)
|
||||
if (guestPtr == MAP_FAILED) [[unlikely]]
|
||||
throw exception("An error occurred while mapping shared memory in guest: {}", strerror(errno));
|
||||
guest = span<u8>{guestPtr, map.size()};
|
||||
|
||||
state.process->memory.InsertChunk(ChunkDescriptor{
|
||||
.ptr = guest.data(),
|
||||
.size = guest.size(),
|
||||
.permission = permission,
|
||||
.state = memoryState,
|
||||
.attributes = memory::MemoryAttribute{
|
||||
.isBorrowed = objectType == KType::KTransferMemory,
|
||||
},
|
||||
.memory = this
|
||||
});
|
||||
if (objectType == KType::KTransferMemory) {
|
||||
state.process->memory.MapTransferMemory(guest, permission);
|
||||
state.process->memory.SetLockOnChunks(guest, true);
|
||||
} else {
|
||||
state.process->memory.MapSharedMemory(guest, permission);
|
||||
}
|
||||
|
||||
return guest.data();
|
||||
}
|
||||
|
||||
void KSharedMemory::Unmap(span<u8> map) {
|
||||
auto &memoryManager{state.process->memory};
|
||||
if (!memoryManager.AddressSpaceContains(map))
|
||||
throw exception("KPrivateMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", map.data(), map.end().base());
|
||||
if (!util::IsPageAligned(map.data()) || !util::IsPageAligned(map.size()))
|
||||
throw exception("KSharedMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size());
|
||||
if (guest.data() != map.data() && guest.size() != map.size())
|
||||
if (!memoryManager.AddressSpaceContains(map)) [[unlikely]]
|
||||
throw exception("KSharedMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", map.data(), map.end().base());
|
||||
if (!util::IsPageAligned(map.data()) || !util::IsPageAligned(map.size())) [[unlikely]]
|
||||
throw exception("KSharedMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} ({} bytes)", map.data(), map.end().base(), map.size());
|
||||
if (guest.data() != map.data() && guest.size() != map.size()) [[unlikely]]
|
||||
throw exception("Unmapping KSharedMemory partially is not supported: Requested Unmap: 0x{:X} - 0x{:X} (0x{:X}), Current Mapping: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size(), guest.data(), guest.end().base(), guest.size());
|
||||
|
||||
if (mmap(map.data(), map.size(), PROT_NONE, MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
|
||||
if (mmap(map.data(), map.size(), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) [[unlikely]]
|
||||
throw exception("An error occurred while unmapping shared memory in guest: {}", strerror(errno));
|
||||
|
||||
guest = span<u8>{};
|
||||
memoryManager.InsertChunk(ChunkDescriptor{
|
||||
.ptr = map.data(),
|
||||
.size = map.size(),
|
||||
.state = memory::states::Unmapped,
|
||||
});
|
||||
}
|
||||
|
||||
void KSharedMemory::UpdatePermission(span<u8> map, memory::Permission permission) {
|
||||
if (map.valid() && !util::IsPageAligned(map.data()))
|
||||
throw exception("KSharedMemory permission updated with a non-page-aligned address: 0x{:X}", map.data());
|
||||
|
||||
if (guest.valid()) {
|
||||
mprotect(map.data(), map.size(), permission.Get());
|
||||
if (guest.data() == MAP_FAILED)
|
||||
throw exception("An error occurred while updating shared memory's permissions in guest: {}", strerror(errno));
|
||||
|
||||
state.process->memory.InsertChunk(ChunkDescriptor{
|
||||
.ptr = map.data(),
|
||||
.size = map.size(),
|
||||
.permission = permission,
|
||||
.state = memoryState,
|
||||
.attributes = memory::MemoryAttribute{
|
||||
.isBorrowed = objectType == KType::KTransferMemory,
|
||||
},
|
||||
.memory = this
|
||||
});
|
||||
}
|
||||
memoryManager.UnmapMemory(map);
|
||||
}
|
||||
|
||||
KSharedMemory::~KSharedMemory() {
|
||||
if (state.process && guest.valid()) {
|
||||
auto &memoryManager{state.process->memory};
|
||||
if (objectType != KType::KTransferMemory) {
|
||||
if (mmap(guest.data(), guest.size(), PROT_NONE, MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
|
||||
Logger::Warn("An error occurred while unmapping shared memory: {}", strerror(errno));
|
||||
|
||||
state.process->memory.InsertChunk(ChunkDescriptor{
|
||||
.ptr = guest.data(),
|
||||
.size = guest.size(),
|
||||
.state = memory::states::Unmapped,
|
||||
});
|
||||
} else {
|
||||
// KTransferMemory remaps the region with R/W permissions during destruction
|
||||
constexpr memory::Permission UnborrowPermission{true, true, false};
|
||||
if (mmap(guest.data(), guest.size(), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) [[unlikely]]
|
||||
Logger::Warn("An error occurred while unmapping shared memory: {}", strerror(errno));
|
||||
|
||||
if (mmap(guest.data(), guest.size(), UnborrowPermission.Get(), MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
|
||||
Logger::Warn("An error occurred while remapping transfer memory: {}", strerror(errno));
|
||||
else if (!host.valid())
|
||||
Logger::Warn("Expected host mapping of transfer memory to be valid during KTransferMemory destruction");
|
||||
guest.copy_from(host);
|
||||
|
||||
state.process->memory.InsertChunk(ChunkDescriptor{
|
||||
.ptr = guest.data(),
|
||||
.size = guest.size(),
|
||||
.permission = UnborrowPermission,
|
||||
.state = memoryState,
|
||||
.attributes = memory::MemoryAttribute{
|
||||
.isBorrowed = false,
|
||||
},
|
||||
.memory = this
|
||||
});
|
||||
}
|
||||
state.process->memory.UnmapMemory(guest);
|
||||
}
|
||||
|
||||
if (host.valid())
|
||||
|
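KSharedMemory::Map and Unmap now delegate all chunk bookkeeping to the MemoryManager (MapSharedMemory, or MapTransferMemory plus SetLockOnChunks for borrowed transfer memory, and UnmapMemory on the way out), and unmapping replaces the guest range with fresh readable/writable anonymous pages instead of leaving it PROT_NONE. A sketch of that over-mapping pattern (the mmap call is real, the wrapper itself is illustrative):

#include <sys/mman.h>
#include <cerrno>
#include <cstring>
#include <cstddef>
#include <stdexcept>

void ReplaceWithAnonymous(void *ptr, size_t size) {
    // Map fresh anonymous pages over the range so the guest sees zeroed R/W
    // memory rather than faulting on a PROT_NONE hole
    if (mmap(ptr, size, PROT_READ | PROT_WRITE,
             MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
        throw std::runtime_error(std::strerror(errno));
}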
@ -12,12 +12,11 @@ namespace skyline::kernel::type {
|
||||
class KSharedMemory : public KMemory {
|
||||
private:
|
||||
int fd; //!< A file descriptor to the underlying shared memory
|
||||
memory::MemoryState memoryState; //!< The state of the memory as supplied initially, this is retained for any mappings
|
||||
|
||||
public:
|
||||
span<u8> host; //!< We also keep a host mirror of the underlying shared memory for host access, it is persistently mapped and should be used by anything accessing the memory on the host
|
||||
|
||||
KSharedMemory(const DeviceState &state, size_t size, memory::MemoryState memState = memory::states::SharedMemory, KType type = KType::KSharedMemory);
|
||||
KSharedMemory(const DeviceState &state, size_t size, KType type = KType::KSharedMemory);
|
||||
|
||||
/**
|
||||
* @note 'ptr' needs to be in guest-reserved address space
|
||||
@ -29,8 +28,6 @@ namespace skyline::kernel::type {
|
||||
*/
|
||||
void Unmap(span<u8> map);
|
||||
|
||||
void UpdatePermission(span<u8> map, memory::Permission permission) override;
|
||||
|
||||
/**
|
||||
* @brief The destructor of shared memory, it deallocates the memory from all processes
|
||||
*/
|
||||
|
@ -9,7 +9,6 @@
|
||||
#include <common/signal.h>
|
||||
#include <common/spin_lock.h>
|
||||
#include "KSyncObject.h"
|
||||
#include "KPrivateMemory.h"
|
||||
#include "KSharedMemory.h"
|
||||
|
||||
namespace skyline {
|
||||
|
@ -15,8 +15,8 @@ namespace skyline::kernel::type {
|
||||
/**
|
||||
* @note 'ptr' needs to be in guest-reserved address space
|
||||
*/
|
||||
KTransferMemory(const DeviceState &state, u8 *ptr, size_t size, memory::Permission permission, memory::MemoryState memState = memory::states::TransferMemory)
|
||||
: KSharedMemory(state, size, memState, KType::KTransferMemory) {
|
||||
KTransferMemory(const DeviceState &state, u8 *ptr, size_t size, memory::Permission permission)
|
||||
: KSharedMemory(state, size, KType::KTransferMemory) {
|
||||
std::memcpy(host.data(), ptr, size);
|
||||
Map(span<u8>{ptr, size}, permission);
|
||||
}
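The order of operations in the KTransferMemory constructor matters: the current guest bytes at `ptr` are copied into the shared backing through the host mirror first, and only then is that backing mapped over the guest range, so the contents survive the remap. A condensed sketch using plain POSIX shared memory in place of ASharedMemory (the function and parameters are illustrative):

#include <sys/mman.h>
#include <cstdint>
#include <cstddef>
#include <cstring>

void BackRangeWithSharedMemory(int fd, uint8_t *hostMirror, uint8_t *ptr, size_t size) {
    std::memcpy(hostMirror, ptr, size);   // preserve the guest's current contents
    mmap(ptr, size, PROT_READ | PROT_WRITE,
         MAP_SHARED | MAP_FIXED, fd, 0);  // ptr is now backed by the shared fd
}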
|
||||
|
@ -89,20 +89,24 @@ namespace skyline::loader {
|
||||
hookSize = util::AlignUp(state.nce->GetHookSectionSize(executableSymbols), PAGE_SIZE);
|
||||
}
|
||||
|
||||
auto patchType{process->memory.addressSpaceType == memory::AddressSpaceType::AddressSpace36Bit ? memory::states::Heap : memory::states::Reserved};
|
||||
process->NewHandle<kernel::type::KPrivateMemory>(span<u8>{base, patch.size + hookSize}, memory::Permission{false, false, false}, patchType); // ---
|
||||
if (process->memory.addressSpaceType == memory::AddressSpaceType::AddressSpace36Bit) {
|
||||
process->memory.MapHeapMemory(span<u8>{base, patch.size + hookSize}); // ---
|
||||
process->memory.SetChunkPermission(span<u8>{base, patch.size + hookSize}, memory::Permission{false, false, false});
|
||||
} else {
|
||||
process->memory.Reserve(span<u8>{base, patch.size + hookSize}); // ---
|
||||
}
|
||||
Logger::Debug("Successfully mapped section .patch @ 0x{:X}, Size = 0x{:X}", base, patch.size);
|
||||
if (hookSize > 0)
|
||||
Logger::Debug("Successfully mapped section .hook @ 0x{:X}, Size = 0x{:X}", base + patch.size, hookSize);
|
||||
|
||||
u8 *executableBase{base + patch.size + hookSize};
|
||||
process->NewHandle<kernel::type::KPrivateMemory>(span<u8>{executableBase + executable.text.offset, textSize}, memory::Permission{true, false, true}, memory::states::CodeStatic); // R-X
|
||||
process->memory.MapCodeMemory(span<u8>{executableBase + executable.text.offset, textSize}, memory::Permission{true, false, true}); // R-X
|
||||
Logger::Debug("Successfully mapped section .text @ 0x{:X}, Size = 0x{:X}", executableBase, textSize);
|
||||
|
||||
process->NewHandle<kernel::type::KPrivateMemory>(span<u8>{executableBase + executable.ro.offset, roSize}, memory::Permission{true, false, false}, memory::states::CodeStatic); // R--
|
||||
process->memory.MapCodeMemory(span<u8>{executableBase + executable.ro.offset, roSize}, memory::Permission{true, false, false}); // R--
|
||||
Logger::Debug("Successfully mapped section .rodata @ 0x{:X}, Size = 0x{:X}", executableBase + executable.ro.offset, roSize);
|
||||
|
||||
process->NewHandle<kernel::type::KPrivateMemory>(span<u8>{executableBase + executable.data.offset, dataSize}, memory::Permission{true, true, false}, memory::states::CodeMutable); // RW-
|
||||
process->memory.MapMutableCodeMemory(span<u8>{executableBase + executable.data.offset, dataSize}); // RW-
|
||||
Logger::Debug("Successfully mapped section .data + .bss @ 0x{:X}, Size = 0x{:X}", executableBase + executable.data.offset, dataSize);
|
||||
|
||||
size_t size{patch.size + hookSize + textSize + roSize + dataSize};
|
||||
|
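The loader change above swaps per-section KPrivateMemory handles for direct MemoryManager calls (MapHeapMemory plus SetChunkPermission on 36-bit address spaces, Reserve otherwise, then MapCodeMemory/MapMutableCodeMemory for the executable sections). The section layout itself is unchanged; a small illustration of the offset arithmetic with made-up sizes:

#include <cstdint>
#include <cstdio>

int main() {
    uint64_t base{0x8000000};                          // made-up load base
    uint64_t patchSize{0x4000}, hookSize{0x1000};
    uint64_t textOffset{0}, textSize{0x200000};
    uint64_t roOffset{textOffset + textSize}, roSize{0x40000};
    uint64_t dataOffset{roOffset + roSize}, dataSize{0x80000};

    uint64_t executableBase{base + patchSize + hookSize};
    std::printf(".text   @ 0x%llX (R-X)\n", static_cast<unsigned long long>(executableBase + textOffset));
    std::printf(".rodata @ 0x%llX (R--)\n", static_cast<unsigned long long>(executableBase + roOffset));
    std::printf(".data   @ 0x%llX (RW-)\n", static_cast<unsigned long long>(executableBase + dataOffset));
}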
@ -71,8 +71,8 @@ namespace skyline::service::ro {
if (state.process->memory.heap.contains(ptr) || state.process->memory.alias.contains(ptr))
continue;

auto desc{state.process->memory.Get(ptr)};
if (!desc || desc->state != memory::states::Unmapped || (static_cast<size_t>(ptr - desc->ptr) + size) < desc->size)
auto desc{state.process->memory.GetChunk(ptr)};
if (!desc || desc->second.state != memory::states::Unmapped || (static_cast<size_t>(ptr - desc->first) + size) < desc->second.size)
continue;
} while (!ptr);

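Callers that used to read a ChunkDescriptor directly now get a (base pointer, descriptor) pair back from GetChunk, so offsets are computed against `desc->first` while the state and size live under `desc->second`, as in the rewritten condition above. A minimal sketch of such a lookup (the types are stand-ins, not the emulator's):

#include <cstdint>
#include <map>
#include <optional>
#include <utility>

enum class State { Unmapped, Mapped };
struct Chunk { uint64_t size; State state; };

// GetChunk-style lookup: return the (base, descriptor) pair of the chunk containing `address`
std::optional<std::pair<uint64_t, Chunk>> GetChunk(const std::map<uint64_t, Chunk> &chunks, uint64_t address) {
    auto it{chunks.upper_bound(address)};
    if (it == chunks.begin())
        return std::nullopt;
    --it; // the last chunk whose base is <= address
    if (address >= it->first + it->second.size)
        return std::nullopt;
    return std::make_pair(it->first, it->second);
}

// Usage: the offset into the chunk is (address - desc->first), the descriptor itself is desc->second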
@ -85,26 +85,21 @@ namespace skyline::service::ro {
Result IRoInterface::UnloadModule(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
Logger::Error("Module unloading is unimplemented!");
return {};

}

Result IRoInterface::RegisterModuleInfo(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
return {};

}

Result IRoInterface::UnregisterModuleInfo(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
return {};

}

Result IRoInterface::RegisterProcessHandle(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
return {};

}

Result IRoInterface::RegisterProcessModuleInfo(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
return {};

}
}