Commit 4e38344a2c by TheASVigilante, 2023-05-13 15:32:34 +01:00 (committed by GitHub)
19 changed files with 990 additions and 720 deletions

View File

@ -174,8 +174,9 @@ add_library(skyline SHARED
${source_DIR}/skyline/kernel/svc.cpp
${source_DIR}/skyline/kernel/types/KProcess.cpp
${source_DIR}/skyline/kernel/types/KThread.cpp
${source_DIR}/skyline/kernel/types/KTransferMemory.cpp
${source_DIR}/skyline/kernel/types/KSharedMemory.cpp
${source_DIR}/skyline/kernel/types/KPrivateMemory.cpp
${source_DIR}/skyline/kernel/types/KMemory.cpp
${source_DIR}/skyline/kernel/types/KSyncObject.cpp
${source_DIR}/skyline/audio.cpp
${source_DIR}/skyline/gpu.cpp

View File

@ -7,11 +7,150 @@
#include "types/KProcess.h"
namespace skyline::kernel {
MemoryManager::MemoryManager(const DeviceState &state) : state(state) {}
MemoryManager::MemoryManager(const DeviceState &state) noexcept : state{state}, processHeapSize{}, memRefs{} {}
MemoryManager::~MemoryManager() {
MemoryManager::~MemoryManager() noexcept {
if (base.valid() && !base.empty())
munmap(reinterpret_cast<void *>(base.data()), base.size());
if (addressSpaceType != memory::AddressSpaceType::AddressSpace39Bit)
if (codeBase36Bit.valid() && !codeBase36Bit.empty())
munmap(reinterpret_cast<void *>(codeBase36Bit.data()), codeBase36Bit.size());
}
void MemoryManager::MapInternal(const std::pair<u8 *, ChunkDescriptor> &newDesc) {
// The chunk that contains / precedes the new chunk base address
auto firstChunkBase{chunks.lower_bound(newDesc.first)};
if (newDesc.first <= firstChunkBase->first)
--firstChunkBase;
// The chunk that contains / follows the end address of the new chunk
auto lastChunkBase{chunks.lower_bound(newDesc.first + newDesc.second.size)};
if ((newDesc.first + newDesc.second.size) < lastChunkBase->first)
--lastChunkBase;
ChunkDescriptor firstChunk{firstChunkBase->second};
ChunkDescriptor lastChunk{lastChunkBase->second};
bool needsReprotection{false};
bool isUnmapping{newDesc.second.state == memory::states::Unmapped};
// We cut a hole in a single chunk
if (firstChunkBase->first == lastChunkBase->first) {
if (firstChunk.IsCompatible(newDesc.second)) [[unlikely]]
// No editing necessary
return;
if ((firstChunk.state == memory::states::Unmapped) != isUnmapping)
needsReprotection = true;
// We reduce the size of the first half
firstChunk.size = static_cast<size_t>(newDesc.first - firstChunkBase->first);
chunks[firstChunkBase->first] = firstChunk;
// We create the chunk's second half
lastChunk.size = static_cast<size_t>((lastChunkBase->first + lastChunk.size) - (newDesc.first + newDesc.second.size));
chunks.insert({newDesc.first + newDesc.second.size, lastChunk});
// Insert new chunk in between
chunks.insert(newDesc);
} else {
// If there are descriptors between first and last chunk, delete them
if ((firstChunkBase->first + firstChunk.size) != lastChunkBase->first) {
auto tempChunkBase{std::next(firstChunkBase)};
while (tempChunkBase->first != lastChunkBase->first) {
auto tmp{tempChunkBase++};
if ((tmp->second.state == memory::states::Unmapped) != isUnmapping)
needsReprotection = true;
}
chunks.erase(std::next(firstChunkBase), lastChunkBase);
}
bool shouldInsert{true};
// We check if the new chunk and the first chunk are mergeable
if (firstChunk.IsCompatible(newDesc.second)) {
shouldInsert = false;
firstChunk.size = static_cast<size_t>((newDesc.first + newDesc.second.size) - firstChunkBase->first);
chunks[firstChunkBase->first] = firstChunk;
} else if ((firstChunkBase->first + firstChunk.size) != newDesc.first) { // If it's not mergeable, check if it needs resizing
firstChunk.size = static_cast<size_t>(newDesc.first - firstChunkBase->first);
chunks[firstChunkBase->first] = firstChunk;
if ((firstChunk.state == memory::states::Unmapped) != isUnmapping)
needsReprotection = true;
}
// We check if the new chunk and the last chunk are mergeable
if (lastChunk.IsCompatible(newDesc.second)) {
u8 *oldBase{lastChunkBase->first};
chunks.erase(lastChunkBase);
if (shouldInsert) {
shouldInsert = false;
lastChunk.size = static_cast<size_t>((lastChunk.size + oldBase) - (newDesc.first));
chunks[newDesc.first] = lastChunk;
} else {
firstChunk.size = static_cast<size_t>((lastChunk.size + oldBase) - firstChunkBase->first);
chunks[firstChunkBase->first] = firstChunk;
}
} else if ((newDesc.first + newDesc.second.size) != lastChunkBase->first) { // If it's not mergeable, check if it needs resizing
lastChunk.size = static_cast<size_t>((lastChunk.size + lastChunkBase->first) - (newDesc.first + newDesc.second.size));
chunks.erase(lastChunkBase);
chunks[newDesc.first + newDesc.second.size] = lastChunk;
if ((lastChunk.state == memory::states::Unmapped) != isUnmapping)
needsReprotection = true;
}
// Insert if not merged
if (shouldInsert)
chunks.insert(newDesc);
}
if (needsReprotection)
if (mprotect(newDesc.first, newDesc.second.size, !isUnmapping ? PROT_READ | PROT_WRITE | PROT_EXEC : PROT_NONE)) [[unlikely]]
Logger::Warn("Reprotection failed: {}", strerror(errno));
}
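As an illustration (not part of the commit), this is roughly what the single-chunk hole-cut path above does to the chunk map, using hypothetical addresses:
// before: chunks = { {0x10000000, {.size = 0x10000, .state = Unmapped}}, ... }
// MapInternal({0x10004000, {.size = 0x2000, .permission = {true, true, false}, .state = Heap}})
// after:  chunks = { {0x10000000, {.size = 0x4000, .state = Unmapped}},
//                    {0x10004000, {.size = 0x2000, .state = Heap}},
//                    {0x10006000, {.size = 0xA000, .state = Unmapped}}, ... }
// The state changed away from Unmapped, so needsReprotection is set and the new range is
// mprotect()ed to RWX (it would be PROT_NONE if the new chunk were an unmapping).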
void MemoryManager::ForeachChunkInRange(span<u8> memory, auto editCallback) {
auto chunkBase{chunks.lower_bound(memory.data())};
if (memory.data() < chunkBase->first)
--chunkBase;
size_t sizeLeft{memory.size()};
if (chunkBase->first < memory.data()) [[unlikely]] {
size_t chunkSize{std::min<size_t>(chunkBase->second.size - (static_cast<size_t>(memory.data() - chunkBase->first)), memory.size())};
std::pair<u8 *, ChunkDescriptor> temp{memory.data(), chunkBase->second};
temp.second.size = chunkSize;
editCallback(temp);
++chunkBase;
sizeLeft -= chunkSize;
}
while (sizeLeft) {
if (sizeLeft < chunkBase->second.size) {
std::pair<u8 *, ChunkDescriptor> temp(*chunkBase);
temp.second.size = sizeLeft;
editCallback(temp);
break;
} else [[likely]] {
std::pair<u8 *, ChunkDescriptor> temp(*chunkBase);
editCallback(temp);
sizeLeft = sizeLeft - chunkBase->second.size;
++chunkBase;
}
}
}
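A small sketch (illustrative values only) of how ForeachChunkInRange clips descriptors at the edges of the requested span before handing them to the callback:
// chunks: { {0x2000, size 0x3000}, {0x5000, size 0x2000} }
// ForeachChunkInRange(span{0x3000, 0x3000}, cb) calls:
//   cb({0x3000, first chunk's descriptor, size clipped to 0x2000})  // clipped to start at memory.data()
//   cb({0x5000, second chunk's descriptor, size clipped to 0x1000}) // clipped to the remaining sizeLeft
// Callers such as SetRegionBorrowed then pass each clipped descriptor back into MapInternal.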
constexpr size_t RegionAlignment{1ULL << 21}; //!< The minimum alignment of a HOS memory region
@ -44,11 +183,11 @@ namespace skyline::kernel {
break;
} while ((line = maps.find_first_of('\n', line)) != std::string::npos && line++);
if (!region.valid())
if (!region.valid()) [[unlikely]]
throw exception("Allocation failed");
auto result{mmap(reinterpret_cast<void *>(region.data()), size, PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_SHARED, -1, 0)};
if (result == MAP_FAILED)
if (result == MAP_FAILED) [[unlikely]]
throw exception("Failed to mmap guest address space: {}", strerror(errno));
return region;
@ -64,8 +203,9 @@ namespace skyline::kernel {
throw exception("32-bit address spaces are not supported");
case memory::AddressSpaceType::AddressSpace36Bit: {
addressSpace = span<u8>{reinterpret_cast<u8 *>(0x8000000), (1ULL << 39) - 0x8000000};
baseSize = 0x180000000 + 0x78000000 + 0x180000000;
addressSpace = span<u8>{reinterpret_cast<u8 *>(0), (1ULL << 36)};
baseSize = 0x180000000 + 0x180000000;
break;
}
case memory::AddressSpaceType::AddressSpace39Bit: {
@ -80,71 +220,47 @@ namespace skyline::kernel {
// Qualcomm KGSL (Kernel Graphic Support Layer/Kernel GPU driver) maps below 35-bits, reserving it causes KGSL to go OOM
static constexpr size_t KgslReservedRegionSize{1ULL << 35};
base = AllocateMappedRange(baseSize, RegionAlignment, KgslReservedRegionSize, addressSpace.size(), false);
if (type != memory::AddressSpaceType::AddressSpace36Bit) {
base = AllocateMappedRange(baseSize, RegionAlignment, KgslReservedRegionSize, addressSpace.size(), false);
chunks = {
ChunkDescriptor{
.ptr = addressSpace.data(),
.size = static_cast<size_t>(base.data() - addressSpace.data()),
.state = memory::states::Reserved,
},
ChunkDescriptor{
.ptr = base.data(),
.size = base.size(),
.state = memory::states::Unmapped,
},
ChunkDescriptor{
.ptr = base.end().base(),
.size = addressSpace.size() - reinterpret_cast<u64>(base.end().base()),
.state = memory::states::Reserved,
}};
code = base;
} else {
base = AllocateMappedRange(baseSize, 1ULL << 36, KgslReservedRegionSize, addressSpace.size(), false);
codeBase36Bit = AllocateMappedRange(0x32000000, RegionAlignment, 0xC000000, 0x78000000ULL + reinterpret_cast<size_t>(addressSpace.data()), true);
code = codeBase36Bit = AllocateMappedRange(0x78000000, RegionAlignment, 0x8000000, KgslReservedRegionSize, false);
chunks = {
ChunkDescriptor{
.ptr = addressSpace.data(),
.size = static_cast<size_t>(codeBase36Bit.data() - addressSpace.data()),
.state = memory::states::Heap, // We can't use reserved here as rtld uses it to know when to halt memory walking
},
ChunkDescriptor{
.ptr = codeBase36Bit.data(),
.size = codeBase36Bit.size(),
.state = memory::states::Unmapped,
},
ChunkDescriptor{
.ptr = codeBase36Bit.end().base(),
.size = static_cast<u64>(base.data() - codeBase36Bit.end().base()),
.state = memory::states::Heap,
},
ChunkDescriptor{
.ptr = base.data(),
.size = base.size(),
.state = memory::states::Unmapped,
},
ChunkDescriptor{
.ptr = base.end().base(),
.size = addressSpace.size() - reinterpret_cast<u64>(base.end().base()),
.state = memory::states::Reserved,
}};
code = codeBase36Bit;
if ((reinterpret_cast<u64>(base.data()) + baseSize) > (1ULL << 36)) {
Logger::Warn("Couldn't fit regions into 36 bit AS! Resizing AS to 39 bits!");
addressSpace = span<u8>{reinterpret_cast<u8 *>(0), 1ULL << 39};
}
}
// Insert a placeholder element at the end of the map to make sure upper_bound/lower_bound never return std::map::end(), which cannot be safely dereferenced
chunks = {{addressSpace.data(),{
.size = addressSpace.size(),
.state = memory::states::Unmapped,
}}, {reinterpret_cast<u8 *>(UINT64_MAX), {
.state = memory::states::Reserved,
}}};
}
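A short aside on the sentinel above: keying a Reserved chunk at UINT64_MAX guarantees that the lookup idiom used throughout this file never has to handle std::map::end(), e.g. (sketch, for any guest pointer addr below UINT64_MAX):
auto it{chunks.lower_bound(addr)};
if (addr < it->first)
    --it; // always safe: 'it' can never be chunks.end() because of the UINT64_MAX sentinel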
void MemoryManager::InitializeRegions(span<u8> codeRegion) {
if (!util::IsAligned(codeRegion.data(), RegionAlignment))
if (!util::IsAligned(codeRegion.data(), RegionAlignment)) [[unlikely]]
throw exception("Non-aligned code region was used to initialize regions: 0x{:X} - 0x{:X}", codeRegion.data(), codeRegion.end().base());
switch (addressSpaceType) {
case memory::AddressSpaceType::AddressSpace36Bit: {
// Place code, stack and TLS/IO in the lower 36-bits of the host AS and heap past that
code = span<u8>{codeBase36Bit.data(), util::AlignUp(codeRegion.size(), RegionAlignment)};
stack = span<u8>{code.end().base(), codeBase36Bit.size() - code.size()};
// As a workaround if we can't place the code region at the base of the AS we mark it as inaccessible heap so rtld doesn't crash
if (codeBase36Bit.data() != reinterpret_cast<u8 *>(0x8000000)) {
MapInternal(std::pair<u8 *, ChunkDescriptor>(reinterpret_cast<u8 *>(0x8000000),{
.size = reinterpret_cast<size_t>(codeBase36Bit.data() - 0x8000000),
.state = memory::states::Heap
}));
}
// Place code, stack and TLS/IO in the lower 36 bits of the host AS, with the heap and alias regions past that
code = span<u8>{codeBase36Bit.data(), codeBase36Bit.data() + 0x70000000};
stack = span<u8>{codeBase36Bit.data(), codeBase36Bit.data() + 0x78000000};
tlsIo = stack; //!< TLS/IO is shared with Stack on 36-bit
alias = span<u8>{base.data(), 0x180000000};
heap = span<u8>{alias.end().base(), 0x180000000};
@ -157,6 +273,15 @@ namespace skyline::kernel {
heap = span<u8>{alias.end().base(), 0x180000000};
stack = span<u8>{heap.end().base(), 0x80000000};
tlsIo = span<u8>{stack.end().base(), 0x1000000000};
u64 newSize{code.size() + alias.size() + stack.size() + heap.size() + tlsIo.size()};
if (newSize > base.size()) [[unlikely]]
throw exception("Guest VMM size has exceeded host carveout size: 0x{:X}/0x{:X} (Code: 0x{:X}/0x{:X})", newSize, base.size(), code.size(), CodeRegionSize);
if (newSize != base.size()) [[likely]]
munmap(base.end().base(), newSize - base.size());
break;
}
@ -164,31 +289,25 @@ namespace skyline::kernel {
throw exception("Regions initialized without VMM initialization");
}
auto newSize{code.size() + alias.size() + stack.size() + heap.size() + ((addressSpaceType == memory::AddressSpaceType::AddressSpace39Bit) ? tlsIo.size() : 0)};
if (newSize > base.size())
throw exception("Guest VMM size has exceeded host carveout size: 0x{:X}/0x{:X} (Code: 0x{:X}/0x{:X})", newSize, base.size(), code.size(), CodeRegionSize);
if (newSize != base.size())
munmap(base.end().base(), newSize - base.size());
if (codeRegion.size() > code.size())
if (codeRegion.size() > code.size()) [[unlikely]]
throw exception("Code region ({}) is smaller than mapped code size ({})", code.size(), codeRegion.size());
Logger::Debug("Region Map:\nVMM Base: 0x{:X}\nCode Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nAlias Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nHeap Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nStack Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nTLS/IO Region: 0x{:X} - 0x{:X} (Size: 0x{:X})", base.data(), code.data(), code.end().base(), code.size(), alias.data(), alias.end().base(), alias.size(), heap.data(), heap.end().base(), heap.size(), stack.data(), stack.end().base(), stack.size(), tlsIo.data(), tlsIo.end().base(), tlsIo.size());
Logger::Debug("Region Map:\nVMM Base: 0x{:X}\nCode Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nAlias Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nHeap Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nStack Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nTLS/IO Region: 0x{:X} - 0x{:X} (Size: 0x{:X})", code.data(), code.data(), code.end().base(), code.size(), alias.data(), alias.end().base(), alias.size(), heap.data(), heap.end().base(), heap.size(), stack.data(), stack.end().base(), stack.size(), tlsIo.data(), tlsIo.end().base(), tlsIo.size());
}
span<u8> MemoryManager::CreateMirror(span<u8> mapping) {
if (!base.contains(mapping))
if (!base.contains(mapping)) [[unlikely]]
throw exception("Mapping is outside of VMM base: 0x{:X} - 0x{:X}", mapping.data(), mapping.end().base());
auto offset{static_cast<size_t>(mapping.data() - base.data())};
if (!util::IsPageAligned(offset) || !util::IsPageAligned(mapping.size()))
if (!util::IsPageAligned(offset) || !util::IsPageAligned(mapping.size())) [[unlikely]]
throw exception("Mapping is not aligned to a page: 0x{:X}-0x{:X} (0x{:X})", mapping.data(), mapping.end().base(), offset);
auto mirror{mremap(mapping.data(), 0, mapping.size(), MREMAP_MAYMOVE)};
if (mirror == MAP_FAILED)
if (mirror == MAP_FAILED) [[unlikely]]
throw exception("Failed to create mirror mapping at 0x{:X}-0x{:X} (0x{:X}): {}", mapping.data(), mapping.end().base(), offset, strerror(errno));
mprotect(mirror, mapping.size(), PROT_READ | PROT_WRITE | PROT_EXEC);
mprotect(mirror, mapping.size(), PROT_READ | PROT_WRITE);
return span<u8>{reinterpret_cast<u8 *>(mirror), mapping.size()};
}
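The mirror above relies on mremap()'s documented old_size == 0 behaviour, which duplicates a shared mapping instead of moving it; a minimal standalone sketch of the same trick (backing and size are placeholders, and the backing must come from a MAP_SHARED mapping like the VMM base):
#include <sys/mman.h>
void *mirror{mremap(backing, 0, size, MREMAP_MAYMOVE)}; // old_size == 0: map the same pages again at a new address
if (mirror == MAP_FAILED)
    ; // handle the error, e.g. throw as CreateMirror does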
@ -199,117 +318,257 @@ namespace skyline::kernel {
totalSize += region.size();
auto mirrorBase{mmap(nullptr, totalSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)}; // Reserve address space for all mirrors
if (mirrorBase == MAP_FAILED)
if (mirrorBase == MAP_FAILED) [[unlikely]]
throw exception("Failed to create mirror base: {} (0x{:X} bytes)", strerror(errno), totalSize);
size_t mirrorOffset{};
for (const auto &region : regions) {
if (!base.contains(region))
if (!base.contains(region)) [[unlikely]]
throw exception("Mapping is outside of VMM base: 0x{:X} - 0x{:X}", region.data(), region.end().base());
auto offset{static_cast<size_t>(region.data() - base.data())};
if (!util::IsPageAligned(offset) || !util::IsPageAligned(region.size()))
if (!util::IsPageAligned(offset) || !util::IsPageAligned(region.size())) [[unlikely]]
throw exception("Mapping is not aligned to a page: 0x{:X}-0x{:X} (0x{:X})", region.data(), region.end().base(), offset);
auto mirror{mremap(region.data(), 0, region.size(), MREMAP_FIXED | MREMAP_MAYMOVE, reinterpret_cast<u8 *>(mirrorBase) + mirrorOffset)};
if (mirror == MAP_FAILED)
if (mirror == MAP_FAILED) [[unlikely]]
throw exception("Failed to create mirror mapping at 0x{:X}-0x{:X} (0x{:X}): {}", region.data(), region.end().base(), offset, strerror(errno));
mprotect(mirror, region.size(), PROT_READ | PROT_WRITE | PROT_EXEC);
mprotect(mirror, region.size(), PROT_READ | PROT_WRITE);
mirrorOffset += region.size();
}
if (mirrorOffset != totalSize)
if (mirrorOffset != totalSize) [[unlikely]]
throw exception("Mirror size mismatch: 0x{:X} != 0x{:X}", mirrorOffset, totalSize);
return span<u8>{reinterpret_cast<u8 *>(mirrorBase), totalSize};
}
void MemoryManager::FreeMemory(span<u8> memory) {
void MemoryManager::SetRegionBorrowed(span<u8> memory, bool value) {
std::unique_lock lock{mutex};
ForeachChunkInRange(memory, [&](std::pair<u8 *, ChunkDescriptor> &desc) __attribute__((always_inline)) {
desc.second.attributes.isBorrowed = value;
MapInternal(desc);
});
}
void MemoryManager::SetRegionCpuCaching(span<u8> memory, bool value) {
std::unique_lock lock{mutex};
ForeachChunkInRange(memory, [&](std::pair<u8 *, ChunkDescriptor> &desc) __attribute__((always_inline)) {
desc.second.attributes.isUncached = value;
MapInternal(desc);
});
}
void MemoryManager::SetRegionPermission(span<u8> memory, memory::Permission permission) {
std::unique_lock lock{mutex};
ForeachChunkInRange(memory, [&](std::pair<u8 *, ChunkDescriptor> &desc) __attribute__((always_inline)) {
desc.second.permission = permission;
MapInternal(desc);
});
}
std::optional<std::pair<u8 *, ChunkDescriptor>> MemoryManager::GetChunk(u8 *addr) {
std::shared_lock lock{mutex};
if (!addressSpace.contains(addr)) [[unlikely]]
return std::nullopt;
auto chunkBase{chunks.lower_bound(addr)};
if (addr < chunkBase->first)
--chunkBase;
return std::make_optional(*chunkBase);
}
__attribute__((always_inline)) void MemoryManager::MapCodeMemory(span<u8> memory, memory::Permission permission) {
std::unique_lock lock{mutex};
MapInternal(std::pair<u8 *, ChunkDescriptor>(
memory.data(),{
.size = memory.size(),
.permission = permission,
.state = memory::states::Code
}));
}
__attribute__((always_inline)) void MemoryManager::MapMutableCodeMemory(span<u8> memory) {
std::unique_lock lock{mutex};
MapInternal(std::pair<u8 *, ChunkDescriptor>(
memory.data(),{
.size = memory.size(),
.permission = {true, true, false},
.state = memory::states::CodeMutable
}));
}
__attribute__((always_inline)) void MemoryManager::MapStackMemory(span<u8> memory) {
std::unique_lock lock{mutex};
MapInternal(std::pair<u8 *, ChunkDescriptor>(
memory.data(),{
.size = memory.size(),
.permission = {true, true, false},
.state = memory::states::Stack,
.isSrcMergeDisallowed = true
}));
}
__attribute__((always_inline)) void MemoryManager::MapHeapMemory(span<u8> memory) {
std::unique_lock lock{mutex};
MapInternal(std::pair<u8 *, ChunkDescriptor>(
memory.data(),{
.size = memory.size(),
.permission = {true, true, false},
.state = memory::states::Heap
}));
}
__attribute__((always_inline)) void MemoryManager::MapSharedMemory(span<u8> memory, memory::Permission permission) {
std::unique_lock lock{mutex};
MapInternal(std::pair<u8 *, ChunkDescriptor>(
memory.data(),{
.size = memory.size(),
.permission = permission,
.state = memory::states::SharedMemory,
.isSrcMergeDisallowed = true
}));
}
__attribute__((always_inline)) void MemoryManager::MapTransferMemory(span<u8> memory, memory::Permission permission) {
std::unique_lock lock{mutex};
MapInternal(std::pair<u8 *, ChunkDescriptor>(
memory.data(),{
.size = memory.size(),
.permission = permission,
.state = permission.raw ? memory::states::TransferMemory : memory::states::TransferMemoryIsolated,
.isSrcMergeDisallowed = true
}));
}
__attribute__((always_inline)) void MemoryManager::MapThreadLocalMemory(span<u8> memory) {
std::unique_lock lock{mutex};
MapInternal(std::pair<u8 *, ChunkDescriptor>(
memory.data(),{
.size = memory.size(),
.permission = {true, true, false},
.state = memory::states::ThreadLocal
}));
}
__attribute__((always_inline)) void MemoryManager::Reserve(span<u8> memory) {
std::unique_lock lock{mutex};
MapInternal(std::pair<u8 *, ChunkDescriptor>(
memory.data(),{
.size = memory.size(),
.permission = {false, false, false},
.state = memory::states::Reserved
}));
}
__attribute__((always_inline)) void MemoryManager::UnmapMemory(span<u8> memory) {
std::unique_lock lock{mutex};
ForeachChunkInRange(memory, [&](const std::pair<u8 *, ChunkDescriptor> &desc) {
if (desc.second.state != memory::states::Unmapped)
FreeMemory(span<u8>(desc.first, desc.second.size));
});
MapInternal(std::pair<u8 *, ChunkDescriptor>(
memory.data(),{
.size = memory.size(),
.permission = {false, false, false},
.state = memory::states::Unmapped
}));
}
__attribute__((always_inline)) void MemoryManager::FreeMemory(span<u8> memory) {
u8 *alignedStart{util::AlignUp(memory.data(), constant::PageSize)};
u8 *alignedEnd{util::AlignDown(memory.end().base(), constant::PageSize)};
if (alignedStart < alignedEnd)
if (madvise(alignedStart, static_cast<size_t>(alignedEnd - alignedStart), MADV_REMOVE) == -1)
throw exception("Failed to free memory: {}", strerror(errno)) ;
if (alignedStart < alignedEnd) [[likely]]
if (madvise(alignedStart, static_cast<size_t>(alignedEnd - alignedStart), MADV_REMOVE) == -1) [[unlikely]]
Logger::Error("Failed to free memory: {}", strerror(errno));
}
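Note that MADV_REMOVE punches a hole in the shared backing, so the physical pages are released while the virtual range stays mapped and subsequent reads return zeroes; that is why this function touches no chunk bookkeeping:
// madvise(alignedStart, length, MADV_REMOVE) frees the backing store for those pages only;
// the guest still sees a valid mapping that now reads back as zero-filled memory.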
void MemoryManager::InsertChunk(const ChunkDescriptor &chunk) {
std::unique_lock lock(mutex);
void MemoryManager::SvcMapMemory(span<u8> source, span<u8> destination) {
std::unique_lock lock{mutex};
auto upper{std::upper_bound(chunks.begin(), chunks.end(), chunk.ptr, [](const u8 *ptr, const ChunkDescriptor &chunk) -> bool { return ptr < chunk.ptr; })};
if (upper == chunks.begin())
throw exception("InsertChunk: Chunk inserted outside address space: 0x{:X} - 0x{:X} and 0x{:X} - 0x{:X}", upper->ptr, upper->ptr + upper->size, chunk.ptr, chunk.ptr + chunk.size);
MapInternal(std::pair<u8 *, ChunkDescriptor>(
destination.data(),{
.size = destination.size(),
.permission = {true, true, false},
.state = memory::states::Stack,
.isSrcMergeDisallowed = true
}));
upper = chunks.erase(upper, std::upper_bound(upper, chunks.end(), chunk.ptr + chunk.size, [](const u8 *ptr, const ChunkDescriptor &chunk) -> bool { return ptr < chunk.ptr + chunk.size; }));
if (upper != chunks.end() && upper->ptr < chunk.ptr + chunk.size) {
auto end{upper->ptr + upper->size};
upper->ptr = chunk.ptr + chunk.size;
upper->size = static_cast<size_t>(end - upper->ptr);
}
std::memcpy(destination.data(), source.data(), source.size());
auto lower{std::prev(upper)};
if (lower->ptr == chunk.ptr && lower->size == chunk.size) {
lower->state = chunk.state;
lower->permission = chunk.permission;
lower->attributes = chunk.attributes;
lower->memory = chunk.memory;
} else if (lower->ptr + lower->size > chunk.ptr + chunk.size) {
auto lowerExtension{*lower};
lowerExtension.ptr = chunk.ptr + chunk.size;
lowerExtension.size = static_cast<size_t>((lower->ptr + lower->size) - lowerExtension.ptr);
ForeachChunkInRange(source, [&](std::pair<u8 *, ChunkDescriptor> &desc) __attribute__((always_inline)) {
desc.second.permission = {false, false, false};
desc.second.attributes.isBorrowed = true;
MapInternal(desc);
});
}
lower->size = static_cast<size_t>(chunk.ptr - lower->ptr);
if (lower->size) {
upper = chunks.insert(upper, lowerExtension);
chunks.insert(upper, chunk);
} else {
auto lower2{std::prev(lower)};
if (chunk.IsCompatible(*lower2) && lower2->ptr + lower2->size >= chunk.ptr) {
lower2->size = static_cast<size_t>(chunk.ptr + chunk.size - lower2->ptr);
upper = chunks.erase(lower);
} else {
*lower = chunk;
}
upper = chunks.insert(upper, lowerExtension);
}
} else if (chunk.IsCompatible(*lower) && lower->ptr + lower->size >= chunk.ptr) {
lower->size = static_cast<size_t>(chunk.ptr + chunk.size - lower->ptr);
} else {
if (lower->ptr + lower->size > chunk.ptr)
lower->size = static_cast<size_t>(chunk.ptr - lower->ptr);
if (upper != chunks.end() && chunk.IsCompatible(*upper) && chunk.ptr + chunk.size >= upper->ptr) {
upper->ptr = chunk.ptr;
upper->size = chunk.size + upper->size;
} else {
chunks.insert(upper, chunk);
}
void MemoryManager::SvcUnmapMemory(span<u8> source, span<u8> destination) {
std::unique_lock lock{mutex};
auto dstChunk = chunks.lower_bound(destination.data());
if (destination.data() < dstChunk->first)
--dstChunk;
while (dstChunk->second.state.value == memory::states::Unmapped)
++dstChunk;
if ((destination.data() + destination.size()) > dstChunk->first) [[likely]] {
ForeachChunkInRange(span<u8>{source.data() + (dstChunk->first - destination.data()), dstChunk->second.size}, [&](std::pair<u8 *, ChunkDescriptor> &desc) __attribute__((always_inline)) {
desc.second.permission = dstChunk->second.permission;
desc.second.attributes.isBorrowed = false;
MapInternal(desc);
});
std::memcpy(source.data() + (dstChunk->first - destination.data()), dstChunk->first, dstChunk->second.size);
}
}
std::optional<ChunkDescriptor> MemoryManager::Get(void *ptr) {
std::shared_lock lock(mutex);
void MemoryManager::AddRef(std::shared_ptr<type::KMemory> ptr) {
memRefs.push_back(std::move(ptr));
}
auto chunk{std::upper_bound(chunks.begin(), chunks.end(), reinterpret_cast<u8 *>(ptr), [](const u8 *ptr, const ChunkDescriptor &chunk) -> bool { return ptr < chunk.ptr; })};
if (chunk-- != chunks.begin())
if ((chunk->ptr + chunk->size) > ptr)
return std::make_optional(*chunk);
void MemoryManager::RemoveRef(std::shared_ptr<type::KMemory> ptr) {
auto i = std::find(memRefs.begin(), memRefs.end(), ptr);
return std::nullopt;
if (*i == ptr) [[likely]]
memRefs.erase(i);
}
size_t MemoryManager::GetUserMemoryUsage() {
std::shared_lock lock(mutex);
std::shared_lock lock{mutex};
size_t size{};
for (const auto &chunk : chunks)
if (chunk.state == memory::states::Heap)
size += chunk.size;
return size + code.size() + state.process->mainThreadStack->guest.size();
auto currChunk = chunks.lower_bound(heap.data());
while (currChunk->first < heap.end().base()) {
if (currChunk->second.state == memory::states::Heap)
size += currChunk->second.size;
++currChunk;
}
return size + code.size() + state.process->mainThreadStack.size();
}
size_t MemoryManager::GetSystemResourceUsage() {
std::shared_lock lock(mutex);
std::shared_lock lock{mutex};
constexpr size_t KMemoryBlockSize{0x40};
return std::min(static_cast<size_t>(state.process->npdm.meta.systemResourceSize), util::AlignUp(chunks.size() * KMemoryBlockSize, constant::PageSize));
}

View File

@ -6,6 +6,7 @@
#include <sys/mman.h>
#include <common.h>
#include <common/file_descriptor.h>
#include <map>
namespace skyline {
namespace kernel::type {
@ -17,19 +18,19 @@ namespace skyline {
/**
* @brief Initializes all permissions to false
*/
constexpr Permission() : r(), w(), x() {}
constexpr Permission() : raw{} {}
/**
* @brief Initializes permissions where the first three bits correspond to RWX
*/
constexpr explicit Permission(u8 raw) : raw(raw) {}
constexpr explicit Permission(u8 raw) : raw{raw} {}
/**
* @param read If memory has read permission
* @param write If memory has write permission
* @param execute If memory has execute permission
*/
constexpr Permission(bool read, bool write, bool execute) : r(read), w(write), x(execute) {}
constexpr Permission(bool read, bool write, bool execute) : r{read}, w{write}, x{execute} {}
inline bool operator==(const Permission &rhs) const { return r == rhs.r && w == rhs.w && x == rhs.x; }
@ -62,13 +63,20 @@ namespace skyline {
* @url https://switchbrew.org/wiki/SVC#MemoryAttribute
*/
union MemoryAttribute {
/**
* @brief Initializes all attributes to false
*/
constexpr MemoryAttribute() : value{} {}
constexpr explicit MemoryAttribute(u8 value) : value{value} {}
struct {
bool isBorrowed : 1; //!< This is required for async IPC user buffers
bool isIpcLocked : 1; //!< True when IpcRefCount > 0
bool isDeviceShared : 1; //!< True when DeviceRefCount > 0
bool isUncached : 1; //!< This is used to disable memory caching to share memory with the GPU
};
u32 value{};
u8 value;
};
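For orientation (not part of the commit): isUncached is bit 3 of this union, and it is the only attribute svcSetMemoryAttribute accepts in its mask, as checked later in svc.cpp; for example:
memory::MemoryAttribute mask{static_cast<u8>(0b1000)};  // only isUncached set
memory::MemoryAttribute value{static_cast<u8>(0b1000)}; // request uncached; a value of 0 would clear it again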
/**
@ -93,35 +101,37 @@ namespace skyline {
enum class MemoryType : u8 {
Unmapped = 0x0,
Io = 0x1,
Normal = 0x2,
CodeStatic = 0x3,
Static = 0x2,
Code = 0x3,
CodeMutable = 0x4,
Heap = 0x5,
SharedMemory = 0x6,
Alias = 0x7,
ModuleCodeStatic = 0x8,
ModuleCodeMutable = 0x9,
AliasCode = 0x8,
AliasCodeData = 0x9,
Ipc = 0xA,
Stack = 0xB,
ThreadLocal = 0xC,
TransferMemoryIsolated = 0xD,
TransferMemory = 0xE,
ProcessMemory = 0xF,
SharedCode = 0xF,
Reserved = 0x10,
NonSecureIpc = 0x11,
NonDeviceIpc = 0x12,
KernelStack = 0x13,
CodeReadOnly = 0x14,
CodeWritable = 0x15,
CodeGenerated = 0x14,
CodeExternal = 0x15,
Coverage = 0x16,
InsecureMemory = 0x17
};
/**
* @url https://switchbrew.org/wiki/SVC#MemoryState
*/
union MemoryState {
constexpr MemoryState(const u32 value) : value(value) {}
constexpr MemoryState(const u32 value) : value{value} {}
constexpr MemoryState() : value(0) {}
constexpr MemoryState() : value{} {}
constexpr bool operator==(const MemoryState &other) const {
return value == other.value;
@ -138,7 +148,7 @@ namespace skyline {
bool ipcSendAllowed : 1; //!< If this block is allowed to be sent as an IPC buffer with flags=0
bool nonDeviceIpcSendAllowed : 1; //!< If this block is allowed to be sent as an IPC buffer with flags=3
bool nonSecureIpcSendAllowed : 1; //!< If this block is allowed to be sent as an IPC buffer with flags=1
bool _pad0_ : 1;
bool isMappedInKernel : 1; //!< If this block is mapped in kernel
bool processPermissionChangeAllowed : 1; //!< If the application can use svcSetProcessMemoryPermission on this block
bool mapAllowed : 1; //!< If the application can use svcMapMemory on this block
bool unmapProcessCodeMemoryAllowed : 1; //!< If the application can use svcUnmapProcessCodeMemory on this block
@ -151,8 +161,9 @@ namespace skyline {
bool mapProcessAllowed : 1; //!< If the application can use svcMapProcessMemory on this block
bool attributeChangeAllowed : 1; //!< If the application can use svcSetMemoryAttribute on this block
bool codeMemoryAllowed : 1; //!< If the application can use svcCreateCodeMemory on this block
bool isLinearMapped : 1; //!< If this block is mapped linearly
};
u32 value{};
u32 value;
};
static_assert(sizeof(MemoryState) == sizeof(u32));
@ -162,26 +173,29 @@ namespace skyline {
*/
namespace states {
constexpr MemoryState Unmapped{0x00000000};
constexpr MemoryState Io{0x00002001};
constexpr MemoryState CodeStatic{0x00DC7E03};
constexpr MemoryState CodeMutable{0x03FEBD04};
constexpr MemoryState Heap{0x037EBD05};
constexpr MemoryState SharedMemory{0x00402006};
constexpr MemoryState Alias{0x00482907};
constexpr MemoryState AliasCode{0x00DD7E08};
constexpr MemoryState AliasCodeData{0x03FFBD09};
constexpr MemoryState Ipc{0x005C3C0A};
constexpr MemoryState Stack{0x005C3C0B};
constexpr MemoryState ThreadLocal{0x0040200C};
constexpr MemoryState TransferMemoryIsolated{0x015C3C0D};
constexpr MemoryState TransferMemory{0x005C380E};
constexpr MemoryState SharedCode{0x0040380F};
constexpr MemoryState Io{0x00182001};
constexpr MemoryState Static{0x00042002};
constexpr MemoryState Code{0x04DC7E03};
constexpr MemoryState CodeMutable{0x07FEBD04};
constexpr MemoryState Heap{0x077EBD05};
constexpr MemoryState SharedMemory{0x04402006};
constexpr MemoryState AliasCode{0x04DD7E08};
constexpr MemoryState AliasCodeData{0x07FFBD09};
constexpr MemoryState Ipc{0x045C3C0A};
constexpr MemoryState Stack{0x045C3C0B};
constexpr MemoryState ThreadLocal{0x0400200C};
constexpr MemoryState TransferMemoryIsolated{0x055C3C0D};
constexpr MemoryState TransferMemory{0x045C380E};
constexpr MemoryState SharedCode{0x0440380F};
constexpr MemoryState Reserved{0x00000010};
constexpr MemoryState NonSecureIpc{0x005C3811};
constexpr MemoryState NonDeviceIpc{0x004C2812};
constexpr MemoryState NonSecureIpc{0x045C3811};
constexpr MemoryState NonDeviceIpc{0x044C2812};
constexpr MemoryState KernelStack{0x00002013};
constexpr MemoryState CodeReadOnly{0x00402214};
constexpr MemoryState CodeWritable{0x00402015};
constexpr MemoryState CodeGenerated{0x04402214};
constexpr MemoryState CodeExternal{0x04402015};
constexpr MemoryState Coverage{0x00002016};
constexpr MemoryState InsecureMemory{0x05583817};
}
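As a reading aid (not part of the commit), each constant packs the MemoryType into its low byte and the capability bits from the bitfield above into the higher bits, e.g.:
memory::MemoryState heap{0x077EBD05};
// low byte 0x05 selects MemoryType::Heap; the remaining bits are flags such as
// permissionChangeAllowed, mapAllowed and the IPC-send permissions listed above.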
enum class AddressSpaceType : u8 {
@ -194,15 +208,14 @@ namespace skyline {
namespace kernel {
struct ChunkDescriptor {
u8 *ptr;
size_t size;
bool isSrcMergeDisallowed;
memory::Permission permission;
memory::MemoryState state;
memory::MemoryAttribute attributes;
kernel::type::KMemory *memory{};
memory::MemoryState state;
size_t size;
constexpr bool IsCompatible(const ChunkDescriptor &chunk) const {
return chunk.permission == permission && chunk.state.value == state.value && chunk.attributes.value == attributes.value && chunk.memory == memory;
constexpr bool IsCompatible(const ChunkDescriptor &chunk) const noexcept {
return chunk.permission == permission && chunk.state.value == state.value && chunk.attributes.value == attributes.value && !isSrcMergeDisallowed;
}
};
@ -212,7 +225,13 @@ namespace skyline {
class MemoryManager {
private:
const DeviceState &state;
std::vector<ChunkDescriptor> chunks;
std::map<u8 *, ChunkDescriptor> chunks;
std::vector<std::shared_ptr<type::KMemory>> memRefs;
void MapInternal(const std::pair<u8 *, ChunkDescriptor> &newDesc);
void ForeachChunkInRange(span<u8> memory, auto editCallback);
public:
memory::AddressSpaceType addressSpaceType{};
@ -225,11 +244,13 @@ namespace skyline {
span<u8> stack{};
span<u8> tlsIo{}; //!< TLS/IO
size_t processHeapSize; //!< For use by svcSetHeapSize
std::shared_mutex mutex; //!< Synchronizes any operations done on the VMM, it's locked in shared mode by readers and exclusive mode by writers
MemoryManager(const DeviceState &state);
MemoryManager(const DeviceState &state) noexcept;
~MemoryManager();
~MemoryManager() noexcept;
/**
* @note This should be called before any mappings in the VMM or calls to InitializeRegions are done
@ -240,7 +261,7 @@ namespace skyline {
/**
* @brief Mirrors a page-aligned mapping in the guest address space to the host address space
* @return A span to the host address space mirror mapped as RWX, unmapping it is the responsibility of the caller
* @return A span to the host address space mirror mapped as RW, unmapping it is the responsibility of the caller
* @note The supplied mapping **must** be page-aligned and inside the guest address space
*/
span<u8> CreateMirror(span<u8> mapping);
@ -248,21 +269,82 @@ namespace skyline {
/**
* @brief Mirrors multiple page-aligned mappings in the guest address space to the host address space
* @param totalSize The total size of all the regions to be mirrored combined
* @return A span to the host address space mirror mapped as RWX, unmapping it is the responsibility of the caller
* @return A span to the host address space mirror mapped as RW, unmapping it is the responsibility of the caller
* @note The supplied mappings **must** be page-aligned and inside the guest address space
* @note If a single mapping is mirrored, it is recommended to use CreateMirror instead
*/
span<u8> CreateMirrors(const std::vector<span<u8>> &regions);
/**
* @brief Frees the underlying physical memory for all full pages in the contained mapping
* @note All subsequent accesses to freed memory will return 0s
* @brief Sets the isBorrowed attribute for chunks within a certain range
*/
void SetRegionBorrowed(span<u8> memory, bool value);
/**
* @brief Sets the isUncached attribute for chunks within a certain range
*/
void SetRegionCpuCaching(span<u8> memory, bool value);
/**
* @brief Sets the permissions for chunks within a certain range
* @note The permissions set here are not accurate to the actual permissions set on the chunk and are only for the guest
*/
void SetRegionPermission(span<u8> memory, memory::Permission permission);
/**
* @brief Gets the descriptor of the chunk that contains this address
*/
std::optional<std::pair<u8 *, ChunkDescriptor>> GetChunk(u8 *addr);
// Various mapping functions for use by the guest, argument validity must be checked by the caller
void MapCodeMemory(span<u8> memory, memory::Permission permission);
void MapMutableCodeMemory(span<u8> memory);
void MapStackMemory(span<u8> memory);
void MapHeapMemory(span<u8> memory);
void MapSharedMemory(span<u8> memory, memory::Permission permission);
void MapTransferMemory(span<u8> memory, memory::Permission permission);
void MapThreadLocalMemory(span<u8> memory);
void Reserve(span<u8> memory);
/**
* @note `UnmapMemory` also calls `FreeMemory` on the unmapped memory range
*/
void UnmapMemory(span<u8> memory);
/**
* @brief Frees the underlying memory
* @note Memory that's not aligned to page boundaries at the edges of the span will not be freed
*/
void FreeMemory(span<u8> memory);
void InsertChunk(const ChunkDescriptor &chunk);
/**
* @brief Implements the memory manager side functionality of svcMapMemory
* @note Argument validity must be checked by the caller
*/
void SvcMapMemory(span<u8> source, span<u8> destination);
std::optional<ChunkDescriptor> Get(void *ptr);
/**
* @brief Implements the memory manager side functionality of svcUnmapMemory
* @note Argument validity must be checked by the caller
*/
void SvcUnmapMemory(span<u8> source, span<u8> destination);
/**
* @brief Adds a reference to shared memory, extending its lifetime until `RemoveRef` is called
*/
void AddRef(std::shared_ptr<type::KMemory> ptr);
/**
* @brief Removes the reference added by `AddRef`
*/
void RemoveRef(std::shared_ptr<type::KMemory> ptr);
/**
* @return The cumulative size of all heap (Physical Memory + Process Heap) memory mappings, the code region and the main thread stack in bytes
@ -278,7 +360,7 @@ namespace skyline {
/**
* @return If the supplied region is contained within the accessible guest address space
*/
bool AddressSpaceContains(span<u8> region) const {
constexpr bool AddressSpaceContains(span<u8> region) const {
if (addressSpaceType == memory::AddressSpaceType::AddressSpace36Bit)
return codeBase36Bit.contains(region) || base.contains(region);
else
@ -287,3 +369,16 @@ namespace skyline {
};
}
}
template<> struct fmt::formatter<skyline::memory::Permission> {
template<typename ParseContext>
constexpr auto parse(ParseContext& ctx)
{
return ctx.begin();
}
template<typename FormatContext>
constexpr auto format(skyline::memory::Permission const& permission, FormatContext& ctx)
{
return fmt::format_to(ctx.out(), "{}{}{}", permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
}
};
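With this formatter, Permission values can be passed directly to fmt/Logger calls, as the reworked SVCs below do; a small usage sketch:
skyline::memory::Permission rw{true, true, false};
Logger::Warn("'permission' invalid: {}", rw); // prints "'permission' invalid: RW-"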

View File

@ -13,207 +13,234 @@ namespace skyline::kernel::svc {
void SetHeapSize(const DeviceState &state) {
u32 size{state.ctx->gpr.w1};
if (!util::IsAligned(size, 0x200000)) {
if (!util::IsAligned(size, 0x200000)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
state.ctx->gpr.x1 = 0;
Logger::Warn("'size' not divisible by 2MB: {}", size);
Logger::Warn("'size' not divisible by 2MB: 0x{:X}", size);
return;
} else if (state.process->memory.heap.size() < size) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
state.ctx->gpr.x1 = 0;
Logger::Warn("'size' exceeded size of heap region: 0x{:X}", size);
return;
}
auto &heap{state.process->heap};
heap->Resize(size);
size_t heapCurrSize{state.process->memory.processHeapSize};
u8 *heapBaseAddr{state.process->memory.heap.data()};
if (heapCurrSize < size)
state.process->memory.MapHeapMemory(span<u8>{heapBaseAddr + heapCurrSize, size - heapCurrSize});
else if (size < heapCurrSize)
state.process->memory.UnmapMemory(span<u8>{heapBaseAddr + size, heapCurrSize - size});
state.process->memory.processHeapSize = size;
state.ctx->gpr.w0 = Result{};
state.ctx->gpr.x1 = reinterpret_cast<u64>(heap->guest.data());
state.ctx->gpr.x1 = reinterpret_cast<u64>(heapBaseAddr);
Logger::Debug("Allocated at 0x{:X} - 0x{:X} (0x{:X} bytes)", heap->guest.data(), heap->guest.end().base(), heap->guest.size());
Logger::Debug("Heap size changed to 0x{:X} bytes (0x{:X} - 0x{:X})", size, heapBaseAddr, heapBaseAddr + size);
}
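To make the grow/shrink path concrete (illustrative sizes): with processHeapSize currently at 0x400000, a request for 0x200000 unmaps the top half, while a request for 0x600000 maps the missing 0x200000 on top:
// shrink: UnmapMemory(span<u8>{heapBaseAddr + 0x200000, 0x200000})
// grow:   MapHeapMemory(span<u8>{heapBaseAddr + 0x400000, 0x200000})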
void SetMemoryPermission(const DeviceState &state) {
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
if (!util::IsPageAligned(address)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("'address' not page aligned: 0x{:X}", address);
return;
}
u64 size{state.ctx->gpr.x1};
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
return;
}
if (address >= (address + size) || !state.process->memory.AddressSpaceContains(span<u8>{address, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidCurrentMemory;
Logger::Warn("Invalid address and size combination: 'address': 0x{:X}, 'size': 0x{:X} ", address, size);
return;
}
memory::Permission newPermission(static_cast<u8>(state.ctx->gpr.w2));
if ((!newPermission.r && newPermission.w) || newPermission.x) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidNewMemoryPermission;
Logger::Warn("'permission' invalid: {}", newPermission);
return;
}
auto chunk{state.process->memory.GetChunk(address).value()};
if (!chunk.second.state.permissionChangeAllowed) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidState;
Logger::Warn("Permission change not allowed for chunk at: 0x{:X}, state: 0x{:X}", chunk.first, chunk.second.state.value);
return;
}
state.process->memory.SetRegionPermission(span<u8>(address, size), newPermission);
Logger::Debug("Set permission to {}{}{} at 0x{:X} - 0x{:X} (0x{:X} bytes)", newPermission.r ? 'R' : '-', newPermission.w ? 'W' : '-', newPermission.x ? 'X' : '-', address, address + size, size);
state.ctx->gpr.w0 = Result{};
}
void SetMemoryAttribute(const DeviceState &state) {
auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
if (!util::IsPageAligned(pointer)) {
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
if (!util::IsPageAligned(address)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("'pointer' not page aligned: 0x{:X}", pointer);
Logger::Warn("'address' not page aligned: 0x{:X}", address);
return;
}
size_t size{state.ctx->gpr.x1};
if (!util::IsPageAligned(size)) {
u64 size{state.ctx->gpr.x1};
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
Logger::Warn("'size' {}: 0x{:X}", size ? "not page aligned" : "is zero", size);
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
return;
}
memory::MemoryAttribute mask{.value = state.ctx->gpr.w2};
memory::MemoryAttribute value{.value = state.ctx->gpr.w3};
if (address >= (address + size) || !state.process->memory.AddressSpaceContains(span<u8>{address, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidCurrentMemory;
Logger::Warn("Invalid address and size combination: 'address': 0x{:X}, 'size': 0x{:X} ", address, size);
return;
}
memory::MemoryAttribute mask{static_cast<u8>(state.ctx->gpr.w2)};
memory::MemoryAttribute value{static_cast<u8>(state.ctx->gpr.w3)};
auto maskedValue{mask.value | value.value};
if (maskedValue != mask.value || !mask.isUncached || mask.isDeviceShared || mask.isBorrowed || mask.isIpcLocked) {
if (maskedValue != mask.value || !mask.isUncached || mask.isDeviceShared || mask.isBorrowed || mask.isIpcLocked) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidCombination;
Logger::Warn("'mask' invalid: 0x{:X}, 0x{:X}", mask.value, value.value);
return;
}
auto chunk{state.process->memory.Get(pointer)};
if (!chunk) {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("Cannot find memory region: 0x{:X}", pointer);
return;
}
auto chunk{state.process->memory.GetChunk(address).value()};
if (!chunk->state.attributeChangeAllowed) {
// We only check the first found chunk for whatever reason.
if (!chunk.second.state.attributeChangeAllowed) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidState;
Logger::Warn("Attribute change not allowed for chunk: 0x{:X}", pointer);
Logger::Warn("Attribute change not allowed for chunk: 0x{:X}", chunk.first);
return;
}
auto newChunk{*chunk};
newChunk.ptr = pointer;
newChunk.size = size;
newChunk.attributes.isUncached = value.isUncached;
state.process->memory.InsertChunk(newChunk);
state.process->memory.SetRegionCpuCaching(span<u8>{address, size}, value.isUncached);
Logger::Debug("Set CPU caching to {} at 0x{:X} - 0x{:X} (0x{:X} bytes)", !static_cast<bool>(value.isUncached), pointer, pointer + size, size);
Logger::Debug("Set CPU caching to {} at 0x{:X} - 0x{:X} (0x{:X} bytes)", static_cast<bool>(value.isUncached), address, address + size, size);
state.ctx->gpr.w0 = Result{};
}
void MapMemory(const DeviceState &state) {
auto destination{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
auto source{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
u8 *destination{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
u8 *source{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
size_t size{state.ctx->gpr.x2};
if (!util::IsPageAligned(destination) || !util::IsPageAligned(source)) {
if (!util::IsPageAligned(destination) || !util::IsPageAligned(source)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("Addresses not page aligned: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
Logger::Warn("Addresses not page aligned: 'source': 0x{:X}, 'destination': 0x{:X}, 'size': 0x{:X} bytes", source, destination, size);
return;
}
if (!util::IsPageAligned(size)) {
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
Logger::Warn("'size' {}: 0x{:X}", size ? "not page aligned" : "is zero", size);
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
return;
}
auto stack{state.process->memory.stack};
if (!stack.contains(span<u8>{destination, size})) {
if (destination >= (destination + size) || !state.process->memory.AddressSpaceContains(span<u8>{destination, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidCurrentMemory;
Logger::Warn("Invalid address and size combination: 'destination': 0x{:X}, 'size': 0x{:X} bytes", destination, size);
return;
}
if (source >= (source + size) || !state.process->memory.AddressSpaceContains(span<u8>{source, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidCurrentMemory;
Logger::Warn("Invalid address and size combination: 'source': 0x{:X}, 'size': 0x{:X} bytes", source, size);
return;
}
if (!state.process->memory.stack.contains(span<u8>{destination, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidMemoryRegion;
Logger::Warn("Destination not within stack region: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
Logger::Warn("Destination not within stack region: 'source': 0x{:X}, 'destination': 0x{:X}, 'size': 0x{:X} bytes", source, destination, size);
return;
}
auto chunk{state.process->memory.Get(source)};
if (!chunk) {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("Source has no descriptor: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
return;
}
if (!chunk->state.mapAllowed) {
auto chunk{state.process->memory.GetChunk(source)};
if (!chunk->second.state.mapAllowed) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidState;
Logger::Warn("Source doesn't allow usage of svcMapMemory: Source: 0x{:X}, Destination: 0x{:X}, Size: 0x{:X}, MemoryState: 0x{:X}", source, destination, size, chunk->state.value);
Logger::Warn("Source doesn't allow usage of svcMapMemory: 'source': 0x{:X}, 'size': 0x{:X}, MemoryState: 0x{:X}", source, size, chunk->second.state.value);
return;
}
state.process->NewHandle<type::KPrivateMemory>(span<u8>{destination, size}, chunk->permission, memory::states::Stack);
std::memcpy(destination, source, size);
auto object{state.process->GetMemoryObject(source)};
if (!object)
throw exception("svcMapMemory: Cannot find memory object in handle table for address 0x{:X}", source);
object->item->UpdatePermission(span<u8>{source, size}, {false, false, false});
state.process->memory.SvcMapMemory(span<u8>{source, size}, span<u8>{destination, size});
Logger::Debug("Mapped range 0x{:X} - 0x{:X} to 0x{:X} - 0x{:X} (Size: 0x{:X} bytes)", source, source + size, destination, destination + size, size);
state.ctx->gpr.w0 = Result{};
}
void UnmapMemory(const DeviceState &state) {
auto source{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
auto destination{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
u8 *destination{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
u8 *source{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
size_t size{state.ctx->gpr.x2};
if (!util::IsPageAligned(destination) || !util::IsPageAligned(source)) {
if (!util::IsPageAligned(destination) || !util::IsPageAligned(source)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("Addresses not page aligned: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
Logger::Warn("Addresses not page aligned: 'source': 0x{:X}, 'destination': 0x{:X}, 'size': {} bytes", source, destination, size);
return;
}
if (!util::IsPageAligned(size)) {
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
Logger::Warn("'size' {}: 0x{:X}", size ? "not page aligned" : "is zero", size);
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
return;
}
auto stack{state.process->memory.stack};
if (!stack.contains(span<u8>{source, size})) {
if (!state.process->memory.stack.contains(span<u8>{destination, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidMemoryRegion;
Logger::Warn("Source not within stack region: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
Logger::Warn("Source not within stack region: 'source': 0x{:X}, 'destination': 0x{:X}, 'size': 0x{:X} bytes", source, destination, size);
return;
}
auto sourceChunk{state.process->memory.Get(source)};
auto destChunk{state.process->memory.Get(destination)};
if (!sourceChunk || !destChunk) {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("Addresses have no descriptor: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
return;
}
state.process->memory.SvcUnmapMemory(span<u8>{source, size}, span<u8>{destination, size});
state.process->memory.UnmapMemory(span<u8>{destination, size});
if (!destChunk->state.mapAllowed) {
state.ctx->gpr.w0 = result::InvalidState;
Logger::Warn("Destination doesn't allow usage of svcMapMemory: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes) 0x{:X}", source, destination, size, destChunk->state.value);
return;
}
auto destObject{state.process->GetMemoryObject(destination)};
if (!destObject)
throw exception("svcUnmapMemory: Cannot find destination memory object in handle table for address 0x{:X}", destination);
destObject->item->UpdatePermission(span<u8>{destination, size}, sourceChunk->permission);
std::memcpy(source, destination, size);
auto sourceObject{state.process->GetMemoryObject(source)};
if (!sourceObject)
throw exception("svcUnmapMemory: Cannot find source memory object in handle table for address 0x{:X}", source);
state.process->memory.FreeMemory(std::span<u8>(source, size));
state.process->CloseHandle(sourceObject->handle);
Logger::Debug("Unmapped range 0x{:X} - 0x{:X} to 0x{:X} - 0x{:X} (Size: 0x{:X} bytes)", source, source + size, destination, destination + size, size);
Logger::Debug("Unmapped range 0x{:X} - 0x{:X} to 0x{:X} - 0x{:X} (Size: 0x{:X} bytes)", destination, destination + size, source, source + size, size);
state.ctx->gpr.w0 = Result{};
}
void QueryMemory(const DeviceState &state) {
memory::MemoryInfo memInfo{};
auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x2)};
auto chunk{state.process->memory.Get(pointer)};
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x2)};
auto chunk{state.process->memory.GetChunk(address)};
if (chunk) {
memInfo = {
.address = reinterpret_cast<u64>(chunk->ptr),
.size = chunk->size,
.type = static_cast<u32>(chunk->state.type),
.attributes = chunk->attributes.value,
.permissions = static_cast<u32>(chunk->permission.Get()),
.address = reinterpret_cast<u64>(chunk->first),
.size = chunk->second.size,
.type = static_cast<u32>(chunk->second.state.type),
.attributes = chunk->second.attributes.value,
.permissions = static_cast<u32>(chunk->second.permission.Get()),
.deviceRefCount = 0,
.ipcRefCount = 0,
};
Logger::Debug("Address: 0x{:X}, Region Start: 0x{:X}, Size: 0x{:X}, Type: 0x{:X}, Is Uncached: {}, Permissions: {}{}{}", pointer, memInfo.address, memInfo.size, memInfo.type, static_cast<bool>(chunk->attributes.isUncached), chunk->permission.r ? 'R' : '-', chunk->permission.w ? 'W' : '-', chunk->permission.x ? 'X' : '-');
Logger::Debug("Address: 0x{:X}, Region Start: 0x{:X}, Size: 0x{:X}, Type: 0x{:X}, Attributes: 0x{:X}, Permissions: {}", address, memInfo.address, memInfo.size, memInfo.type, memInfo.attributes, chunk->second.permission);
} else {
auto addressSpaceEnd{reinterpret_cast<u64>(state.process->memory.addressSpace.end().base())};
u64 addressSpaceEnd{reinterpret_cast<u64>(state.process->memory.addressSpace.end().base())};
memInfo = {
.address = addressSpaceEnd,
.size = ~addressSpaceEnd + 1,
.size = 0 - addressSpaceEnd,
.type = static_cast<u32>(memory::MemoryType::Reserved),
};
Logger::Debug("Trying to query memory outside of the application's address space: 0x{:X}", pointer);
Logger::Debug("Trying to query memory outside of the application's address space: 0x{:X}", address);
}
*reinterpret_cast<memory::MemoryInfo *>(state.ctx->gpr.x0) = memInfo;
// The page info, which is always 0
state.ctx->gpr.w1 = 0;
state.ctx->gpr.w0 = Result{};
}
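The placeholder descriptor above spans from the end of the address space to the top of the 64-bit range; 0 - addressSpaceEnd relies on unsigned wrap-around, e.g. for a 39-bit address space:
// addressSpaceEnd = 1ULL << 39
// memInfo.size    = 0 - addressSpaceEnd = 0xFFFFFF8000000000 (2^64 - 2^39)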
@ -247,10 +274,6 @@ namespace skyline::kernel::svc {
return;
}
auto stack{state.process->GetMemoryObject(stackTop)};
if (!stack)
throw exception("svcCreateThread: Cannot find memory object in handle table for thread stack: 0x{:X}", stackTop);
auto thread{state.process->CreateThread(entry, entryArgument, stackTop, priority, static_cast<u8>(idealCore))};
if (thread) {
Logger::Debug("Created thread #{} with handle 0x{:X} (Entry Point: 0x{:X}, Argument: 0x{:X}, Stack Pointer: 0x{:X}, Priority: {}, Ideal Core: {})", thread->id, thread->handle, entry, entryArgument, stackTop, priority, idealCore);
@ -476,31 +499,38 @@ namespace skyline::kernel::svc {
try {
KHandle handle{state.ctx->gpr.w0};
auto object{state.process->GetHandle<type::KSharedMemory>(handle)};
auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
if (!util::IsPageAligned(pointer)) {
if (!util::IsPageAligned(address)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("'pointer' not page aligned: 0x{:X}", pointer);
Logger::Warn("'address' not page aligned: 0x{:X}", address);
return;
}
size_t size{state.ctx->gpr.x2};
if (!util::IsPageAligned(size)) {
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
Logger::Warn("'size' {}: 0x{:X}", size ? "not page aligned" : "is zero", size);
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
return;
}
if (address >= (address + size) || !state.process->memory.AddressSpaceContains(span<u8>{address, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidCurrentMemory;
Logger::Warn("Invalid address and size combination: 'address': 0x{:X}, 'size': 0x{:X}", address, size);
return;
}
memory::Permission permission(static_cast<u8>(state.ctx->gpr.w3));
if ((permission.w && !permission.r) || (permission.x && !permission.r)) {
Logger::Warn("'permission' invalid: {}{}{}", permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
if ((!permission.r && !permission.w && !permission.x) || (permission.w && !permission.r) || permission.x) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidNewMemoryPermission;
Logger::Warn("'permission' invalid: {}", permission);
return;
}
Logger::Debug("Mapping shared memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes) ({}{}{})", handle, pointer, pointer + size, size, permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
Logger::Debug("Mapping shared memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes), with permissions: ({}{}{})", handle, address, address + size, size, permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
object->Map(span<u8>{pointer, size}, permission);
object->Map(span<u8>{address, size}, permission);
state.process->memory.AddRef(object);
state.ctx->gpr.w0 = Result{};
} catch (const std::out_of_range &) {
@ -513,24 +543,31 @@ namespace skyline::kernel::svc {
try {
KHandle handle{state.ctx->gpr.w0};
auto object{state.process->GetHandle<type::KSharedMemory>(handle)};
auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
if (!util::IsPageAligned(pointer)) {
if (!util::IsPageAligned(address)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("'pointer' not page aligned: 0x{:X}", pointer);
Logger::Warn("'address' not page aligned: 0x{:X}", address);
return;
}
size_t size{state.ctx->gpr.x2};
if (!util::IsPageAligned(size)) {
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
Logger::Warn("'size' {}: 0x{:X}", size ? "not page aligned" : "is zero", size);
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
return;
}
Logger::Debug("Unmapping shared memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes)", handle, pointer, pointer + size, size);
if (address >= (address + size) || !state.process->memory.AddressSpaceContains(span<u8>{address, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidCurrentMemory;
Logger::Warn("Invalid address and size combination: 'address': 0x{:X}, 'size': 0x{:X}", address, size);
return;
}
object->Unmap(span<u8>{pointer, size});
Logger::Debug("Unmapping shared memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes)", handle, address, address + size, size);
object->Unmap(span<u8>{address, size});
state.process->memory.RemoveRef(object);
state.ctx->gpr.w0 = Result{};
} catch (const std::out_of_range &) {
@ -540,29 +577,40 @@ namespace skyline::kernel::svc {
}
void CreateTransferMemory(const DeviceState &state) {
auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
if (!util::IsPageAligned(pointer)) {
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x1)};
if (!util::IsPageAligned(address)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("'pointer' not page aligned: 0x{:X}", pointer);
Logger::Warn("'address' not page aligned: 0x{:X}", address);
return;
}
size_t size{state.ctx->gpr.x2};
if (!util::IsPageAligned(size)) {
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
Logger::Warn("'size' {}: 0x{:X}", size ? "not page aligned" : "is zero", size);
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
return;
}
if (address >= (address + size) || !state.process->memory.AddressSpaceContains(span<u8>{address, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidCurrentMemory;
Logger::Warn("Invalid address and size combination: 'address': 0x{:X}, 'size': 0x{:X}", address, size);
return;
}
memory::Permission permission(static_cast<u8>(state.ctx->gpr.w3));
if ((permission.w && !permission.r) || (permission.x && !permission.r)) {
Logger::Warn("'permission' invalid: {}{}{}", permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
if ((permission.w && !permission.r) || permission.x) [[unlikely]] {
Logger::Warn("'permission' invalid: {}", permission);
state.ctx->gpr.w0 = result::InvalidNewMemoryPermission;
return;
}
auto tmem{state.process->NewHandle<type::KTransferMemory>(pointer, size, permission, permission.raw ? memory::states::TransferMemory : memory::states::TransferMemoryIsolated)};
Logger::Debug("Creating transfer memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes) ({}{}{})", tmem.handle, pointer, pointer + size, size, permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
auto tmem{state.process->NewHandle<kernel::type::KTransferMemory>(size)};
if (!tmem.item->Map(span<u8>{address, size}, permission)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidState;
return;
}
Logger::Debug("Creating transfer memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes) ({}{}{})", tmem.handle, address, address + size, size, permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
state.ctx->gpr.w0 = Result{};
state.ctx->gpr.w1 = tmem.handle;
@ -896,8 +944,8 @@ namespace skyline::kernel::svc {
IdleTickCount = 10,
RandomEntropy = 11,
// 2.0.0+
AddressSpaceBaseAddr = 12,
AddressSpaceSize = 13,
AslrRegionBaseAddr = 12,
AslrRegionSize = 13,
StackRegionBaseAddr = 14,
StackRegionSize = 15,
// 3.0.0+
@ -965,11 +1013,11 @@ namespace skyline::kernel::svc {
out = util::GetTimeTicks();
break;
case InfoState::AddressSpaceBaseAddr:
case InfoState::AslrRegionBaseAddr:
out = reinterpret_cast<u64>(state.process->memory.base.data());
break;
case InfoState::AddressSpaceSize:
case InfoState::AslrRegionSize:
out = state.process->memory.base.size();
break;
@ -1019,93 +1067,64 @@ namespace skyline::kernel::svc {
}
void MapPhysicalMemory(const DeviceState &state) {
auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
size_t size{state.ctx->gpr.x1};
if (!util::IsPageAligned(pointer)) {
Logger::Warn("Pointer 0x{:X} is not page aligned", pointer);
if (!util::IsPageAligned(address)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("'address' not page aligned: 0x{:X}", address);
return;
}
if (!size || !util::IsPageAligned(size)) {
Logger::Warn("Size 0x{:X} is not page aligned", size);
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
return;
}
if (!state.process->memory.alias.contains(span<u8>{pointer, size})) {
Logger::Warn("Memory region 0x{:X} - 0x{:X} (0x{:X}) is invalid", pointer, pointer + size, size);
if (address >= (address + size)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidMemoryRegion;
Logger::Warn("Invalid address and size combination: 'address': 0x{:X}, 'size': 0x{:X}", address, size);
return;
}
state.process->NewHandle<type::KPrivateMemory>(span<u8>{pointer, size}, memory::Permission{true, true, false}, memory::states::Heap);
if (!state.process->memory.alias.contains(span<u8>{address, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidMemoryRegion;
Logger::Warn("Tried to map physical memory outside of alias region: 0x{:X} - 0x{:X} (0x{:X} bytes)", address, address + size, size);
return;
}
Logger::Debug("Mapped physical memory at 0x{:X} - 0x{:X} (0x{:X})", pointer, pointer + size, size);
state.process->memory.MapHeapMemory(span<u8>{address, size});
Logger::Debug("Mapped physical memory at 0x{:X} - 0x{:X} (0x{:X} bytes)", address, address + size, size);
state.ctx->gpr.w0 = Result{};
}
void UnmapPhysicalMemory(const DeviceState &state) {
auto pointer{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
u8 *address{reinterpret_cast<u8 *>(state.ctx->gpr.x0)};
size_t size{state.ctx->gpr.x1};
if (!util::IsPageAligned(pointer)) {
Logger::Warn("Pointer 0x{:X} is not page aligned", pointer);
if (!util::IsPageAligned(address)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidAddress;
Logger::Warn("'address' not page aligned: 0x{:X}", address);
return;
}
if (!size || !util::IsPageAligned(size)) {
Logger::Warn("Size 0x{:X} is not page aligned", size);
if (!size || !util::IsPageAligned(size)) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidSize;
Logger::Warn("'size' {}: 0x{:X}", size ? "is not page aligned" : "is zero", size);
return;
}
if (!state.process->memory.alias.contains(span<u8>{pointer, size})) {
Logger::Warn("Memory region 0x{:X} - 0x{:X} (0x{:X}) is invalid", pointer, pointer + size, size);
if (!state.process->memory.alias.contains(span<u8>{address, size})) [[unlikely]] {
state.ctx->gpr.w0 = result::InvalidMemoryRegion;
Logger::Warn("Tried to unmap physical memory outside of alias region: 0x{:X} - 0x{:X} (0x{:X} bytes)", address, address + size, size);
return;
}
Logger::Debug("Unmapped physical memory at 0x{:X} - 0x{:X} (0x{:X})", pointer, pointer + size, size);
auto end{pointer + size};
while (pointer < end) {
auto chunk{state.process->memory.Get(pointer)};
if (chunk && chunk->memory) {
if (chunk->memory->objectType != type::KType::KPrivateMemory)
throw exception("Trying to unmap non-private memory");
auto memory{static_cast<type::KPrivateMemory *>(chunk->memory)};
auto initialSize{memory->guest.size()};
if (memory->memoryState == memory::states::Heap) {
if (memory->guest.data() >= pointer) {
if (memory->guest.size() <= size) {
memory->Resize(0);
state.process->CloseHandle(memory->handle);
} else {
memory->Remap(span<u8>{pointer + size, static_cast<size_t>((pointer + memory->guest.size() - memory->guest.data())) - size});
}
} else if (memory->guest.data() < pointer) {
memory->Resize(static_cast<size_t>(pointer - memory->guest.data()));
if (memory->guest.data() + initialSize > end)
state.process->NewHandle<type::KPrivateMemory>(span<u8>{end, static_cast<size_t>(memory->guest.data() + initialSize - end)}, memory::Permission{true, true, false}, memory::states::Heap);
}
}
pointer += initialSize;
size -= initialSize;
} else {
auto block{*state.process->memory.Get(pointer)};
pointer += block.size;
size -= block.size;
}
}
state.process->memory.FreeMemory(std::span<u8>(pointer, size));
state.process->memory.UnmapMemory(span<u8>{address, size});
Logger::Debug("Unmapped physical memory at 0x{:X} - 0x{:X} (0x{:X} bytes)", address, address + size, size);
state.ctx->gpr.w0 = Result{};
}
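
The reworked handlers above all gate on the same argument checks before touching the memory manager: a page-aligned address, a non-zero page-aligned size, no wrap-around of address + size, and containment in the relevant region. A minimal standalone sketch of that ordering follows; PageSize, the function names, and the region parameters are assumptions for illustration, not part of the commit.

#include <cstddef>
#include <cstdint>

// Assumed page size for this sketch; the code above uses constant::PageSize.
constexpr std::size_t PageSize{0x1000};

bool IsPageAligned(std::uintptr_t value) {
    return (value & (PageSize - 1)) == 0;
}

// Returns true when 'address'/'size' describe a page-aligned, non-empty range that
// neither wraps around the address space nor leaves [regionBase, regionBase + regionSize)
bool ValidateRange(std::uintptr_t address, std::size_t size,
                   std::uintptr_t regionBase, std::size_t regionSize) {
    if (!IsPageAligned(address))
        return false; // would map to result::InvalidAddress above
    if (!size || !IsPageAligned(size))
        return false; // would map to result::InvalidSize above
    if (address >= address + size)
        return false; // overflow, result::InvalidCurrentMemory / InvalidMemoryRegion above
    return address >= regionBase && (address + size) <= (regionBase + regionSize);
}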

View File

@ -12,6 +12,12 @@ namespace skyline::kernel::svc {
*/
void SetHeapSize(const DeviceState &state);
/**
* @brief Reprotects a page-aligned memory region.
* @url https://switchbrew.org/wiki/SVC#SetMemoryPermission
*/
void SetMemoryPermission(const DeviceState &state);
/**
* @brief Changes the attributes of a page-aligned memory region; this is used to turn caching on/off for a given memory area
* @url https://switchbrew.org/wiki/SVC#SetMemoryAttribute
@ -20,7 +26,7 @@ namespace skyline::kernel::svc {
/**
* @brief Maps a memory range into a different range; mainly used for adding guard pages around the stack
* @url https://switchbrew.org/wiki/SVC#SetMemoryAttribute
* @url https://switchbrew.org/wiki/SVC#MapMemory
*/
void MapMemory(const DeviceState &state);
@ -115,7 +121,7 @@ namespace skyline::kernel::svc {
void UnmapSharedMemory(const DeviceState &state);
/**
* @brief Returns a handle to a KSharedMemory object
* @brief Returns a handle to a KTransferMemory object
* @url https://switchbrew.org/wiki/SVC#CreateTransferMemory
*/
void CreateTransferMemory(const DeviceState &state);
@ -269,7 +275,7 @@ namespace skyline::kernel::svc {
static constexpr std::array<SvcDescriptor, 0x80> SvcTable{
SVC_NONE, // 0x00 (Does not exist)
SVC_ENTRY(SetHeapSize), // 0x01
SVC_NONE, // 0x02
SVC_ENTRY(SetMemoryPermission), // 0x02
SVC_ENTRY(SetMemoryAttribute), // 0x03
SVC_ENTRY(MapMemory), // 0x04
SVC_ENTRY(UnmapMemory), // 0x05

View File

@ -0,0 +1,56 @@
// SPDX-License-Identifier: MPL-2.0
// Copyright © 2023 Skyline Team and Contributors (https://github.com/skyline-emu/)
#include <android/sharedmem.h>
#include <unistd.h>
#include <asm/unistd.h>
#include "KMemory.h"
#include "KProcess.h"
namespace skyline::kernel::type {
KMemory::KMemory(const DeviceState &state, KType objectType, size_t size) : KObject{state, objectType}, guest{} {
fileDescriptor = ASharedMemory_create(objectType == KType::KSharedMemory ? "HOS-KSharedMemory" : "HOS-KTransferMemory", size);
if (fileDescriptor < 0) [[unlikely]]
throw exception("An error occurred while creating shared memory: {}", fileDescriptor);
u8 *hostPtr{static_cast<u8 *>(mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileDescriptor, 0))};
if (hostPtr == MAP_FAILED) [[unlikely]]
throw exception("An occurred while mapping shared memory: {}", strerror(errno));
host = span<u8>{hostPtr, size};
}
u8 *KMemory::Map(span<u8> map, memory::Permission permission) {
if (!state.process->memory.AddressSpaceContains(map)) [[unlikely]]
throw exception("KMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", map.data(), map.end().base());
if (!util::IsPageAligned(map.data()) || !util::IsPageAligned(map.size())) [[unlikely]]
throw exception("KMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size());
if (guest.valid()) [[unlikely]]
throw exception("Mapping KMemory multiple times on guest is not supported: Requested Mapping: 0x{:X} - 0x{:X} (0x{:X}), Current Mapping: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size(), guest.data(), guest.end().base(), guest.size());
if (mmap(map.data(), map.size(), permission.Get() ? PROT_READ | PROT_WRITE : PROT_NONE, MAP_SHARED | (map.data() ? MAP_FIXED : 0), fileDescriptor, 0) == MAP_FAILED) [[unlikely]]
throw exception("An error occurred while mapping shared memory in guest: {}", strerror(errno));
guest = map;
return guest.data();
}
void KMemory::Unmap(span<u8> map) {
if (!state.process->memory.AddressSpaceContains(map)) [[unlikely]]
throw exception("KMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", map.data(), map.end().base());
if (!util::IsPageAligned(map.data()) || !util::IsPageAligned(map.size())) [[unlikely]]
throw exception("KMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} ({} bytes)", map.data(), map.end().base(), map.size());
if (guest.data() != map.data() || guest.size() != map.size()) [[unlikely]]
throw exception("Unmapping KMemory partially is not supported: Requested Unmap: 0x{:X} - 0x{:X} (0x{:X}), Current Mapping: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size(), guest.data(), guest.end().base(), guest.size());
if (mmap(map.data(), map.size(), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) [[unlikely]]
throw exception("An error occurred while unmapping shared/transfer memory in guest: {}", strerror(errno));
}
KMemory::~KMemory() {
if (host.valid())
munmap(host.data(), host.size());
close(fileDescriptor);
}
}
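
The new KMemory base pairs one persistent host mirror with at most one guest mapping of the same ashmem region, so writes through either view stay coherent. Below is a minimal sketch of that dual-mapping technique, assuming an already-reserved, page-aligned guest target; the struct and function names are hypothetical, and only ASharedMemory_create plus the two mmap calls are taken from the file above.

#include <android/sharedmem.h>
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

struct DualMapping {
    int fd{-1};
    std::uint8_t *host{};  // persistent host mirror
    std::uint8_t *guest{}; // fixed mapping inside the reserved guest address space
    std::size_t size{};
};

// Creates the ashmem backing, a host mirror, and a guest view over 'guestTarget'
bool CreateDualMapping(DualMapping &out, std::size_t size, std::uint8_t *guestTarget) {
    out.size = size;
    out.fd = ASharedMemory_create("sketch", size);
    if (out.fd < 0)
        return false;
    void *hostPtr{mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, out.fd, 0)};
    if (hostPtr == MAP_FAILED)
        return false;
    out.host = static_cast<std::uint8_t *>(hostPtr);
    // MAP_FIXED mirrors KMemory::Map's behaviour when a target address is supplied
    void *guestPtr{mmap(guestTarget, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, out.fd, 0)};
    if (guestPtr == MAP_FAILED)
        return false;
    out.guest = static_cast<std::uint8_t *>(guestPtr);
    return true; // writes through out.host are now visible through out.guest and vice versa
}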

View File

@ -1,5 +1,5 @@
// SPDX-License-Identifier: MPL-2.0
// Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
// Copyright © 2023 Skyline Team and Contributors (https://github.com/skyline-emu/)
#pragma once
@ -8,25 +8,31 @@
namespace skyline::kernel::type {
/**
* @brief The base kernel memory object that other memory classes derieve from
* @brief The base kernel shared memory object that other memory classes derive from
*/
class KMemory : public KObject {
private:
int fileDescriptor; //!< A file descriptor to the underlying shared memory
public:
KMemory(const DeviceState &state, KType objectType, span <u8> guest) : KObject(state, objectType), guest(guest) {}
KMemory(const DeviceState &state, KType objectType, size_t size);
/**
* @return A span representing the memory object on the guest
*/
span <u8> guest;
span<u8> guest;
span<u8> host; //!< We also keep a host mirror of the underlying shared memory for host access; it is persistently mapped and should be used by anything accessing the memory on the host
/**
* @brief Updates the permissions of a block of mapped memory
* @param ptr The starting address to change the permissions at
* @param size The size of the partition to change the permissions of
* @param permission The new permissions to be set for the memory
* @note 'ptr' needs to be in guest-reserved address space
*/
virtual void UpdatePermission(span <u8> map, memory::Permission permission) = 0;
virtual u8 *Map(span<u8> map, memory::Permission permission);
virtual ~KMemory() = default;
/**
* @note 'ptr' needs to be in guest-reserved address space
*/
virtual void Unmap(span<u8> map);
virtual ~KMemory();
};
}

View File

@ -14,7 +14,6 @@ namespace skyline::kernel::type {
KProcess,
KSharedMemory,
KTransferMemory,
KPrivateMemory,
KSession,
KEvent,
};

View File

@ -1,96 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
// Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
#include <android/sharedmem.h>
#include <asm/unistd.h>
#include <unistd.h>
#include "KPrivateMemory.h"
#include "KProcess.h"
namespace skyline::kernel::type {
KPrivateMemory::KPrivateMemory(const DeviceState &state, KHandle handle, span<u8> guest, memory::Permission permission, memory::MemoryState memState)
: permission(permission),
memoryState(memState),
handle(handle),
KMemory(state, KType::KPrivateMemory, guest) {
if (!state.process->memory.AddressSpaceContains(guest))
throw exception("KPrivateMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", guest.data(), guest.data() + guest.size());
if (!util::IsPageAligned(guest.data()) || !util::IsPageAligned(guest.size()))
throw exception("KPrivateMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", guest.data(), guest.data() + guest.size(), guest.size());
if (mprotect(guest.data(), guest.size(), PROT_READ | PROT_WRITE | PROT_EXEC) < 0) // We only need to reprotect as the allocation has already been reserved by the MemoryManager
throw exception("An occurred while mapping private memory: {} with 0x{:X} @ 0x{:X}", strerror(errno), guest.data(), guest.size());
state.process->memory.InsertChunk(ChunkDescriptor{
.ptr = guest.data(),
.size = guest.size(),
.permission = permission,
.state = memState,
.memory = this,
});
}
void KPrivateMemory::Resize(size_t nSize) {
if (mprotect(guest.data(), nSize, PROT_READ | PROT_WRITE | PROT_EXEC) < 0)
throw exception("An occurred while resizing private memory: {}", strerror(errno));
if (nSize < guest.size()) {
state.process->memory.InsertChunk(ChunkDescriptor{
.ptr = guest.data() + nSize,
.size = guest.size() - nSize,
.state = memory::states::Unmapped,
});
} else if (guest.size() < nSize) {
state.process->memory.InsertChunk(ChunkDescriptor{
.ptr = guest.data() + guest.size(),
.size = nSize - guest.size(),
.permission = permission,
.state = memoryState,
.memory = this,
});
}
guest = span<u8>{guest.data(), nSize};
}
void KPrivateMemory::Remap(span<u8> map) {
if (!state.process->memory.AddressSpaceContains(map))
throw exception("KPrivateMemory remapping isn't inside guest address space: 0x{:X} - 0x{:X}", map.data(), map.end().base());
if (!util::IsPageAligned(map.data()) || !util::IsPageAligned(map.size()))
throw exception("KPrivateMemory remapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size());
if (mprotect(guest.data(), guest.size(), PROT_NONE) < 0)
throw exception("An occurred while remapping private memory: {}", strerror(errno));
if (mprotect(map.data(), map.size(), PROT_READ | PROT_WRITE | PROT_EXEC) < 0)
throw exception("An occurred while remapping private memory: {}", strerror(errno));
}
void KPrivateMemory::UpdatePermission(span<u8> map, memory::Permission pPermission) {
auto ptr{std::clamp(map.data(), guest.data(), guest.end().base())};
auto size{std::min(map.size(), static_cast<size_t>((guest.end().base()) - ptr))};
if (ptr && !util::IsPageAligned(ptr))
throw exception("KPrivateMemory permission updated with a non-page-aligned address: 0x{:X}", ptr);
// If a static code region has been mapped as writable it needs to be changed to mutable
if (memoryState == memory::states::CodeStatic && pPermission.w)
memoryState = memory::states::CodeMutable;
state.process->memory.InsertChunk(ChunkDescriptor{
.ptr = ptr,
.size = size,
.permission = pPermission,
.state = memoryState,
.memory = this,
});
}
KPrivateMemory::~KPrivateMemory() {
mprotect(guest.data(), guest.size(), PROT_NONE);
state.process->memory.InsertChunk(ChunkDescriptor{
.ptr = guest.data(),
.size = guest.size(),
.state = memory::states::Unmapped,
});
}
}

View File

@ -1,43 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
// Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
#pragma once
#include "KMemory.h"
namespace skyline::kernel::type {
/**
* @brief KPrivateMemory is used to map memory local to the guest process
* @note This does not reflect a kernel object in Horizon OS, it is an abstraction which makes things simpler to manage in Skyline instead
*/
class KPrivateMemory : public KMemory {
public:
memory::Permission permission;
memory::MemoryState memoryState;
KHandle handle;
/**
* @param permission The permissions for the allocated memory (As reported to the application, host memory permissions aren't reflected by this)
* @note 'ptr' needs to be in guest-reserved address space
*/
KPrivateMemory(const DeviceState &state, KHandle handle, span<u8> guest, memory::Permission permission, memory::MemoryState memState);
/**
* @note There is no check regarding if any expansions will cause the memory mapping to leak into other mappings
* @note Any extensions will have the same permissions and memory state as the initial mapping as opposed to extending the end
*/
void Resize(size_t size);
/**
* @note This does not copy over anything, only contents of any overlapping regions will be retained
*/
void Remap(span<u8> map);
void UpdatePermission(span<u8> map, memory::Permission pPermission) override;
/**
* @brief The destructor of private memory, it deallocates the memory
*/
~KPrivateMemory();
};
}

View File

@ -8,12 +8,12 @@
#include "KProcess.h"
namespace skyline::kernel::type {
KProcess::TlsPage::TlsPage(std::shared_ptr<KPrivateMemory> memory) : memory(std::move(memory)) {}
KProcess::TlsPage::TlsPage(u8 *memory) : memory(memory) {}
u8 *KProcess::TlsPage::ReserveSlot() {
if (index == constant::TlsSlots)
return nullptr;
return memory->guest.data() + (constant::TlsSlotSize * index++);
return memory + (constant::TlsSlotSize * index++);
}
KProcess::KProcess(const DeviceState &state) : memory(state), KSyncObject(state, KType::KProcess) {}
@ -26,7 +26,7 @@ namespace skyline::kernel::type {
}
void KProcess::Kill(bool join, bool all, bool disableCreation) {
Logger::Warn("Killing {}{}KProcess{}", join ? "and joining " : "", all ? "all threads in " : "HOS-0 in ", disableCreation ? " with new thread creation disabled" : "");
Logger::Warn("Killing {}{}KProcess{}", join ? "and joining " : "", all ? "all threads in " : "HOS-1 in ", disableCreation ? " with new thread creation disabled" : "");
Logger::EmulationContext.Flush();
bool expected{false};
@ -49,8 +49,8 @@ namespace skyline::kernel::type {
void KProcess::InitializeHeapTls() {
constexpr size_t DefaultHeapSize{0x200000};
heap = std::make_shared<KPrivateMemory>(state, 0, span<u8>{state.process->memory.heap.data(), DefaultHeapSize}, memory::Permission{true, true, false}, memory::states::Heap);
InsertItem(heap); // Insert it into the handle table so GetMemoryObject will contain it
memory.MapHeapMemory(span<u8>{state.process->memory.heap.data(), DefaultHeapSize});
memory.processHeapSize = DefaultHeapSize;
tlsExceptionContext = AllocateTlsSlot();
}
@ -61,8 +61,26 @@ namespace skyline::kernel::type {
if ((slot = tlsPage->ReserveSlot()))
return slot;
slot = tlsPages.empty() ? reinterpret_cast<u8 *>(memory.tlsIo.data()) : ((*(tlsPages.end() - 1))->memory->guest.data() + constant::PageSize);
auto tlsPage{std::make_shared<TlsPage>(std::make_shared<KPrivateMemory>(state, 0, span<u8>{slot, constant::PageSize}, memory::Permission(true, true, false), memory::states::ThreadLocal))};
bool isAllocated{};
u8 *pageCandidate{state.process->memory.tlsIo.data()};
std::pair<u8 *, ChunkDescriptor> chunk;
while (state.process->memory.tlsIo.contains(span<u8>(pageCandidate, constant::PageSize))) {
chunk = memory.GetChunk(pageCandidate).value();
if (chunk.second.state == memory::states::Unmapped) {
memory.MapThreadLocalMemory(span<u8>{pageCandidate, constant::PageSize});
isAllocated = true;
break;
} else {
pageCandidate = chunk.first + chunk.second.size;
}
}
if (!isAllocated) [[unlikely]]
throw exception("Failed to find free memory for a tls slot!");
auto tlsPage{std::make_shared<TlsPage>(pageCandidate)};
tlsPages.push_back(tlsPage);
return tlsPage->ReserveSlot();
}
@ -72,8 +90,27 @@ namespace skyline::kernel::type {
if (disableThreadCreation)
return nullptr;
if (!stackTop && threads.empty()) { //!< Main thread stack is created by the kernel and owned by the process
mainThreadStack = std::make_shared<KPrivateMemory>(state, 0, span<u8>{state.process->memory.stack.data(), state.process->npdm.meta.mainThreadStackSize}, memory::Permission{true, true, false}, memory::states::Stack);
stackTop = mainThreadStack->guest.end().base();
bool isAllocated{};
u8 *pageCandidate{memory.stack.data()};
std::pair<u8 *, ChunkDescriptor> chunk;
while (state.process->memory.stack.contains(span<u8>(pageCandidate, state.process->npdm.meta.mainThreadStackSize))) {
chunk = memory.GetChunk(pageCandidate).value();
if (chunk.second.state == memory::states::Unmapped && chunk.second.size >= state.process->npdm.meta.mainThreadStackSize) {
memory.MapStackMemory(span<u8>{pageCandidate, state.process->npdm.meta.mainThreadStackSize});
isAllocated = true;
break;
} else {
pageCandidate = chunk.first + chunk.second.size;
}
}
if (!isAllocated) [[unlikely]]
throw exception("Failed to map main thread stack!");
stackTop = pageCandidate + state.process->npdm.meta.mainThreadStackSize;
mainThreadStack = span<u8>(pageCandidate, state.process->npdm.meta.mainThreadStackSize);
}
size_t tid{threads.size() + 1}; //!< The first thread is HOS-1 rather than HOS-0, this is to match the HOS kernel's behaviour
auto thread{NewHandle<KThread>(this, tid, entry, argument, stackTop, priority ? *priority : state.process->npdm.meta.mainThreadPriority, idealCore ? *idealCore : state.process->npdm.meta.idealCore).item};
@ -81,29 +118,6 @@ namespace skyline::kernel::type {
return thread;
}
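
Both AllocateTlsSlot and the main-thread-stack path above use the same linear scan: start at the region base, ask the memory manager for the chunk covering the candidate address, take it if it is unmapped (and large enough), otherwise skip to the end of that chunk. The sketch below generalises that scan over a simplified stand-in for the chunk map; all names here are hypothetical, and the real chunk tracking covers the entire address space with full ChunkDescriptors.

#include <cstddef>
#include <cstdint>
#include <map>
#include <optional>
#include <utility>

// base address -> {size, isUnmapped}, a stand-in for MemoryManager's chunk map
using ChunkMap = std::map<std::uintptr_t, std::pair<std::size_t, bool>>;

std::optional<std::uintptr_t> FindFreeRegion(const ChunkMap &chunks,
                                             std::uintptr_t regionBase, std::size_t regionSize,
                                             std::size_t wantedSize) {
    std::uintptr_t candidate{regionBase};
    while (candidate + wantedSize <= regionBase + regionSize) {
        auto it{chunks.upper_bound(candidate)};
        if (it == chunks.begin())
            return std::nullopt; // no chunk covers this address
        --it; // the chunk containing 'candidate'
        auto chunkEnd{it->first + it->second.first};
        if (chunkEnd <= candidate)
            return std::nullopt; // gap in the map, cannot continue the scan
        if (it->second.second && (chunkEnd - candidate) >= wantedSize)
            return candidate; // found an unmapped chunk that is large enough
        candidate = chunkEnd; // skip past the current chunk and retry
    }
    return std::nullopt;
}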
std::optional<KProcess::HandleOut<KMemory>> KProcess::GetMemoryObject(u8 *ptr) {
std::shared_lock lock(handleMutex);
for (KHandle index{}; index < handles.size(); index++) {
auto &object{handles[index]};
if (object) {
switch (object->objectType) {
case type::KType::KPrivateMemory:
case type::KType::KSharedMemory:
case type::KType::KTransferMemory: {
auto mem{std::static_pointer_cast<type::KMemory>(object)};
if (mem->guest.contains(ptr))
return std::make_optional<KProcess::HandleOut<KMemory>>({mem, constant::BaseHandleIndex + index});
}
default:
break;
}
}
}
return std::nullopt;
}
void KProcess::ClearHandleTable() {
std::shared_lock lock(handleMutex);
handles.clear();

View File

@ -42,9 +42,9 @@ namespace skyline {
*/
struct TlsPage {
u8 index{}; //!< The slots are assigned sequentially; this holds the index of the next TLS slot to be reserved
std::shared_ptr<KPrivateMemory> memory; //!< A single page sized memory allocation for this TLS page
u8 *memory; //!< A single page sized memory allocation for this TLS page
TlsPage(std::shared_ptr<KPrivateMemory> memory);
TlsPage(u8 *memory);
/**
* @return A non-null pointer to a TLS page slot on success, a nullptr will be returned if this page is full
@ -57,10 +57,8 @@ namespace skyline {
u8 *tlsExceptionContext{}; //!< A pointer to the TLS exception handling context slot
std::mutex tlsMutex; //!< A mutex to synchronize allocation of TLS pages to prevent extra pages from being created
std::vector<std::shared_ptr<TlsPage>> tlsPages; //!< All TLS pages allocated by this process
std::shared_ptr<KPrivateMemory> mainThreadStack; //!< The stack memory of the main thread stack is owned by the KProcess itself
std::shared_ptr<KPrivateMemory> heap;
vfs::NPDM npdm;
span<u8> mainThreadStack;
private:
std::shared_mutex handleMutex;
std::vector<std::shared_ptr<KObject>> handles;
@ -117,7 +115,7 @@ namespace skyline {
std::unique_lock lock(handleMutex);
std::shared_ptr<objectClass> item;
if constexpr (std::is_same<objectClass, KThread>() || std::is_same<objectClass, KPrivateMemory>())
if constexpr (std::is_same<objectClass, KThread>())
item = std::make_shared<objectClass>(state, constant::BaseHandleIndex + handles.size(), args...);
else
item = std::make_shared<objectClass>(state, args...);
@ -156,8 +154,6 @@ namespace skyline {
objectType = KType::KSharedMemory;
} else if constexpr(std::is_same<objectClass, KTransferMemory>()) {
objectType = KType::KTransferMemory;
} else if constexpr(std::is_same<objectClass, KPrivateMemory>()) {
objectType = KType::KPrivateMemory;
} else if constexpr(std::is_same<objectClass, KSession>()) {
objectType = KType::KSession;
} else if constexpr(std::is_same<objectClass, KEvent>()) {
@ -188,13 +184,6 @@ namespace skyline {
throw std::out_of_range(fmt::format("GetHandle was called with a deleted handle: 0x{:X}", handle));
}
/**
* @brief Retrieves a kernel memory object that owns the specified address
* @param address The address to look for
* @return A shared pointer to the corresponding KMemory object
*/
std::optional<HandleOut<KMemory>> GetMemoryObject(u8 *ptr);
/**
* @brief Closes a handle in the handle table
*/

View File

@ -1,134 +1,34 @@
// SPDX-License-Identifier: MPL-2.0
// Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
// Copyright © 2023 Skyline Team and Contributors (https://github.com/skyline-emu/)
#include <android/sharedmem.h>
#include <unistd.h>
#include <asm/unistd.h>
#include "KSharedMemory.h"
#include "KProcess.h"
namespace skyline::kernel::type {
KSharedMemory::KSharedMemory(const DeviceState &state, size_t size, memory::MemoryState memState, KType type)
: memoryState(memState),
KMemory(state, type, span<u8>{}) {
fd = ASharedMemory_create(type == KType::KSharedMemory ? "HOS-KSharedMemory" : "HOS-KTransferMemory", size);
if (fd < 0)
throw exception("An error occurred while creating shared memory: {}", fd);
auto hostPtr{static_cast<u8 *>(mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED, fd, 0))};
if (hostPtr == MAP_FAILED)
throw exception("An occurred while mapping shared memory: {}", strerror(errno));
host = span<u8>{hostPtr, size};
}
KSharedMemory::KSharedMemory(const DeviceState &state, size_t size)
: KMemory{state, KType::KSharedMemory, size} {}
u8 *KSharedMemory::Map(span<u8> map, memory::Permission permission) {
if (!state.process->memory.AddressSpaceContains(map))
throw exception("KPrivateMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", map.data(), map.end().base());
if (!util::IsPageAligned(map.data()) || !util::IsPageAligned(map.size()))
throw exception("KSharedMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size());
if (guest.valid())
throw exception("Mapping KSharedMemory multiple times on guest is not supported: Requested Mapping: 0x{:X} - 0x{:X} (0x{:X}), Current Mapping: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size(), guest.data(), guest.end().base(), guest.size());
u8 *result{KMemory::Map(map, permission)};
auto guestPtr{static_cast<u8 *>(mmap(map.data(), map.size(), permission.Get(), MAP_SHARED | (map.data() ? MAP_FIXED : 0), fd, 0))};
if (guestPtr == MAP_FAILED)
throw exception("An error occurred while mapping shared memory in guest: {}", strerror(errno));
guest = span<u8>{guestPtr, map.size()};
state.process->memory.MapSharedMemory(guest, permission);
state.process->memory.InsertChunk(ChunkDescriptor{
.ptr = guest.data(),
.size = guest.size(),
.permission = permission,
.state = memoryState,
.attributes = memory::MemoryAttribute{
.isBorrowed = objectType == KType::KTransferMemory,
},
.memory = this
});
return guest.data();
return result;
}
void KSharedMemory::Unmap(span<u8> map) {
auto &memoryManager{state.process->memory};
if (!memoryManager.AddressSpaceContains(map))
throw exception("KPrivateMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", map.data(), map.end().base());
if (!util::IsPageAligned(map.data()) || !util::IsPageAligned(map.size()))
throw exception("KSharedMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size());
if (guest.data() != map.data() && guest.size() != map.size())
throw exception("Unmapping KSharedMemory partially is not supported: Requested Unmap: 0x{:X} - 0x{:X} (0x{:X}), Current Mapping: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size(), guest.data(), guest.end().base(), guest.size());
if (mmap(map.data(), map.size(), PROT_NONE, MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
throw exception("An error occurred while unmapping shared memory in guest: {}", strerror(errno));
KMemory::Unmap(map);
guest = span<u8>{};
memoryManager.InsertChunk(ChunkDescriptor{
.ptr = map.data(),
.size = map.size(),
.state = memory::states::Unmapped,
});
}
void KSharedMemory::UpdatePermission(span<u8> map, memory::Permission permission) {
if (map.valid() && !util::IsPageAligned(map.data()))
throw exception("KSharedMemory permission updated with a non-page-aligned address: 0x{:X}", map.data());
if (guest.valid()) {
mprotect(map.data(), map.size(), permission.Get());
if (guest.data() == MAP_FAILED)
throw exception("An error occurred while updating shared memory's permissions in guest: {}", strerror(errno));
state.process->memory.InsertChunk(ChunkDescriptor{
.ptr = map.data(),
.size = map.size(),
.permission = permission,
.state = memoryState,
.attributes = memory::MemoryAttribute{
.isBorrowed = objectType == KType::KTransferMemory,
},
.memory = this
});
}
state.process->memory.UnmapMemory(map);
}
KSharedMemory::~KSharedMemory() {
if (state.process && guest.valid()) {
auto &memoryManager{state.process->memory};
if (objectType != KType::KTransferMemory) {
if (mmap(guest.data(), guest.size(), PROT_NONE, MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
Logger::Warn("An error occurred while unmapping shared memory: {}", strerror(errno));
if (mmap(guest.data(), guest.size(), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) [[unlikely]]
Logger::Warn("An error occurred while unmapping shared memory: {}", strerror(errno));
state.process->memory.InsertChunk(ChunkDescriptor{
.ptr = guest.data(),
.size = guest.size(),
.state = memory::states::Unmapped,
});
} else {
// KTransferMemory remaps the region with R/W permissions during destruction
constexpr memory::Permission UnborrowPermission{true, true, false};
if (mmap(guest.data(), guest.size(), UnborrowPermission.Get(), MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
Logger::Warn("An error occurred while remapping transfer memory: {}", strerror(errno));
else if (!host.valid())
Logger::Warn("Expected host mapping of transfer memory to be valid during KTransferMemory destruction");
guest.copy_from(host);
state.process->memory.InsertChunk(ChunkDescriptor{
.ptr = guest.data(),
.size = guest.size(),
.permission = UnborrowPermission,
.state = memoryState,
.attributes = memory::MemoryAttribute{
.isBorrowed = false,
},
.memory = this
});
}
state.process->memory.UnmapMemory(guest);
}
if (host.valid())
munmap(host.data(), host.size());
close(fd);
}
}

View File

@ -1,5 +1,5 @@
// SPDX-License-Identifier: MPL-2.0
// Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
// Copyright © 2023 Skyline Team and Contributors (https://github.com/skyline-emu/)
#pragma once
@ -10,14 +10,8 @@ namespace skyline::kernel::type {
* @brief KSharedMemory is used to retain two mappings of the same underlying memory, allowing sharing memory between two processes
*/
class KSharedMemory : public KMemory {
private:
int fd; //!< A file descriptor to the underlying shared memory
memory::MemoryState memoryState; //!< The state of the memory as supplied initially, this is retained for any mappings
public:
span<u8> host; //!< We also keep a host mirror of the underlying shared memory for host access, it is persistently mapped and should be used by anything accessing the memory on the host
KSharedMemory(const DeviceState &state, size_t size, memory::MemoryState memState = memory::states::SharedMemory, KType type = KType::KSharedMemory);
KSharedMemory(const DeviceState &state, size_t size);
/**
* @note 'ptr' needs to be in guest-reserved address space
@ -29,8 +23,6 @@ namespace skyline::kernel::type {
*/
void Unmap(span<u8> map);
void UpdatePermission(span<u8> map, memory::Permission permission) override;
/**
* @brief The destructor of shared memory, it deallocates the memory from all processes
*/

View File

@ -9,7 +9,6 @@
#include <common/signal.h>
#include <common/spin_lock.h>
#include "KSyncObject.h"
#include "KPrivateMemory.h"
#include "KSharedMemory.h"
namespace skyline {

View File

@ -0,0 +1,64 @@
// SPDX-License-Identifier: MPL-2.0
// Copyright © 2023 Skyline Team and Contributors (https://github.com/skyline-emu/)
#include "KTransferMemory.h"
#include "KProcess.h"
namespace skyline::kernel::type {
KTransferMemory::KTransferMemory(const DeviceState &state, size_t size)
: KMemory{state, KType::KTransferMemory, size} {}
u8 *KTransferMemory::Map(span<u8> map, memory::Permission permission) {
std::memcpy(host.data(), map.data(), map.size());
u8 *result{KMemory::Map(map, permission)};
auto oldChunk{state.process->memory.GetChunk(map.data()).value()};
originalMapping = oldChunk.second;
if (!originalMapping.state.transferMemoryAllowed) [[unlikely]] {
Logger::Warn("Tried to map transfer memory with incompatible state at: 0x{:X} (0x{:X} bytes)", map.data(), map.size());
return nullptr;
} else {
state.process->memory.MapTransferMemory(guest, permission);
state.process->memory.SetRegionBorrowed(guest, true);
return result;
}
}
void KTransferMemory::Unmap(span<u8> map) {
KMemory::Unmap(map);
guest = span<u8>{};
switch (originalMapping.state.type) {
case memory::MemoryType::CodeMutable:
state.process->memory.MapMutableCodeMemory(map);
break;
case memory::MemoryType::Heap:
state.process->memory.MapHeapMemory(map);
break;
default:
Logger::Warn("Unmapping KTransferMemory with incompatible state: (0x{:X})", originalMapping.state.value);
}
std::memcpy(map.data(), host.data(), map.size());
}
KTransferMemory::~KTransferMemory() {
if (state.process && guest.valid()) {
if (mmap(guest.data(), guest.size(), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS | MAP_POPULATE, -1, 0) == MAP_FAILED) [[unlikely]]
Logger::Warn("An error occurred while unmapping transfer memory in guest: {}", strerror(errno));
switch (originalMapping.state.type) {
case memory::MemoryType::CodeMutable:
state.process->memory.MapMutableCodeMemory(guest);
break;
case memory::MemoryType::Heap:
state.process->memory.MapHeapMemory(guest);
break;
default:
Logger::Warn("Unmapping KTransferMemory with incompatible state: (0x{:X})", originalMapping.state.value);
}
std::memcpy(guest.data(), host.data(), guest.size());
}
}
}
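
KTransferMemory above borrows an existing guest range rather than allocating fresh memory: Map copies the guest contents into the host backing before remapping the range onto shared memory and marking it borrowed, while Unmap and the destructor restore the original mapping type and copy the contents back. The helper below is hypothetical and only illustrates that copy-out/copy-back ordering.

#include <cstdint>
#include <cstring>
#include <vector>

struct BorrowedRegion {
    std::uint8_t *guest{};
    std::vector<std::uint8_t> saved; // stand-in for KMemory's persistent host mirror

    void Borrow(std::uint8_t *ptr, std::size_t size) {
        guest = ptr;
        saved.assign(ptr, ptr + size); // guest -> host, before the region is remapped
        // ... KTransferMemory::Map then maps the ashmem backing over 'ptr' and
        //     marks the chunk as borrowed in the MemoryManager
    }

    void Restore() {
        // ... KTransferMemory's Unmap/destructor first restore the original mapping
        //     type (heap or mutable code) before copying the contents back
        if (guest) {
            std::memcpy(guest, saved.data(), saved.size()); // host -> guest
            guest = nullptr;
        }
    }
};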

View File

@ -1,24 +1,35 @@
// SPDX-License-Identifier: MPL-2.0
// Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
// Copyright © 2023 Skyline Team and Contributors (https://github.com/skyline-emu/)
#pragma once
#include "KSharedMemory.h"
#include "KMemory.h"
namespace skyline::kernel::type {
/**
* @brief KTransferMemory is used to transfer memory from one application to another on HOS, we emulate this abstraction using KSharedMemory as it's essentially the same with the main difference being that KSharedMemory is allocated by the kernel while KTransferMemory is created from memory that's been allocated by the guest beforehand
* @note KSharedMemory::{Map, Unmap, ~KSharedMemory} contains code to handle differences in memory attributes and destruction
*/
class KTransferMemory : public KSharedMemory {
class KTransferMemory : public KMemory {
private:
ChunkDescriptor originalMapping;
public:
/**
* @note 'ptr' needs to be in guest-reserved address space
*/
KTransferMemory(const DeviceState &state, u8 *ptr, size_t size, memory::Permission permission, memory::MemoryState memState = memory::states::TransferMemory)
: KSharedMemory(state, size, memState, KType::KTransferMemory) {
std::memcpy(host.data(), ptr, size);
Map(span<u8>{ptr, size}, permission);
}
KTransferMemory(const DeviceState &state, size_t size);
/**
* @note 'ptr' needs to be in guest-reserved address space
*/
u8 *Map(span<u8> map, memory::Permission permission);
/**
* @note 'ptr' needs to be in guest-reserved address space
*/
void Unmap(span<u8> map);
~KTransferMemory();
};
}

View File

@ -89,20 +89,24 @@ namespace skyline::loader {
hookSize = util::AlignUp(state.nce->GetHookSectionSize(executableSymbols), PAGE_SIZE);
}
auto patchType{process->memory.addressSpaceType == memory::AddressSpaceType::AddressSpace36Bit ? memory::states::Heap : memory::states::Reserved};
process->NewHandle<kernel::type::KPrivateMemory>(span<u8>{base, patch.size + hookSize}, memory::Permission{false, false, false}, patchType); // ---
if (process->memory.addressSpaceType == memory::AddressSpaceType::AddressSpace36Bit) {
process->memory.MapHeapMemory(span<u8>{base, patch.size + hookSize}); // ---
process->memory.SetRegionPermission(span<u8>{base, patch.size + hookSize}, memory::Permission{false, false, false});
} else {
process->memory.Reserve(span<u8>{base, patch.size + hookSize}); // ---
}
Logger::Debug("Successfully mapped section .patch @ 0x{:X}, Size = 0x{:X}", base, patch.size);
if (hookSize > 0)
Logger::Debug("Successfully mapped section .hook @ 0x{:X}, Size = 0x{:X}", base + patch.size, hookSize);
u8 *executableBase{base + patch.size + hookSize};
process->NewHandle<kernel::type::KPrivateMemory>(span<u8>{executableBase + executable.text.offset, textSize}, memory::Permission{true, false, true}, memory::states::CodeStatic); // R-X
process->memory.MapCodeMemory(span<u8>{executableBase + executable.text.offset, textSize}, memory::Permission{true, false, true}); // R-X
Logger::Debug("Successfully mapped section .text @ 0x{:X}, Size = 0x{:X}", executableBase, textSize);
process->NewHandle<kernel::type::KPrivateMemory>(span<u8>{executableBase + executable.ro.offset, roSize}, memory::Permission{true, false, false}, memory::states::CodeStatic); // R--
process->memory.MapCodeMemory(span<u8>{executableBase + executable.ro.offset, roSize}, memory::Permission{true, false, false}); // R--
Logger::Debug("Successfully mapped section .rodata @ 0x{:X}, Size = 0x{:X}", executableBase + executable.ro.offset, roSize);
process->NewHandle<kernel::type::KPrivateMemory>(span<u8>{executableBase + executable.data.offset, dataSize}, memory::Permission{true, true, false}, memory::states::CodeMutable); // RW-
process->memory.MapMutableCodeMemory(span<u8>{executableBase + executable.data.offset, dataSize}); // RW-
Logger::Debug("Successfully mapped section .data + .bss @ 0x{:X}, Size = 0x{:X}", executableBase + executable.data.offset, dataSize);
size_t size{patch.size + hookSize + textSize + roSize + dataSize};
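
The loader lays the executable out as .patch, then .hook, then the code sections at their image offsets from executableBase, mapped R-X, R--, and RW- through the new MemoryManager helpers. A small sketch of that layout arithmetic follows, assuming page-aligned sizes; the struct and function names are chosen purely for illustration.

#include <cstddef>
#include <cstdint>

struct SectionLayout {
    std::uintptr_t patch; // no access (or heap-typed on the 36-bit address space)
    std::uintptr_t hook;  // optional hook section directly after .patch
    std::uintptr_t text;  // R-X
    std::uintptr_t ro;    // R--
    std::uintptr_t data;  // RW- (.data + .bss)
};

SectionLayout ComputeLayout(std::uintptr_t base, std::size_t patchSize, std::size_t hookSize,
                            std::size_t textOffset, std::size_t roOffset, std::size_t dataOffset) {
    std::uintptr_t executableBase{base + patchSize + hookSize};
    return SectionLayout{
        base,
        base + patchSize,
        executableBase + textOffset,
        executableBase + roOffset,
        executableBase + dataOffset,
    };
}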

View File

@ -71,8 +71,8 @@ namespace skyline::service::ro {
if (state.process->memory.heap.contains(ptr) || state.process->memory.alias.contains(ptr))
continue;
auto desc{state.process->memory.Get(ptr)};
if (!desc || desc->state != memory::states::Unmapped || (static_cast<size_t>(ptr - desc->ptr) + size) < desc->size)
auto desc{state.process->memory.GetChunk(ptr)};
if (!desc || desc->second.state != memory::states::Unmapped || (static_cast<size_t>(ptr - desc->first) + size) < desc->second.size)
continue;
} while (!ptr);
@ -85,26 +85,21 @@ namespace skyline::service::ro {
Result IRoInterface::UnloadModule(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
Logger::Error("Module unloading is unimplemented!");
return {};
}
Result IRoInterface::RegisterModuleInfo(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
return {};
}
Result IRoInterface::UnregisterModuleInfo(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
return {};
}
Result IRoInterface::RegisterProcessHandle(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
return {};
}
Result IRoInterface::RegisterProcessModuleInfo(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
return {};
}
}