mirror of https://github.com/skyline-emu/skyline.git
synced 2024-11-26 04:54:17 +01:00

Use spans instead of ptr and size in kernel memory

This commit is contained in:
parent e3e92ce1d4
commit 2e356b8f0b
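The change replaces (pointer, size) parameter pairs with a single span value throughout the kernel memory code. A minimal, self-contained sketch of the shape of this refactor, using std::span rather than skyline's own span wrapper (the names below are illustrative, not from the repository):

    #include <cstddef>
    #include <cstdint>
    #include <span>
    #include <vector>

    // Before: the pointer and the length travel separately and can drift apart
    static std::size_t SumOld(const std::uint8_t *ptr, std::size_t size) {
        std::size_t sum{};
        for (std::size_t i{}; i < size; i++)
            sum += ptr[i];
        return sum;
    }

    // After: one span value carries data() and size() together
    static std::size_t SumNew(std::span<const std::uint8_t> memory) {
        std::size_t sum{};
        for (auto byte : memory)
            sum += byte;
        return sum;
    }

    int main() {
        std::vector<std::uint8_t> bytes(16, 1);
        return SumOld(bytes.data(), bytes.size()) == SumNew(bytes) ? 0 : 1;
    }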
@@ -86,6 +86,13 @@ namespace skyline {
             return this->begin() <= other.begin() && this->end() >= other.end();
         }
+
+        /**
+         * @return If a supplied address is located inside this span
+         */
+        constexpr bool contains(const T *address) const {
+            return this->data() <= address && this->end().base() > address;
+        }

         /**
          * @return If the span is valid by not being null
          */
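The new contains() overloads centralize the bounds checks that were previously written out by hand at each call site. A sketch of the same semantics in terms of std::span (free functions standing in for the member functions; the pointer comparisons assume both ranges come from one allocation, as they do for the guest carveout):

    #include <cstdint>
    #include <span>

    // Equivalent of span::contains(const T *address): data() <= address < end()
    bool ContainsAddress(std::span<const std::uint8_t> spn, const std::uint8_t *address) {
        return spn.data() <= address && address < spn.data() + spn.size();
    }

    // Equivalent of span::contains(span other): other lies fully inside this span
    bool ContainsSpan(std::span<const std::uint8_t> outer, std::span<const std::uint8_t> inner) {
        return outer.data() <= inner.data() &&
               inner.data() + inner.size() <= outer.data() + outer.size();
    }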
@@ -17,7 +17,7 @@ namespace skyline::gpu {
         u8 *alignedData{util::AlignDown(guest->data(), PAGE_SIZE)};
         size_t alignedSize{static_cast<size_t>(util::AlignUp(guest->data() + guest->size(), PAGE_SIZE) - alignedData)};

-        alignedMirror = gpu.state.process->memory.CreateMirror(alignedData, alignedSize);
+        alignedMirror = gpu.state.process->memory.CreateMirror(span<u8>{alignedData, alignedSize});
         mirror = alignedMirror.subspan(static_cast<size_t>(guest->data() - alignedData), guest->size());

         trapHandle = gpu.state.nce->TrapRegions(*guest, true, [this] {
@@ -264,7 +264,7 @@ namespace skyline::gpu {
         return BufferView{shared_from_this(), &(*it)};
     }

-    vk::DeviceSize Buffer::AcquireMegaBuffer(MegaBuffer& megaBuffer) {
+    vk::DeviceSize Buffer::AcquireMegaBuffer(MegaBuffer &megaBuffer) {
         SynchronizeGuest(false, true); // First try and enable megabuffering by doing an immediate sync

         if (!megaBufferingEnabled)
@@ -367,7 +367,7 @@ namespace skyline::gpu {
         bufferDelegate->buffer->Write(pCycle, flushHostCallback, gpuCopyCallback, data, offset + bufferDelegate->view->offset);
     }

-    vk::DeviceSize BufferView::AcquireMegaBuffer(MegaBuffer& megaBuffer) const {
+    vk::DeviceSize BufferView::AcquireMegaBuffer(MegaBuffer &megaBuffer) const {
         vk::DeviceSize bufferOffset{bufferDelegate->buffer->AcquireMegaBuffer(megaBuffer)};

         // Propagate 0 results since they signify that megabuffering isn't supported for a buffer
@@ -134,7 +134,7 @@ namespace skyline::gpu {
         u8 *alignedData{util::AlignDown(mapping.data(), PAGE_SIZE)};
         size_t alignedSize{static_cast<size_t>(util::AlignUp(mapping.data() + mapping.size(), PAGE_SIZE) - alignedData)};

-        alignedMirror = gpu.state.process->memory.CreateMirror(alignedData, alignedSize);
+        alignedMirror = gpu.state.process->memory.CreateMirror(span<u8>{alignedData, alignedSize});
         mirror = alignedMirror.subspan(static_cast<size_t>(mapping.data() - alignedData), mapping.size());
     } else {
         std::vector<span<u8>> alignedMappings;
@@ -27,7 +27,7 @@ namespace skyline::input {
     Input(const DeviceState &state)
         : state(state),
           kHid(std::make_shared<kernel::type::KSharedMemory>(state, sizeof(HidSharedMemory))),
-          hid(reinterpret_cast<HidSharedMemory *>(kHid->host.ptr)),
+          hid(reinterpret_cast<HidSharedMemory *>(kHid->host.data())),
           npad(state, hid),
           touch(state, hid) {}
 };
@@ -10,30 +10,29 @@ namespace skyline::kernel {
     MemoryManager::MemoryManager(const DeviceState &state) : state(state) {}

     MemoryManager::~MemoryManager() {
-        if (base.address && base.size)
-            munmap(reinterpret_cast<void *>(base.address), base.size);
+        if (base.valid() && !base.empty())
+            munmap(reinterpret_cast<void *>(base.data()), base.size());
     }

     constexpr size_t RegionAlignment{1ULL << 21}; //!< The minimum alignment of a HOS memory region
     constexpr size_t CodeRegionSize{4ULL * 1024 * 1024 * 1024}; //!< The assumed maximum size of the code region (4GiB)

     void MemoryManager::InitializeVmm(memory::AddressSpaceType type) {
+        size_t baseSize{};
         switch (type) {
             case memory::AddressSpaceType::AddressSpace32Bit:
             case memory::AddressSpaceType::AddressSpace32BitNoReserved:
                 throw exception("32-bit address spaces are not supported");

             case memory::AddressSpaceType::AddressSpace36Bit: {
-                addressSpace.address = 0;
-                addressSpace.size = 1UL << 36;
-                base.size = 0x78000000 + 0x180000000 + 0x78000000 + 0x180000000;
+                addressSpace = span<u8>{reinterpret_cast<u8 *>(0), 1ULL << 36};
+                baseSize = 0x78000000 + 0x180000000 + 0x78000000 + 0x180000000;
                 throw exception("36-bit address spaces are not supported"); // Due to VMM base being forced at 0x800000 and it being used by ART
             }

             case memory::AddressSpaceType::AddressSpace39Bit: {
-                addressSpace.address = 0;
-                addressSpace.size = 1UL << 39;
-                base.size = CodeRegionSize + 0x1000000000 + 0x180000000 + 0x80000000 + 0x1000000000;
+                addressSpace = span<u8>{reinterpret_cast<u8 *>(0), 1ULL << 39};
+                baseSize = CodeRegionSize + 0x1000000000 + 0x180000000 + 0x80000000 + 0x1000000000;
                 break;
             }

@@ -49,81 +48,71 @@ namespace skyline::kernel {
             auto end{util::HexStringToInt<u64>(std::string_view(maps.data() + line, sizeof(u64) * 2))};
             if (end < start)
                 continue;
-            if (end - start > base.size + (alignedStart - start)) { // We don't want to overflow if alignedStart > start
-                base.address = alignedStart;
+            if (end - start > baseSize + (alignedStart - start)) { // We don't want to overflow if alignedStart > start
+                base = span<u8>{reinterpret_cast<u8 *>(alignedStart), baseSize};
                 break;
             }

             start = util::HexStringToInt<u64>(std::string_view(maps.data() + maps.find_first_of('-', line) + 1, sizeof(u64) * 2));
             alignedStart = util::AlignUp(start, RegionAlignment);
-            if (alignedStart + base.size > addressSpace.size) // We don't want to map past the end of the address space
+            if (alignedStart + baseSize > addressSpace.size()) // We don't want to map past the end of the address space
                 break;
         } while ((line = maps.find_first_of('\n', line)) != std::string::npos && line++);

-        if (!base.address)
+        if (!base.valid())
             throw exception("Cannot find a suitable carveout for the guest address space");

         memoryFd = static_cast<int>(syscall(__NR_memfd_create, "HOS-AS", MFD_CLOEXEC)); // We need to use memfd directly as ASharedMemory doesn't always use it while we depend on it for FreeMemory (using FALLOC_FL_PUNCH_HOLE) to work
         if (memoryFd == -1)
             throw exception("Failed to create memfd for guest address space: {}", strerror(errno));

-        if (ftruncate(memoryFd, static_cast<off_t>(base.size)) == -1)
+        if (ftruncate(memoryFd, static_cast<off_t>(base.size())) == -1)
             throw exception("Failed to resize memfd for guest address space: {}", strerror(errno));

-        auto result{mmap(reinterpret_cast<void *>(base.address), base.size, PROT_WRITE, MAP_FIXED | MAP_SHARED, memoryFd, 0)};
+        auto result{mmap(reinterpret_cast<void *>(base.data()), base.size(), PROT_WRITE, MAP_FIXED | MAP_SHARED, memoryFd, 0)};
         if (result == MAP_FAILED)
             throw exception("Failed to mmap guest address space: {}", strerror(errno));

         chunks = {
             ChunkDescriptor{
-                .ptr = reinterpret_cast<u8 *>(addressSpace.address),
-                .size = base.address - addressSpace.address,
+                .ptr = addressSpace.data(),
+                .size = static_cast<size_t>(base.data() - addressSpace.data()),
                 .state = memory::states::Reserved,
             },
             ChunkDescriptor{
-                .ptr = reinterpret_cast<u8 *>(base.address),
-                .size = base.size,
+                .ptr = base.data(),
+                .size = base.size(),
                 .state = memory::states::Unmapped,
             },
             ChunkDescriptor{
-                .ptr = reinterpret_cast<u8 *>(base.address + base.size),
-                .size = addressSpace.size - (base.address + base.size),
+                .ptr = base.end().base(),
+                .size = addressSpace.size() - reinterpret_cast<u64>(base.end().base()),
                 .state = memory::states::Reserved,
             }};
     }

-    void MemoryManager::InitializeRegions(u8 *codeStart, u64 size) {
-        u64 address{reinterpret_cast<u64>(codeStart)};
-        if (!util::IsAligned(address, RegionAlignment))
-            throw exception("Non-aligned code region was used to initialize regions: 0x{:X} - 0x{:X}", codeStart, codeStart + size);
+    void MemoryManager::InitializeRegions(span<u8> codeRegion) {
+        if (!util::IsAligned(codeRegion.data(), RegionAlignment))
+            throw exception("Non-aligned code region was used to initialize regions: 0x{:X} - 0x{:X}", codeRegion.data(), codeRegion.end().base());

-        switch (addressSpace.size) {
+        switch (addressSpace.size()) {
             case 1UL << 36: {
-                code.address = 0x800000;
-                code.size = 0x78000000;
-                if (code.address > address || (code.size - (address - code.address)) < size)
+                code = span<u8>{reinterpret_cast<u8 *>(0x800000), 0x78000000};
+                if (code.data() > codeRegion.data() || (code.end().base() < codeRegion.end().base()))
                     throw exception("Code mapping larger than 36-bit code region");
-                alias.address = code.address + code.size;
-                alias.size = 0x180000000;
-                stack.address = alias.address + alias.size;
-                stack.size = 0x78000000;
+                alias = span<u8>{code.end().base(), 0x180000000};
+                stack = span<u8>{alias.end().base(), 0x78000000};
                 tlsIo = stack; //!< TLS/IO is shared with Stack on 36-bit
-                heap.address = stack.address + stack.size;
-                heap.size = 0x180000000;
+                heap = span<u8>{stack.end().base(), 0x180000000};
                 break;
             }

             case 1UL << 39: {
-                code.address = base.address;
-                code.size = util::AlignUp(size, RegionAlignment);
-                alias.address = code.address + code.size;
-                alias.size = 0x1000000000;
-                heap.address = alias.address + alias.size;
-                heap.size = 0x180000000;
-                stack.address = heap.address + heap.size;
-                stack.size = 0x80000000;
-                tlsIo.address = stack.address + stack.size;
-                tlsIo.size = 0x1000000000;
+                code = span<u8>{base.data(), util::AlignUp(codeRegion.size(), RegionAlignment)};
+                alias = span<u8>{code.end().base(), 0x1000000000};
+                heap = span<u8>{alias.end().base(), 0x180000000};
+                stack = span<u8>{heap.end().base(), 0x80000000};
+                tlsIo = span<u8>{stack.end().base(), 0x1000000000};
                 break;
             }

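Each region in the new code starts at the previous region's end iterator, so the layout arithmetic that used to be spread over paired address/size assignments becomes a chain of span constructions. A small sketch of that chaining with std::span and placeholder sizes (not the diff's constants):

    #include <cstdint>
    #include <span>

    int main() {
        alignas(4096) static std::uint8_t backing[4096 * 4];
        std::span<std::uint8_t> base{backing, sizeof(backing)};

        // Regions are laid out end-to-end inside the base carveout
        std::span<std::uint8_t> code{base.data(), 4096};
        std::span<std::uint8_t> alias{code.data() + code.size(), 4096};
        std::span<std::uint8_t> heap{alias.data() + alias.size(), 4096};
        std::span<std::uint8_t> stack{heap.data() + heap.size(), 4096};

        // The last region ends exactly where the carveout ends
        return (stack.data() + stack.size() == base.data() + base.size()) ? 0 : 1;
    }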
@@ -131,33 +120,31 @@ namespace skyline::kernel {
                 throw exception("Regions initialized without VMM initialization");
         }

-        auto newSize{code.size + alias.size + stack.size + heap.size + ((addressSpace.size == 1UL << 39) ? tlsIo.size : 0)};
-        if (newSize > base.size)
-            throw exception("Guest VMM size has exceeded host carveout size: 0x{:X}/0x{:X} (Code: 0x{:X}/0x{:X})", newSize, base.size, code.size, CodeRegionSize);
-        if (newSize != base.size)
-            munmap(reinterpret_cast<u8 *>(base.address) + base.size, newSize - base.size);
+        auto newSize{code.size() + alias.size() + stack.size() + heap.size() + ((addressSpace.size() == 1UL << 39) ? tlsIo.size() : 0)};
+        if (newSize > base.size())
+            throw exception("Guest VMM size has exceeded host carveout size: 0x{:X}/0x{:X} (Code: 0x{:X}/0x{:X})", newSize, base.size(), code.size(), CodeRegionSize);
+        if (newSize != base.size())
+            munmap(base.end().base(), newSize - base.size());

-        if (size > code.size)
-            throw exception("Code region ({}) is smaller than mapped code size ({})", code.size, size);
+        if (codeRegion.size() > code.size())
+            throw exception("Code region ({}) is smaller than mapped code size ({})", code.size(), codeRegion.size());

-        Logger::Debug("Region Map:\nVMM Base: 0x{:X}\nCode Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nAlias Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nHeap Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nStack Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nTLS/IO Region: 0x{:X} - 0x{:X} (Size: 0x{:X})", base.address, code.address, code.address + code.size, code.size, alias.address, alias.address + alias.size, alias.size, heap.address, heap.address + heap.size, heap.size, stack.address, stack.address + stack.size, stack.size, tlsIo.address, tlsIo.address + tlsIo.size, tlsIo.size);
+        Logger::Debug("Region Map:\nVMM Base: 0x{:X}\nCode Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nAlias Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nHeap Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nStack Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nTLS/IO Region: 0x{:X} - 0x{:X} (Size: 0x{:X})", base.data(), code.data(), code.end().base(), code.size(), alias.data(), alias.end().base(), alias.size(), heap.data(), heap.end().base(), heap.size(), stack.data(), stack.end().base(), stack.size(), tlsIo.data(), tlsIo.end().base(), tlsIo.size());
     }

-    span<u8> MemoryManager::CreateMirror(u8 *pointer, size_t size) {
-        auto address{reinterpret_cast<u64>(pointer)};
-        if (address < base.address || address + size > base.address + base.size)
-            throw exception("Mapping is outside of VMM base: 0x{:X} - 0x{:X}", address, address + size);
+    span<u8> MemoryManager::CreateMirror(span<u8> mapping) {
+        if (mapping.data() < base.data() || mapping.end().base() > base.end().base())
+            throw exception("Mapping is outside of VMM base: 0x{:X} - 0x{:X}", mapping.data(), mapping.end().base());

-        size_t offset{address - base.address};
-        if (!util::IsPageAligned(offset) || !util::IsPageAligned(size))
-            throw exception("Mapping is not aligned to a page: 0x{:X}-0x{:X} (0x{:X})", address, address + size, offset);
+        auto offset{static_cast<size_t>(mapping.data() - base.data())};
+        if (!util::IsPageAligned(offset) || !util::IsPageAligned(mapping.size()))
+            throw exception("Mapping is not aligned to a page: 0x{:X}-0x{:X} (0x{:X})", mapping.data(), mapping.end().base(), offset);

-        auto mirror{mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED, memoryFd, static_cast<off_t>(offset))};
+        auto mirror{mmap(nullptr, mapping.size(), PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED, memoryFd, static_cast<off_t>(offset))};
         if (mirror == MAP_FAILED)
-            throw exception("Failed to create mirror mapping at 0x{:X}-0x{:X} (0x{:X}): {}", address, address + size, offset, strerror(errno));
+            throw exception("Failed to create mirror mapping at 0x{:X}-0x{:X} (0x{:X}): {}", mapping.data(), mapping.end().base(), offset, strerror(errno));

-        return span<u8>{reinterpret_cast<u8 *>(mirror), size};
+        return span<u8>{reinterpret_cast<u8 *>(mirror), mapping.size()};
     }

     span<u8> MemoryManager::CreateMirrors(const std::vector<span<u8>> &regions) {
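CreateMirror works because the guest carveout is backed by a memfd: mapping the same file offset a second time yields a host-side mirror of the same physical pages. A self-contained sketch of that technique (memfd_create needs Linux with glibc 2.27 or later; error handling trimmed to the essentials):

    #include <cstdint>
    #include <sys/mman.h>
    #include <unistd.h>

    int main() {
        size_t size{static_cast<size_t>(sysconf(_SC_PAGESIZE))};

        int fd{memfd_create("mirror-demo", 0)};
        if (fd < 0 || ftruncate(fd, static_cast<off_t>(size)) < 0)
            return 1;

        // "Guest" view and "host" mirror of the same backing pages
        auto *guest{static_cast<std::uint8_t *>(mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0))};
        auto *mirror{static_cast<std::uint8_t *>(mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0))};
        if (guest == MAP_FAILED || mirror == MAP_FAILED)
            return 1;

        guest[0] = 0x42; // A write through one view is visible through the other
        bool shared{mirror[0] == 0x42};

        munmap(guest, size);
        munmap(mirror, size);
        close(fd);
        return shared ? 0 : 1;
    }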
@@ -171,17 +158,16 @@ namespace skyline::kernel {

         size_t mirrorOffset{};
         for (const auto &region : regions) {
-            auto address{reinterpret_cast<u64>(region.data())};
-            if (address < base.address || address + region.size() > base.address + base.size)
-                throw exception("Mapping is outside of VMM base: 0x{:X} - 0x{:X}", address, address + region.size());
+            if (region.data() < base.data() || region.end().base() > base.end().base())
+                throw exception("Mapping is outside of VMM base: 0x{:X} - 0x{:X}", region.data(), region.end().base());

-            size_t offset{address - base.address};
+            auto offset{static_cast<size_t>(region.data() - base.data())};
             if (!util::IsPageAligned(offset) || !util::IsPageAligned(region.size()))
-                throw exception("Mapping is not aligned to a page: 0x{:X}-0x{:X} (0x{:X})", address, address + region.size(), offset);
+                throw exception("Mapping is not aligned to a page: 0x{:X}-0x{:X} (0x{:X})", region.data(), region.end().base(), offset);

             auto mirror{mmap(reinterpret_cast<u8 *>(mirrorBase) + mirrorOffset, region.size(), PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED, memoryFd, static_cast<off_t>(offset))};
             if (mirror == MAP_FAILED)
-                throw exception("Failed to create mirror mapping at 0x{:X}-0x{:X} (0x{:X}): {}", address, address + region.size(), offset, strerror(errno));
+                throw exception("Failed to create mirror mapping at 0x{:X}-0x{:X} (0x{:X}): {}", region.data(), region.end().base(), offset, strerror(errno));

             mirrorOffset += region.size();
         }
@@ -192,18 +178,17 @@ namespace skyline::kernel {
         return span<u8>{reinterpret_cast<u8 *>(mirrorBase), totalSize};
     }

-    void MemoryManager::FreeMemory(u8 *pointer, size_t size) {
-        auto address{reinterpret_cast<u64>(pointer)};
-        if (address < base.address || address + size > base.address + base.size)
-            throw exception("Mapping is outside of VMM base: 0x{:X} - 0x{:X}", address, address + size);
+    void MemoryManager::FreeMemory(span<u8> memory) {
+        if (memory.data() < base.data() || memory.end().base() > base.end().base())
+            throw exception("Mapping is outside of VMM base: 0x{:X} - 0x{:X}", memory.data(), memory.end().base());

-        size_t offset{address - base.address};
-        if (!util::IsPageAligned(offset) || !util::IsPageAligned(size))
-            throw exception("Mapping is not aligned to a page: 0x{:X}-0x{:X} (0x{:X})", address, address + size, offset);
+        auto offset{static_cast<size_t>(memory.data() - base.data())};
+        if (!util::IsPageAligned(offset) || !util::IsPageAligned(memory.size()))
+            throw exception("Mapping is not aligned to a page: 0x{:X}-0x{:X} (0x{:X})", memory.data(), memory.end().base(), offset);

         // We need to use fallocate(FALLOC_FL_PUNCH_HOLE) to free the backing memory rather than madvise(MADV_REMOVE) as the latter fails when the memory doesn't have write permissions, we generally need to free memory after reprotecting it to disallow accesses between the two calls which would cause UB
-        if (fallocate(*memoryFd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, static_cast<off_t>(offset), static_cast<off_t>(size)) != 0)
-            throw exception("Failed to free memory at 0x{:X}-0x{:X} (0x{:X}): {}", address, address + size, offset, strerror(errno));
+        if (fallocate(*memoryFd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, static_cast<off_t>(offset), static_cast<off_t>(memory.size())) != 0)
+            throw exception("Failed to free memory at 0x{:X}-0x{:X} (0x{:X}): {}", memory.data(), memory.end().base(), offset, strerror(errno));
     }

     void MemoryManager::InsertChunk(const ChunkDescriptor &chunk) {
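The comment above explains why FreeMemory punches a hole instead of using madvise: the backing pages are released while the mapping itself stays valid, and freed pages read back as zeroes. A self-contained sketch of that behaviour (FALLOC_FL_PUNCH_HOLE is Linux-specific; the includes assume _GNU_SOURCE, which g++ defines by default):

    #include <cstdint>
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main() {
        size_t size{static_cast<size_t>(sysconf(_SC_PAGESIZE))};
        int fd{memfd_create("punch-demo", 0)};
        if (fd < 0 || ftruncate(fd, static_cast<off_t>(size)) < 0)
            return 1;

        auto *mem{static_cast<std::uint8_t *>(mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0))};
        if (mem == MAP_FAILED)
            return 1;
        mem[0] = 0xFF;

        // Release the backing pages without unmapping; the page reads back as zero
        if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, static_cast<off_t>(size)) != 0)
            return 1;
        bool zeroed{mem[0] == 0};

        munmap(mem, size);
        close(fd);
        return zeroed ? 0 : 1;
    }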
@@ -275,7 +260,7 @@ namespace skyline::kernel {
         for (const auto &chunk : chunks)
             if (chunk.state == memory::states::Heap)
                 size += chunk.size;
-        return size + code.size + state.process->mainThreadStack->size;
+        return size + code.size() + state.process->mainThreadStack->guest.size();
     }

     size_t MemoryManager::GetSystemResourceUsage() {
@@ -180,15 +180,6 @@ namespace skyline {
             constexpr MemoryState CodeWritable{0x00402015};
         }

-        struct Region {
-            u64 address;
-            size_t size;
-
-            bool IsInside(void *ptr) {
-                return (address <= reinterpret_cast<u64>(ptr)) && ((address + size) > reinterpret_cast<u64>(ptr));
-            }
-        };
-
         enum class AddressSpaceType : u8 {
             AddressSpace32Bit = 0, //!< 32-bit address space used by 32-bit applications
             AddressSpace36Bit = 1, //!< 36-bit address space used by 64-bit applications before 2.0.0
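With contains() and valid() available on span, the removed Region struct carried no information a span<u8> doesn't already. The correspondence (illustrative, not from the repository) is one-to-one, which is what lets the following hunks swap every memory::Region member for a span<u8> without changing behaviour:

    // Region::address      ->  span.data()
    // Region::size         ->  span.size()
    // Region::IsInside(p)  ->  span.contains(p)
    // address && size      ->  span.valid() && !span.empty()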
@@ -219,13 +210,13 @@ namespace skyline {
         std::vector<ChunkDescriptor> chunks;

       public:
-        memory::Region addressSpace{}; //!< The entire address space
-        memory::Region base{}; //!< The application-accessible address space
-        memory::Region code{};
-        memory::Region alias{};
-        memory::Region heap{};
-        memory::Region stack{};
-        memory::Region tlsIo{}; //!< TLS/IO
+        span<u8> addressSpace{}; //!< The entire address space
+        span<u8> base{}; //!< The application-accessible address space
+        span<u8> code{};
+        span<u8> alias{};
+        span<u8> heap{};
+        span<u8> stack{};
+        span<u8> tlsIo{}; //!< TLS/IO

         FileDescriptor memoryFd{}; //!< The file descriptor of the memory backing for the entire guest address space

@@ -240,14 +231,14 @@ namespace skyline {
          */
         void InitializeVmm(memory::AddressSpaceType type);

-        void InitializeRegions(u8 *codeStart, u64 size);
+        void InitializeRegions(span<u8> codeRegion);

         /**
          * @brief Mirrors a page-aligned mapping in the guest address space to the host address space
          * @return A span to the host address space mirror mapped as RWX, unmapping it is the responsibility of the caller
          * @note The supplied mapping **must** be page-aligned and inside the guest address space
          */
-        span<u8> CreateMirror(u8* pointer, size_t size);
+        span<u8> CreateMirror(span<u8> mapping);

         /**
          * @brief Mirrors multiple page-aligned mapping in the guest address space to the host address space
@@ -256,13 +247,13 @@ namespace skyline {
          * @note The supplied mapping **must** be page-aligned and inside the guest address space
          * @note If a single mapping is mirrored, it is recommended to use CreateMirror instead
          */
-        span<u8> CreateMirrors(const std::vector<span<u8>>& regions);
+        span<u8> CreateMirrors(const std::vector<span<u8>> &regions);

         /**
          * @brief Frees the underlying physical memory for a page-aligned mapping in the guest address space
          * @note All subsequent accesses to freed memory will return 0s
          */
-        void FreeMemory(u8* pointer, size_t size);
+        void FreeMemory(span<u8> memory);

         void InsertChunk(const ChunkDescriptor &chunk);

@@ -25,9 +25,9 @@ namespace skyline::kernel::svc {
         heap->Resize(size);

         state.ctx->gpr.w0 = Result{};
-        state.ctx->gpr.x1 = reinterpret_cast<u64>(heap->ptr);
+        state.ctx->gpr.x1 = reinterpret_cast<u64>(heap->guest.data());

-        Logger::Debug("Allocated at 0x{:X} - 0x{:X} (0x{:X} bytes)", heap->ptr, heap->ptr + heap->size, heap->size);
+        Logger::Debug("Allocated at 0x{:X} - 0x{:X} (0x{:X} bytes)", heap->guest.data(), heap->guest.end().base(), heap->guest.size());
     }

     void SetMemoryAttribute(const DeviceState &state) {
@@ -96,7 +96,7 @@ namespace skyline::kernel::svc {
         }

         auto stack{state.process->memory.stack};
-        if (!stack.IsInside(destination)) {
+        if (!stack.contains(span<u8>{destination, size})) {
             state.ctx->gpr.w0 = result::InvalidMemoryRegion;
             Logger::Warn("Destination not within stack region: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
             return;
@@ -114,13 +114,13 @@ namespace skyline::kernel::svc {
             return;
         }

-        state.process->NewHandle<type::KPrivateMemory>(destination, size, chunk->permission, memory::states::Stack);
+        state.process->NewHandle<type::KPrivateMemory>(span<u8>{destination, size}, chunk->permission, memory::states::Stack);
         std::memcpy(destination, source, size);

         auto object{state.process->GetMemoryObject(source)};
         if (!object)
             throw exception("svcMapMemory: Cannot find memory object in handle table for address 0x{:X}", source);
-        object->item->UpdatePermission(source, size, {false, false, false});
+        object->item->UpdatePermission(span<u8>{source, size}, {false, false, false});

         Logger::Debug("Mapped range 0x{:X} - 0x{:X} to 0x{:X} - 0x{:X} (Size: 0x{:X} bytes)", source, source + size, destination, destination + size, size);
         state.ctx->gpr.w0 = Result{};
@@ -144,7 +144,7 @@ namespace skyline::kernel::svc {
         }

         auto stack{state.process->memory.stack};
-        if (!stack.IsInside(source)) {
+        if (!stack.contains(span<u8>{source, size})) {
             state.ctx->gpr.w0 = result::InvalidMemoryRegion;
             Logger::Warn("Source not within stack region: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
             return;
@@ -168,7 +168,7 @@ namespace skyline::kernel::svc {
         if (!destObject)
             throw exception("svcUnmapMemory: Cannot find destination memory object in handle table for address 0x{:X}", destination);

-        destObject->item->UpdatePermission(destination, size, sourceChunk->permission);
+        destObject->item->UpdatePermission(span<u8>{destination, size}, sourceChunk->permission);

         std::memcpy(source, destination, size);

@@ -201,7 +201,7 @@ namespace skyline::kernel::svc {

             Logger::Debug("Address: 0x{:X}, Region Start: 0x{:X}, Size: 0x{:X}, Type: 0x{:X}, Is Uncached: {}, Permissions: {}{}{}", pointer, memInfo.address, memInfo.size, memInfo.type, static_cast<bool>(chunk->attributes.isUncached), chunk->permission.r ? 'R' : '-', chunk->permission.w ? 'W' : '-', chunk->permission.x ? 'X' : '-');
         } else {
-            auto addressSpaceEnd{reinterpret_cast<u64>(state.process->memory.addressSpace.address + state.process->memory.addressSpace.size)};
+            auto addressSpaceEnd{reinterpret_cast<u64>(state.process->memory.addressSpace.end().base())};

             memInfo = {
                 .address = addressSpaceEnd,
@@ -499,7 +499,7 @@ namespace skyline::kernel::svc {

         Logger::Debug("Mapping shared memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes) ({}{}{})", handle, pointer, pointer + size, size, permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');

-        object->Map(pointer, size, permission);
+        object->Map(span<u8>{pointer, size}, permission);

         state.ctx->gpr.w0 = Result{};
     } catch (const std::out_of_range &) {
@@ -529,7 +529,7 @@ namespace skyline::kernel::svc {

         Logger::Debug("Unmapping shared memory (0x{:X}) at 0x{:X} - 0x{:X} (0x{:X} bytes)", handle, pointer, pointer + size, size);

-        object->Unmap(pointer, size);
+        object->Unmap(span<u8>{pointer, size});

         state.ctx->gpr.w0 = Result{};
     } catch (const std::out_of_range &) {
@@ -932,23 +932,23 @@ namespace skyline::kernel::svc {
                 break;

             case InfoState::AliasRegionBaseAddr:
-                out = state.process->memory.alias.address;
+                out = reinterpret_cast<u64>(state.process->memory.alias.data());
                 break;

             case InfoState::AliasRegionSize:
-                out = state.process->memory.alias.size;
+                out = state.process->memory.alias.size();
                 break;

             case InfoState::HeapRegionBaseAddr:
-                out = state.process->memory.heap.address;
+                out = reinterpret_cast<u64>(state.process->memory.heap.data());
                 break;

             case InfoState::HeapRegionSize:
-                out = state.process->memory.heap.size;
+                out = state.process->memory.heap.size();
                 break;

             case InfoState::TotalMemoryAvailable:
-                out = std::min(totalPhysicalMemory, state.process->memory.heap.size);
+                out = std::min(totalPhysicalMemory, state.process->memory.heap.size());
                 break;

             case InfoState::TotalMemoryUsage:
@@ -960,19 +960,19 @@ namespace skyline::kernel::svc {
                 break;

             case InfoState::AddressSpaceBaseAddr:
-                out = state.process->memory.base.address;
+                out = reinterpret_cast<u64>(state.process->memory.base.data());
                 break;

             case InfoState::AddressSpaceSize:
-                out = state.process->memory.base.size;
+                out = state.process->memory.base.size();
                 break;

             case InfoState::StackRegionBaseAddr:
-                out = state.process->memory.stack.address;
+                out = reinterpret_cast<u64>(state.process->memory.stack.data());
                 break;

             case InfoState::StackRegionSize:
-                out = state.process->memory.stack.size;
+                out = state.process->memory.stack.size();
                 break;

             case InfoState::TotalSystemResourceAvailable:
@@ -989,7 +989,7 @@ namespace skyline::kernel::svc {
                 break;

             case InfoState::TotalMemoryAvailableWithoutSystemResource:
-                out = std::min(totalPhysicalMemory, state.process->memory.heap.size) - state.process->npdm.meta.systemResourceSize;
+                out = std::min(totalPhysicalMemory, state.process->memory.heap.size()) - state.process->npdm.meta.systemResourceSize;
                 break;

             case InfoState::TotalMemoryUsageWithoutSystemResource:
@@ -1028,13 +1028,13 @@ namespace skyline::kernel::svc {
             return;
         }

-        if (!state.process->memory.alias.IsInside(pointer) || !state.process->memory.alias.IsInside(pointer + size)) {
+        if (!state.process->memory.alias.contains(span<u8>{pointer, size})) {
             Logger::Warn("Memory region 0x{:X} - 0x{:X} (0x{:X}) is invalid", pointer, pointer + size, size);
             state.ctx->gpr.w0 = result::InvalidMemoryRegion;
             return;
         }

-        state.process->NewHandle<type::KPrivateMemory>(pointer, size, memory::Permission{true, true, false}, memory::states::Heap);
+        state.process->NewHandle<type::KPrivateMemory>(span<u8>{pointer, size}, memory::Permission{true, true, false}, memory::states::Heap);

         Logger::Debug("Mapped physical memory at 0x{:X} - 0x{:X} (0x{:X})", pointer, pointer + size, size);

@@ -1057,7 +1057,7 @@ namespace skyline::kernel::svc {
             return;
         }

-        if (!state.process->memory.alias.IsInside(pointer) || !state.process->memory.alias.IsInside(pointer + size)) {
+        if (!state.process->memory.alias.contains(span<u8>{pointer, size})) {
             Logger::Warn("Memory region 0x{:X} - 0x{:X} (0x{:X}) is invalid", pointer, pointer + size, size);
             state.ctx->gpr.w0 = result::InvalidMemoryRegion;
             return;
@@ -1070,17 +1070,17 @@ namespace skyline::kernel::svc {
         auto memory{state.process->GetMemoryObject(pointer)};
         if (memory) {
             auto item{static_pointer_cast<type::KPrivateMemory>(memory->item)};
-            auto initialSize{item->size};
+            auto initialSize{item->guest.size()};
             if (item->memoryState == memory::states::Heap) {
-                if (item->ptr >= pointer) {
-                    if (item->size <= size) {
+                if (item->guest.data() >= pointer) {
+                    if (item->guest.size() <= size) {
                         item->Resize(0);
                         state.process->CloseHandle(memory->handle);
                     } else {
-                        item->Remap(pointer + size, item->size - (size + static_cast<size_t>(item->ptr - pointer)));
+                        item->Remap(span<u8>{pointer + size, static_cast<size_t>((pointer + item->guest.size() - item->guest.data())) - size});
                     }
-                } else if (item->ptr < pointer) {
-                    item->Resize(static_cast<size_t>(pointer - item->ptr));
+                } else if (item->guest.data() < pointer) {
+                    item->Resize(static_cast<size_t>(pointer - item->guest.data()));
                 }
             }
             pointer += initialSize;
@@ -1185,7 +1185,7 @@ namespace skyline::kernel::svc {
         auto &context{*reinterpret_cast<ThreadContext *>(state.ctx->gpr.x0)};
         context = {}; // Zero-initialize the contents of the context as not all fields are set

-        auto& targetContext{thread->ctx};
+        auto &targetContext{thread->ctx};
         for (size_t i{}; i < targetContext.gpr.regs.size(); i++)
             context.gpr[i] = targetContext.gpr.regs[i];

@@ -12,12 +12,12 @@ namespace skyline::kernel::type {
      */
     class KMemory : public KObject {
       public:
-        KMemory(const DeviceState &state, KType objectType) : KObject(state, objectType) {}
+        KMemory(const DeviceState &state, KType objectType, span <u8> guest) : KObject(state, objectType), guest(guest) {}

         /**
          * @return A span representing the memory object on the guest
          */
-        virtual span<u8> Get() = 0;
+        span <u8> guest;

         /**
          * @brief Updates the permissions of a block of mapped memory
@@ -25,12 +25,7 @@ namespace skyline::kernel::type {
          * @param size The size of the partition to change the permissions of
          * @param permission The new permissions to be set for the memory
          */
-        virtual void UpdatePermission(u8 *ptr, size_t size, memory::Permission permission) = 0;
-
-        bool IsInside(u8 *ptr) {
-            auto spn{Get()};
-            return (spn.data() <= ptr) && ((spn.data() + spn.size()) > ptr);
-        }
+        virtual void UpdatePermission(span <u8> map, memory::Permission permission) = 0;

         virtual ~KMemory() = default;
     };
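Put together, the two hunks above reshape the KMemory interface: the guest mapping becomes a public span member instead of a virtual Get() accessor, and permission updates take a span. A simplified, self-contained sketch of the resulting shape (the real class also carries DeviceState, KType, and chunk bookkeeping):

    #include <cstdint>
    #include <span>

    struct Permission { bool r, w, x; };

    class Memory {
      public:
        explicit Memory(std::span<std::uint8_t> guest) : guest(guest) {}
        virtual ~Memory() = default;

        std::span<std::uint8_t> guest; // Replaces the old virtual Get() accessor

        // Subclasses reprotect the sub-range described by 'map'
        virtual void UpdatePermission(std::span<std::uint8_t> map, Permission permission) = 0;
    };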
@@ -8,87 +8,84 @@
 #include "KProcess.h"

 namespace skyline::kernel::type {
-    KPrivateMemory::KPrivateMemory(const DeviceState &state, u8 *ptr, size_t size, memory::Permission permission, memory::MemoryState memState)
-        : ptr(ptr),
-          size(size),
-          permission(permission),
+    KPrivateMemory::KPrivateMemory(const DeviceState &state, span<u8> guest, memory::Permission permission, memory::MemoryState memState)
+        : permission(permission),
           memoryState(memState),
-          KMemory(state, KType::KPrivateMemory) {
-        if (!state.process->memory.base.IsInside(ptr) || !state.process->memory.base.IsInside(ptr + size))
-            throw exception("KPrivateMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", ptr, ptr + size);
-        if (!util::IsPageAligned(ptr) || !util::IsPageAligned(size))
-            throw exception("KPrivateMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", ptr, ptr + size, size);
+          KMemory(state, KType::KPrivateMemory, guest) {
+        if (!state.process->memory.base.contains(guest))
+            throw exception("KPrivateMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", guest.data(), guest.data() + guest.size());
+        if (!util::IsPageAligned(guest.data()) || !util::IsPageAligned(guest.size()))
+            throw exception("KPrivateMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", guest.data(), guest.data() + guest.size(), guest.size());

-        if (mprotect(ptr, size, PROT_READ | PROT_WRITE | PROT_EXEC) < 0) // We only need to reprotect as the allocation has already been reserved by the MemoryManager
-            throw exception("An occurred while mapping private memory: {} with 0x{:X} @ 0x{:X}", strerror(errno), ptr, size);
+        if (mprotect(guest.data(), guest.size(), PROT_READ | PROT_WRITE | PROT_EXEC) < 0) // We only need to reprotect as the allocation has already been reserved by the MemoryManager
+            throw exception("An occurred while mapping private memory: {} with 0x{:X} @ 0x{:X}", strerror(errno), guest.data(), guest.size());

         state.process->memory.InsertChunk(ChunkDescriptor{
-            .ptr = ptr,
-            .size = size,
+            .ptr = guest.data(),
+            .size = guest.size(),
             .permission = permission,
             .state = memState,
         });
     }

     void KPrivateMemory::Resize(size_t nSize) {
-        if (mprotect(ptr, nSize, PROT_READ | PROT_WRITE | PROT_EXEC) < 0)
+        if (mprotect(guest.data(), nSize, PROT_READ | PROT_WRITE | PROT_EXEC) < 0)
             throw exception("An occurred while resizing private memory: {}", strerror(errno));

-        if (nSize < size) {
+        if (nSize < guest.size()) {
             state.process->memory.InsertChunk(ChunkDescriptor{
-                .ptr = ptr + nSize,
-                .size = size - nSize,
+                .ptr = guest.data() + nSize,
+                .size = guest.size() - nSize,
                 .state = memory::states::Unmapped,
             });
-        } else if (size < nSize) {
+        } else if (guest.size() < nSize) {
             state.process->memory.InsertChunk(ChunkDescriptor{
-                .ptr = ptr + size,
-                .size = nSize - size,
+                .ptr = guest.data() + guest.size(),
+                .size = nSize - guest.size(),
                 .permission = permission,
                 .state = memoryState,
             });
         }

-        size = nSize;
+        guest = span<u8>{guest.data(), nSize};
     }

-    void KPrivateMemory::Remap(u8 *nPtr, size_t nSize) {
-        if (!state.process->memory.base.IsInside(nPtr) || !state.process->memory.base.IsInside(nPtr + nSize))
-            throw exception("KPrivateMemory remapping isn't inside guest address space: 0x{:X} - 0x{:X}", nPtr, nPtr + nSize);
-        if (!util::IsPageAligned(nPtr) || !util::IsPageAligned(nSize))
-            throw exception("KPrivateMemory remapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", nPtr, nPtr + nSize, nSize);
+    void KPrivateMemory::Remap(span<u8> map) {
+        if (!state.process->memory.base.contains(map))
+            throw exception("KPrivateMemory remapping isn't inside guest address space: 0x{:X} - 0x{:X}", map.data(), map.end().base());
+        if (!util::IsPageAligned(map.data()) || !util::IsPageAligned(map.size()))
+            throw exception("KPrivateMemory remapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size());

-        if (mprotect(ptr, size, PROT_NONE) < 0)
+        if (mprotect(guest.data(), guest.size(), PROT_NONE) < 0)
             throw exception("An occurred while remapping private memory: {}", strerror(errno));

-        if (mprotect(nPtr, nSize, PROT_READ | PROT_WRITE | PROT_EXEC) < 0)
+        if (mprotect(map.data(), map.size(), PROT_READ | PROT_WRITE | PROT_EXEC) < 0)
             throw exception("An occurred while remapping private memory: {}", strerror(errno));
     }

-    void KPrivateMemory::UpdatePermission(u8 *pPtr, size_t pSize, memory::Permission pPermission) {
-        pPtr = std::clamp(pPtr, ptr, ptr + size);
-        pSize = std::min(pSize, static_cast<size_t>((ptr + size) - pPtr));
+    void KPrivateMemory::UpdatePermission(span<u8> map, memory::Permission pPermission) {
+        auto ptr{std::clamp(map.data(), guest.data(), guest.end().base())};
+        auto size{std::min(map.size(), static_cast<size_t>((guest.end().base()) - ptr))};

-        if (pPtr && !util::IsPageAligned(pPtr))
-            throw exception("KPrivateMemory permission updated with a non-page-aligned address: 0x{:X}", pPtr);
+        if (ptr && !util::IsPageAligned(ptr))
+            throw exception("KPrivateMemory permission updated with a non-page-aligned address: 0x{:X}", ptr);

         // If a static code region has been mapped as writable it needs to be changed to mutable
         if (memoryState == memory::states::CodeStatic && pPermission.w)
             memoryState = memory::states::CodeMutable;

         state.process->memory.InsertChunk(ChunkDescriptor{
-            .ptr = pPtr,
-            .size = pSize,
+            .ptr = ptr,
+            .size = size,
             .permission = pPermission,
             .state = memoryState,
         });
     }

     KPrivateMemory::~KPrivateMemory() {
-        mprotect(ptr, size, PROT_NONE);
+        mprotect(guest.data(), guest.size(), PROT_NONE);
         state.process->memory.InsertChunk(ChunkDescriptor{
-            .ptr = ptr,
-            .size = size,
+            .ptr = guest.data(),
+            .size = guest.size(),
             .state = memory::states::Unmapped,
         });
     }
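KPrivateMemory never maps anything itself; the MemoryManager reserves the whole carveout up front, and individual allocations only mprotect their sub-range, which is why construction, Resize, Remap, and the destructor above are all reprotect calls. A self-contained sketch of that reserve-then-reprotect pattern (PROT_NONE anonymous memory stands in for the memfd-backed carveout):

    #include <sys/mman.h>
    #include <unistd.h>

    int main() {
        size_t pageSize{static_cast<size_t>(sysconf(_SC_PAGESIZE))};
        size_t reserveSize{pageSize * 16};

        // Reserve address space once, with no usable permissions
        void *base{mmap(nullptr, reserveSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
        if (base == MAP_FAILED)
            return 1;

        // "Allocate" a page inside the reservation by reprotecting it
        if (mprotect(base, pageSize, PROT_READ | PROT_WRITE) < 0)
            return 1;
        static_cast<unsigned char *>(base)[0] = 1;

        // "Free" it again by dropping the permissions
        mprotect(base, pageSize, PROT_NONE);
        munmap(base, reserveSize);
        return 0;
    }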
@@ -12,8 +12,6 @@ namespace skyline::kernel::type {
      */
     class KPrivateMemory : public KMemory {
       public:
-        u8 *ptr{};
-        size_t size{};
         memory::Permission permission;
         memory::MemoryState memoryState;

@@ -21,7 +19,7 @@ namespace skyline::kernel::type {
          * @param permission The permissions for the allocated memory (As reported to the application, host memory permissions aren't reflected by this)
          * @note 'ptr' needs to be in guest-reserved address space
          */
-        KPrivateMemory(const DeviceState &state, u8 *ptr, size_t size, memory::Permission permission, memory::MemoryState memState);
+        KPrivateMemory(const DeviceState &state, span<u8> guest, memory::Permission permission, memory::MemoryState memState);

         /**
          * @note There is no check regarding if any expansions will cause the memory mapping to leak into other mappings
@@ -32,13 +30,9 @@ namespace skyline::kernel::type {
         /**
          * @note This does not copy over anything, only contents of any overlapping regions will be retained
          */
-        void Remap(u8 *ptr, size_t size);
+        void Remap(span<u8> map);

-        span<u8> Get() override {
-            return span(ptr, size);
-        }
-
-        void UpdatePermission(u8 *pPtr, size_t pSize, memory::Permission pPermission) override;
+        void UpdatePermission(span<u8> map, memory::Permission pPermission) override;

         /**
          * @brief The destructor of private memory, it deallocates the memory
@@ -13,7 +13,7 @@ namespace skyline::kernel::type {
     u8 *KProcess::TlsPage::ReserveSlot() {
         if (index == constant::TlsSlots)
             return nullptr;
-        return memory->ptr + (constant::TlsSlotSize * index++);
+        return memory->guest.data() + (constant::TlsSlotSize * index++);
     }

     KProcess::KProcess(const DeviceState &state) : memory(state), KSyncObject(state, KType::KProcess) {}
@@ -46,7 +46,7 @@ namespace skyline::kernel::type {

     void KProcess::InitializeHeapTls() {
         constexpr size_t DefaultHeapSize{0x200000};
-        heap = std::make_shared<KPrivateMemory>(state, reinterpret_cast<u8 *>(state.process->memory.heap.address), DefaultHeapSize, memory::Permission{true, true, false}, memory::states::Heap);
+        heap = std::make_shared<KPrivateMemory>(state, span<u8>{state.process->memory.heap.data(), DefaultHeapSize}, memory::Permission{true, true, false}, memory::states::Heap);
         InsertItem(heap); // Insert it into the handle table so GetMemoryObject will contain it
         tlsExceptionContext = AllocateTlsSlot();
     }
@@ -58,8 +58,8 @@ namespace skyline::kernel::type {
         if ((slot = tlsPage->ReserveSlot()))
             return slot;

-        slot = tlsPages.empty() ? reinterpret_cast<u8 *>(memory.tlsIo.address) : ((*(tlsPages.end() - 1))->memory->ptr + PAGE_SIZE);
-        auto tlsPage{std::make_shared<TlsPage>(std::make_shared<KPrivateMemory>(state, slot, PAGE_SIZE, memory::Permission(true, true, false), memory::states::ThreadLocal))};
+        slot = tlsPages.empty() ? reinterpret_cast<u8 *>(memory.tlsIo.data()) : ((*(tlsPages.end() - 1))->memory->guest.data() + PAGE_SIZE);
+        auto tlsPage{std::make_shared<TlsPage>(std::make_shared<KPrivateMemory>(state, span<u8>{slot, PAGE_SIZE}, memory::Permission(true, true, false), memory::states::ThreadLocal))};
         tlsPages.push_back(tlsPage);
         return tlsPage->ReserveSlot();
     }
@@ -69,8 +69,8 @@ namespace skyline::kernel::type {
         if (disableThreadCreation)
             return nullptr;
         if (!stackTop && threads.empty()) { //!< Main thread stack is created by the kernel and owned by the process
-            mainThreadStack = std::make_shared<KPrivateMemory>(state, reinterpret_cast<u8 *>(state.process->memory.stack.address), state.process->npdm.meta.mainThreadStackSize, memory::Permission{true, true, false}, memory::states::Stack);
-            stackTop = mainThreadStack->ptr + mainThreadStack->size;
+            mainThreadStack = std::make_shared<KPrivateMemory>(state, span<u8>{state.process->memory.stack.data(), state.process->npdm.meta.mainThreadStackSize}, memory::Permission{true, true, false}, memory::states::Stack);
+            stackTop = mainThreadStack->guest.end().base();
         }
         auto thread{NewHandle<KThread>(this, threads.size(), entry, argument, stackTop, priority ? *priority : state.process->npdm.meta.mainThreadPriority, idealCore ? *idealCore : state.process->npdm.meta.idealCore).item};
         threads.push_back(thread);
@@ -88,7 +88,7 @@ namespace skyline::kernel::type {
             case type::KType::KSharedMemory:
             case type::KType::KTransferMemory: {
                 auto mem{std::static_pointer_cast<type::KMemory>(object)};
-                if (mem->IsInside(ptr))
+                if (mem->guest.contains(ptr))
                     return std::make_optional<KProcess::HandleOut<KMemory>>({mem, constant::BaseHandleIndex + index});
             }

@@ -10,34 +10,34 @@
 namespace skyline::kernel::type {
     KSharedMemory::KSharedMemory(const DeviceState &state, size_t size, memory::MemoryState memState, KType type)
         : memoryState(memState),
-          KMemory(state, type) {
+          KMemory(state, type, span<u8>{}) {
         fd = ASharedMemory_create(type == KType::KSharedMemory ? "HOS-KSharedMemory" : "HOS-KTransferMemory", size);
         if (fd < 0)
             throw exception("An error occurred while creating shared memory: {}", fd);

-        host.ptr = static_cast<u8 *>(mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED, fd, 0));
-        if (host.ptr == MAP_FAILED)
+        auto hostPtr{static_cast<u8 *>(mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED, fd, 0))};
+        if (hostPtr == MAP_FAILED)
             throw exception("An occurred while mapping shared memory: {}", strerror(errno));

-        host.size = size;
+        host = span<u8>{hostPtr, size};
     }

-    u8 *KSharedMemory::Map(u8 *ptr, u64 size, memory::Permission permission) {
-        if (!state.process->memory.base.IsInside(ptr) || !state.process->memory.base.IsInside(ptr + size))
-            throw exception("KPrivateMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", ptr, ptr + size);
-        if (!util::IsPageAligned(ptr) || !util::IsPageAligned(size))
-            throw exception("KSharedMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", ptr, ptr + size, size);
-        if (guest.Valid())
-            throw exception("Mapping KSharedMemory multiple times on guest is not supported: Requested Mapping: 0x{:X} - 0x{:X} (0x{:X}), Current Mapping: 0x{:X} - 0x{:X} (0x{:X})", ptr, ptr + size, size, guest.ptr, guest.ptr + guest.size, guest.size);
+    u8 *KSharedMemory::Map(span<u8> map, memory::Permission permission) {
+        if (!state.process->memory.base.contains(map))
+            throw exception("KPrivateMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", map.data(), map.end().base());
+        if (!util::IsPageAligned(map.data()) || !util::IsPageAligned(map.size()))
+            throw exception("KSharedMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size());
+        if (guest.valid())
+            throw exception("Mapping KSharedMemory multiple times on guest is not supported: Requested Mapping: 0x{:X} - 0x{:X} (0x{:X}), Current Mapping: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size(), guest.data(), guest.end().base(), guest.size());

-        guest.ptr = static_cast<u8 *>(mmap(ptr, size, permission.Get(), MAP_SHARED | (ptr ? MAP_FIXED : 0), fd, 0));
-        if (guest.ptr == MAP_FAILED)
+        auto guestPtr{static_cast<u8 *>(mmap(map.data(), map.size(), permission.Get(), MAP_SHARED | (map.data() ? MAP_FIXED : 0), fd, 0))};
+        if (guestPtr == MAP_FAILED)
             throw exception("An error occurred while mapping shared memory in guest: {}", strerror(errno));
-        guest.size = size;
+        guest = span<u8>{guestPtr, map.size()};

         state.process->memory.InsertChunk(ChunkDescriptor{
-            .ptr = guest.ptr,
-            .size = size,
+            .ptr = guest.data(),
+            .size = guest.size(),
             .permission = permission,
             .state = memoryState,
             .attributes = memory::MemoryAttribute{
@@ -45,41 +45,41 @@ namespace skyline::kernel::type {
             },
         });

-        return guest.ptr;
+        return guest.data();
     }

-    void KSharedMemory::Unmap(u8 *ptr, u64 size) {
+    void KSharedMemory::Unmap(span<u8> map) {
         auto &memoryManager{state.process->memory};
-        if (!memoryManager.base.IsInside(ptr) || !memoryManager.base.IsInside(ptr + size))
-            throw exception("KPrivateMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", ptr, ptr + size);
-        if (!util::IsPageAligned(ptr) || !util::IsPageAligned(size))
-            throw exception("KSharedMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", ptr, ptr + size, size);
-        if (guest.ptr != ptr && guest.size != size)
-            throw exception("Unmapping KSharedMemory partially is not supported: Requested Unmap: 0x{:X} - 0x{:X} (0x{:X}), Current Mapping: 0x{:X} - 0x{:X} (0x{:X})", ptr, ptr + size, size, guest.ptr, guest.ptr + guest.size, guest.size);
+        if (!memoryManager.base.contains(map))
+            throw exception("KPrivateMemory allocation isn't inside guest address space: 0x{:X} - 0x{:X}", map.data(), map.end().base());
+        if (!util::IsPageAligned(map.data()) || !util::IsPageAligned(map.size()))
+            throw exception("KSharedMemory mapping isn't page-aligned: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size());
+        if (guest.data() != map.data() && guest.size() != map.size())
+            throw exception("Unmapping KSharedMemory partially is not supported: Requested Unmap: 0x{:X} - 0x{:X} (0x{:X}), Current Mapping: 0x{:X} - 0x{:X} (0x{:X})", map.data(), map.end().base(), map.size(), guest.data(), guest.end().base(), guest.size());

-        if (mmap(ptr, size, PROT_NONE, MAP_SHARED | MAP_FIXED, memoryManager.memoryFd, reinterpret_cast<off_t>(ptr - memoryManager.base.address)) == MAP_FAILED)
+        if (mmap(map.data(), map.size(), PROT_NONE, MAP_SHARED | MAP_FIXED, memoryManager.memoryFd, reinterpret_cast<off_t>(map.data() - memoryManager.base.data())) == MAP_FAILED)
             throw exception("An error occurred while unmapping shared memory in guest: {}", strerror(errno));

-        guest = {};
+        guest = span<u8>{};
         memoryManager.InsertChunk(ChunkDescriptor{
-            .ptr = ptr,
-            .size = size,
+            .ptr = map.data(),
+            .size = map.size(),
             .state = memory::states::Unmapped,
         });
     }

-    void KSharedMemory::UpdatePermission(u8 *ptr, size_t size, memory::Permission permission) {
-        if (ptr && !util::IsPageAligned(ptr))
-            throw exception("KSharedMemory permission updated with a non-page-aligned address: 0x{:X}", ptr);
+    void KSharedMemory::UpdatePermission(span<u8> map, memory::Permission permission) {
+        if (map.valid() && !util::IsPageAligned(map.data()))
+            throw exception("KSharedMemory permission updated with a non-page-aligned address: 0x{:X}", map.data());

-        if (guest.Valid()) {
-            mprotect(ptr, size, permission.Get());
-            if (guest.ptr == MAP_FAILED)
+        if (guest.valid()) {
+            mprotect(map.data(), map.size(), permission.Get());
+            if (guest.data() == MAP_FAILED)
                 throw exception("An error occurred while updating shared memory's permissions in guest: {}", strerror(errno));

             state.process->memory.InsertChunk(ChunkDescriptor{
-                .ptr = ptr,
-                .size = size,
+                .ptr = map.data(),
+                .size = map.size(),
                 .permission = permission,
                 .state = memoryState,
                 .attributes = memory::MemoryAttribute{
@@ -90,31 +90,30 @@ namespace skyline::kernel::type {
     }

     KSharedMemory::~KSharedMemory() {
-        if (state.process && guest.Valid()) {
+        if (state.process && guest.valid()) {
             auto &memoryManager{state.process->memory};
             if (objectType != KType::KTransferMemory) {
-                if (mmap(guest.ptr, guest.size, PROT_NONE, MAP_SHARED | MAP_FIXED, memoryManager.memoryFd, reinterpret_cast<off_t>(guest.ptr - memoryManager.base.address)) == MAP_FAILED)
+                if (mmap(guest.data(), guest.size(), PROT_NONE, MAP_SHARED | MAP_FIXED, memoryManager.memoryFd, reinterpret_cast<off_t>(guest.data() - memoryManager.base.data())) == MAP_FAILED)
                     Logger::Warn("An error occurred while unmapping shared memory: {}", strerror(errno));

                 state.process->memory.InsertChunk(ChunkDescriptor{
-                    .ptr = guest.ptr,
-                    .size = guest.size,
+                    .ptr = guest.data(),
+                    .size = guest.size(),
                     .state = memory::states::Unmapped,
                 });
             } else {
                 // KTransferMemory remaps the region with R/W permissions during destruction
                 constexpr memory::Permission UnborrowPermission{true, true, false};

-                if (mmap(guest.ptr, guest.size, UnborrowPermission.Get(), MAP_SHARED | MAP_FIXED, memoryManager.memoryFd, reinterpret_cast<off_t>(guest.ptr - memoryManager.base.address)) == MAP_FAILED)
+                if (mmap(guest.data(), guest.size(), UnborrowPermission.Get(), MAP_SHARED | MAP_FIXED, memoryManager.memoryFd, reinterpret_cast<off_t>(guest.data() - memoryManager.base.data())) == MAP_FAILED)
                     Logger::Warn("An error occurred while remapping transfer memory: {}", strerror(errno));
-                else if (!host.Valid())
+                else if (!host.valid())
                     Logger::Warn("Expected host mapping of transfer memory to be valid during KTransferMemory destruction");

-                std::memcpy(guest.ptr, host.ptr, host.size);
+                guest.copy_from(host);

                 state.process->memory.InsertChunk(ChunkDescriptor{
-                    .ptr = guest.ptr,
-                    .size = guest.size,
+                    .ptr = guest.data(),
+                    .size = guest.size(),
                     .permission = UnborrowPermission,
                     .state = memoryState,
                     .attributes = memory::MemoryAttribute{
@@ -124,8 +123,8 @@ namespace skyline::kernel::type {
             }
         }

-        if (host.Valid())
-            munmap(host.ptr, host.size);
+        if (host.valid())
+            munmap(host.data(), host.size());

         close(fd);
     }
@@ -15,32 +15,21 @@ namespace skyline::kernel::type {
         memory::MemoryState memoryState; //!< The state of the memory as supplied initially, this is retained for any mappings

       public:
-        struct MapInfo {
-            u8 *ptr;
-            size_t size;
-
-            constexpr bool Valid() {
-                return ptr && size;
-            }
-        } host{}, guest{}; //!< We keep two mirrors of the underlying shared memory for guest access and host access, the host mirror is persistently mapped and should be used by anything accessing the memory on the host
+        span<u8> host; //!< We also keep a host mirror of the underlying shared memory for host access, it is persistently mapped and should be used by anything accessing the memory on the host

         KSharedMemory(const DeviceState &state, size_t size, memory::MemoryState memState = memory::states::SharedMemory, KType type = KType::KSharedMemory);

         /**
          * @note 'ptr' needs to be in guest-reserved address space
          */
-        u8 *Map(u8 *ptr, u64 size, memory::Permission permission);
+        u8 *Map(span<u8> map, memory::Permission permission);

         /**
          * @note 'ptr' needs to be in guest-reserved address space
          */
-        void Unmap(u8 *ptr, u64 size);
+        void Unmap(span<u8> map);

-        span<u8> Get() override {
-            return span(guest.ptr, guest.size);
-        }
-
-        void UpdatePermission(u8 *ptr, size_t size, memory::Permission permission) override;
+        void UpdatePermission(span<u8> map, memory::Permission permission) override;

         /**
          * @brief The destructor of shared memory, it deallocates the memory from all processes
@@ -17,8 +17,8 @@ namespace skyline::kernel::type {
          */
         KTransferMemory(const DeviceState &state, u8 *ptr, size_t size, memory::Permission permission, memory::MemoryState memState = memory::states::TransferMemory)
             : KSharedMemory(state, size, memState, KType::KTransferMemory) {
-            std::memcpy(host.ptr, ptr, size);
-            Map(ptr, size, permission);
+            std::memcpy(host.data(), ptr, size);
+            Map(span<u8>{ptr, size}, permission);
         }
     };
 }
@@ -11,7 +11,7 @@

 namespace skyline::loader {
     Loader::ExecutableLoadInfo Loader::LoadExecutable(const std::shared_ptr<kernel::type::KProcess> &process, const DeviceState &state, Executable &executable, size_t offset, const std::string &name) {
-        u8 *base{reinterpret_cast<u8 *>(process->memory.base.address + offset)};
+        u8 *base{reinterpret_cast<u8 *>(process->memory.base.data() + offset)};

         u64 textSize{executable.text.contents.size()};
         u64 roSize{executable.ro.contents.size()};
@@ -26,16 +26,16 @@ namespace skyline::loader {
         auto patch{state.nce->GetPatchData(executable.text.contents)};
         auto size{patch.size + textSize + roSize + dataSize};

-        process->NewHandle<kernel::type::KPrivateMemory>(base, patch.size, memory::Permission{false, false, false}, memory::states::Reserved); // ---
+        process->NewHandle<kernel::type::KPrivateMemory>(span<u8>{base, patch.size}, memory::Permission{false, false, false}, memory::states::Reserved); // ---
         Logger::Debug("Successfully mapped section .patch @ 0x{:X}, Size = 0x{:X}", base, patch.size);

-        process->NewHandle<kernel::type::KPrivateMemory>(base + patch.size + executable.text.offset, textSize, memory::Permission{true, false, true}, memory::states::CodeStatic); // R-X
+        process->NewHandle<kernel::type::KPrivateMemory>(span<u8>{base + patch.size + executable.text.offset, textSize}, memory::Permission{true, false, true}, memory::states::CodeStatic); // R-X
         Logger::Debug("Successfully mapped section .text @ 0x{:X}, Size = 0x{:X}", base + patch.size + executable.text.offset, textSize);

-        process->NewHandle<kernel::type::KPrivateMemory>(base + patch.size + executable.ro.offset, roSize, memory::Permission{true, false, false}, memory::states::CodeStatic); // R--
+        process->NewHandle<kernel::type::KPrivateMemory>(span<u8>{base + patch.size + executable.ro.offset, roSize}, memory::Permission{true, false, false}, memory::states::CodeStatic); // R--
         Logger::Debug("Successfully mapped section .rodata @ 0x{:X}, Size = 0x{:X}", base + patch.size + executable.ro.offset, roSize);

-        process->NewHandle<kernel::type::KPrivateMemory>(base + patch.size + executable.data.offset, dataSize, memory::Permission{true, true, false}, memory::states::CodeMutable); // RW-
+        process->NewHandle<kernel::type::KPrivateMemory>(span<u8>{base + patch.size + executable.data.offset, dataSize}, memory::Permission{true, true, false}, memory::states::CodeMutable); // RW-
         Logger::Debug("Successfully mapped section .data + .bss @ 0x{:X}, Size = 0x{:X}", base + patch.size + executable.data.offset, dataSize);

         state.nce->PatchCode(executable.text.contents, reinterpret_cast<u32 *>(base), patch.size, patch.offsets);
@@ -41,7 +41,7 @@ namespace skyline::loader {
             offset += loadInfo.size;
         }

-        state.process->memory.InitializeRegions(base, offset);
+        state.process->memory.InitializeRegions(span<u8>{base, offset});

         return entry;
     }
@@ -66,7 +66,7 @@ namespace skyline::loader {
         state.process->memory.InitializeVmm(memory::AddressSpaceType::AddressSpace39Bit);
         auto applicationName{nacp ? nacp->GetApplicationName(nacp->GetFirstSupportedTitleLanguage()) : ""};
         auto loadInfo{LoadExecutable(process, state, executable, 0, applicationName.empty() ? "main.nro" : applicationName + ".nro")};
-        state.process->memory.InitializeRegions(loadInfo.base, loadInfo.size);
+        state.process->memory.InitializeRegions(span<u8>{loadInfo.base, loadInfo.size});

         return loadInfo.entry;
     }
@@ -62,7 +62,7 @@ namespace skyline::loader {
     void *NsoLoader::LoadProcessData(const std::shared_ptr<kernel::type::KProcess> &process, const DeviceState &state) {
         state.process->memory.InitializeVmm(memory::AddressSpaceType::AddressSpace39Bit);
         auto loadInfo{LoadNso(this, backing, process, state)};
-        state.process->memory.InitializeRegions(loadInfo.base, loadInfo.size);
+        state.process->memory.InitializeRegions(span<u8>{loadInfo.base, loadInfo.size});
         return loadInfo.entry;
     }
 }
@@ -461,7 +461,7 @@ namespace skyline::nce {

             constexpr ssize_t MinimumPageoutSize{PAGE_SIZE}; //!< The minimum size to page out, we don't want to page out small intervals for performance reasons
             if (freeSize > MinimumPageoutSize)
-                state.process->memory.FreeMemory(freeStart, static_cast<size_t>(freeSize));
+                state.process->memory.FreeMemory(span<u8>{freeStart, static_cast<size_t>(freeSize)});
         }
     }
 }
@@ -9,7 +9,7 @@ namespace skyline::service::am {
     TransferMemoryIStorage::TransferMemoryIStorage(const DeviceState &state, ServiceManager &manager, std::shared_ptr<kernel::type::KTransferMemory> transferMemory, bool writable) : transferMemory(std::move(transferMemory)), IStorage(state, manager, writable) {}

     span<u8> TransferMemoryIStorage::GetSpan() {
-        return transferMemory->Get();
+        return transferMemory->host;
     }

     TransferMemoryIStorage::~TransferMemoryIStorage() = default;
@@ -21,7 +21,7 @@ namespace skyline::service::codec {
             throw exception("Work Buffer doesn't have adequate space for Opus Decoder: 0x{:X} (Required: 0x{:X})", workBufferSize, decoderOutputBufferSize);

         // We utilize the guest-supplied work buffer for allocating the OpusDecoder object into
-        decoderState = reinterpret_cast<OpusDecoder *>(workBuffer->host.ptr);
+        decoderState = reinterpret_cast<OpusDecoder *>(workBuffer->host.data());

         if (int result = opus_decoder_init(decoderState, sampleRate, channelCount) != OPUS_OK)
             throw OpusException(result);
@@ -38,11 +38,11 @@ namespace skyline::service::pl {
     constexpr u32 SharedFontKey{SharedFontMagic ^ SharedFontResult}; //!< The XOR key for encrypting the font size

     auto fontsDirectory{std::make_shared<vfs::OsFileSystem>(state.os->publicAppFilesPath + "fonts/")};
-    auto ptr{reinterpret_cast<u32 *>(sharedFontMemory->host.ptr)};
+    auto ptr{reinterpret_cast<u32 *>(sharedFontMemory->host.data())};
     for (auto &font : fonts) {
         *ptr++ = 0x18029a7f;
         *ptr++ = util::SwapEndianness(font.length ^ 0x49621806);
-        font.offset = static_cast<u32>(reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(sharedFontMemory->host.ptr));
+        font.offset = static_cast<u32>(reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(sharedFontMemory->host.data()));

         std::shared_ptr<vfs::Backing> fontFile;
         if (fontsDirectory->FileExists(font.path))
@@ -59,7 +59,7 @@ namespace skyline::service::timesrv::core {

     TimeSharedMemory::TimeSharedMemory(const DeviceState &state)
         : kTimeSharedMemory(std::make_shared<kernel::type::KSharedMemory>(state, TimeSharedMemorySize)),
-          timeSharedMemory(reinterpret_cast<TimeSharedMemoryLayout *>(kTimeSharedMemory->host.ptr)) {}
+          timeSharedMemory(reinterpret_cast<TimeSharedMemoryLayout *>(kTimeSharedMemory->host.data())) {}

     void TimeSharedMemory::SetupStandardSteadyClock(UUID rtcId, TimeSpanType baseTimePoint) {
         SteadyClockTimePoint context{