Mirror of https://github.com/Lime3DS/Lime3DS.git, synced 2024-12-17 21:41:49 +01:00
Changed u8* to MemoryRef
Commit 65d96bf6c1 (parent cf985631e0)

TODO (11 changed lines)
@@ -4,6 +4,7 @@
 ✔ CPU @done(19-08-13 15:41)
 ✔ Memory @done(19-08-13 15:41)
   ☐ Page tables
+    Need to change uses to shared_ptr
   ✔ Skip N3DS RAM if unused @done(20-01-03 23:26)
 ✔ DSP @done(19-12-28 16:57)
   Memory only
@@ -44,6 +45,7 @@
   Doesn't need to be serialized here
 ✔ Replace SERIALIZE_AS_POD with BOOST_IS_BITWISE_SERIALIZABLE @started(20-01-03 13:47) @done(20-01-03 13:58) @lasted(11m22s)
 ☐ Review constructor/initialization code
+☐ Review core timing events
 ✔ Fix CI @done(19-12-31 21:32)
 ✔ HW @done(19-08-13 15:41)
   ✔ GPU regs @done(19-08-13 15:41)
@@ -60,7 +62,7 @@
 ✔ Address arbiter @done(19-08-13 16:40)
 ✔ Client port @done(19-08-13 16:40)
 ✔ Client session @done(19-08-13 16:40)
-✔ Config mem @done(19-08-13 16:40)
+✔ Config mem @done(20-01-04 21:09)
 ✔ Event @done(19-12-22 18:44)
 ✔ Handle table @done(19-08-13 16:42)
 ✔ HLE IPC @done(19-12-23 00:36)
@@ -80,16 +82,15 @@
   This is needed because IPC can take as long as it takes
   Changed the unique_ptr<u8[]> to vector<u8>
 ✔ Session @done(19-08-13 16:44)
-☐ Shared memory @started(19-12-22 21:20)
+✔ Shared memory @started(19-12-22 21:20) @done(20-01-04 21:09) @lasted(1w5d23h49m26s)
   Need to figure out backing memory (a u8*)
-✘ Shared page @started(19-08-13 16:44) @cancelled(19-12-22 11:19)
-  Not needed right now as shared_page is read-only and derived from other data
+✔ Shared page @done(20-01-04 21:09)
 ✔ SVC @done(19-12-22 21:32)
   Nothing to do - all data is constant
 ☐ Thread @started(19-08-13 16:45)
   This requires refactoring wakeup_callback to be an object ref
 ✔ Timer @done(19-08-13 16:45)
-☐ VM Manager @started(19-08-13 16:46)
+✔ VM Manager @started(19-08-13 16:46) @done(20-01-04 21:09) @lasted(20w4d5h23m42s)
   Just need to figure out backing_mem (a u8*)
 ✔ Wait object @done(19-08-13 16:46)
 ☐ Service @started(19-12-23 12:49)
src/common/CMakeLists.txt
@@ -80,6 +80,8 @@ add_library(common STATIC
     logging/text_formatter.cpp
     logging/text_formatter.h
     math_util.h
+    memory_ref.h
+    memory_ref.cpp
     microprofile.cpp
     microprofile.h
     microprofileui.h
@@ -127,7 +129,7 @@ endif()

 create_target_directory_groups(common)

-target_link_libraries(common PUBLIC fmt microprofile)
+target_link_libraries(common PUBLIC fmt microprofile Boost::boost Boost::serialization)
 target_link_libraries(common PRIVATE libzstd_static)
 if (ARCHITECTURE_x86_64)
     target_link_libraries(common PRIVATE xbyak)
src/common/memory_ref.cpp (new file)
@@ -0,0 +1,4 @@
+#include "common/archives.h"
+#include "common/memory_ref.h"
+
+SERIALIZE_EXPORT_IMPL(BufferMem)
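A note on the export macros used here: BOOST_CLASS_EXPORT_KEY in the header (memory_ref.h, below) registers the class name, and SERIALIZE_EXPORT_IMPL (presumably this project's wrapper around BOOST_CLASS_EXPORT_IMPLEMENT from common/archives.h) emits the matching registration in one translation unit, so a BufferMem can be saved and restored through a BackingMem pointer. A minimal, standalone sketch of the underlying Boost mechanism, with invented Base/Derived types rather than anything from this commit:

    #include <fstream>
    #include <memory>
    #include <boost/archive/text_iarchive.hpp>
    #include <boost/archive/text_oarchive.hpp>
    #include <boost/serialization/base_object.hpp>
    #include <boost/serialization/export.hpp>
    #include <boost/serialization/shared_ptr.hpp>

    struct Base {
        virtual ~Base() = default;
        template <class Archive>
        void serialize(Archive&, const unsigned int) {}
    };

    struct Derived : Base {
        int value = 0;
        template <class Archive>
        void serialize(Archive& ar, const unsigned int) {
            ar& boost::serialization::base_object<Base>(*this);
            ar& value;
        }
    };

    // Header side: announce the exported name.
    BOOST_CLASS_EXPORT_KEY(Derived)
    // Source side: emit the registration for the archives included above.
    BOOST_CLASS_EXPORT_IMPLEMENT(Derived)

    int main() {
        const std::shared_ptr<Base> out = std::make_shared<Derived>();
        {
            std::ofstream os("state.txt");
            boost::archive::text_oarchive oa(os);
            oa << out; // Derived's data is written because of the export registration.
        }
        std::shared_ptr<Base> in;
        std::ifstream is("state.txt");
        boost::archive::text_iarchive ia(is);
        ia >> in; // Reconstructs a Derived object behind the Base pointer.
    }

Without the export pair, saving through the base-class pointer would throw an unregistered_class error at runtime.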
src/common/memory_ref.h (new file)
@@ -0,0 +1,112 @@
+#pragma once
+
+#include <memory>
+#include <vector>
+#include <boost/serialization/export.hpp>
+#include <boost/serialization/shared_ptr.hpp>
+#include <boost/serialization/vector.hpp>
+#include "common/assert.h"
+#include "common/common_types.h"
+
+/// Abstract host-side memory - for example a static buffer, or local vector
+class BackingMem {
+public:
+    virtual ~BackingMem() = default;
+    virtual u8* GetPtr() = 0;
+    virtual u32 GetSize() const = 0;
+};
+
+/// Backing memory implemented by a local buffer
+class BufferMem : public BackingMem {
+public:
+    BufferMem() = default;
+    BufferMem(u32 size) : data(std::vector<u8>(size)) {}
+
+    virtual u8* GetPtr() {
+        return data.data();
+    }
+
+    virtual u32 GetSize() const {
+        return static_cast<u32>(data.size());
+    }
+
+    std::vector<u8>& Vector() {
+        return data;
+    }
+
+private:
+    std::vector<u8> data;
+
+    template <class Archive>
+    void serialize(Archive& ar, const unsigned int) {
+        ar& data;
+    }
+    friend class boost::serialization::access;
+};
+
+BOOST_CLASS_EXPORT_KEY(BufferMem);
+
+/// A managed reference to host-side memory. Fast enough to be used everywhere instead of u8*
+/// Supports serialization.
+class MemoryRef {
+public:
+    MemoryRef() = default;
+    MemoryRef(std::nullptr_t) {}
+    MemoryRef(std::shared_ptr<BackingMem> backing_mem_)
+        : backing_mem(std::move(backing_mem_)), offset(0) {
+        Init();
+    }
+    MemoryRef(std::shared_ptr<BackingMem> backing_mem_, u32 offset_)
+        : backing_mem(std::move(backing_mem_)), offset(offset_) {
+        ASSERT(offset < backing_mem->GetSize());
+        Init();
+    }
+    inline operator u8*() {
+        return cptr;
+    }
+    inline u8* GetPtr() {
+        return cptr;
+    }
+    inline operator bool() const {
+        return cptr != nullptr;
+    }
+    inline const u8* GetPtr() const {
+        return cptr;
+    }
+    inline u32 GetSize() const {
+        return csize;
+    }
+    inline void operator+=(u32 offset_by) {
+        ASSERT(offset_by < csize);
+        offset += offset_by;
+        Init();
+    }
+    inline MemoryRef operator+(u32 offset_by) const {
+        ASSERT(offset_by < csize);
+        return MemoryRef(backing_mem, offset + offset_by);
+    }
+    inline u8* operator+(std::size_t offset_by) const {
+        ASSERT(offset_by < csize);
+        return cptr + offset_by;
+    }
+
+private:
+    std::shared_ptr<BackingMem> backing_mem;
+    u32 offset;
+    // Cached values for speed
+    u8* cptr;
+    u32 csize;
+
+    void Init() {
+        cptr = backing_mem->GetPtr() + offset;
+        csize = static_cast<u32>(backing_mem->GetSize() - offset);
+    }
+
+    template <class Archive>
+    void serialize(Archive& ar, const unsigned int) {
+        ar& backing_mem;
+        ar& offset;
+        Init();
+    }
+    friend class boost::serialization::access;
+};
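To make the intended use concrete, here is a small usage sketch written against the interface above; the function name and the sizes/offsets are purely illustrative:

    #include <memory>
    #include "common/memory_ref.h"

    void Example() {
        // 4 KiB of host memory that knows how to serialize itself.
        auto block = std::make_shared<BufferMem>(0x1000);

        // A MemoryRef behaves like a u8* into that block...
        MemoryRef ref(block);
        u8* raw = ref; // implicit conversion keeps existing u8*-based call sites working
        raw[0] = 0xAB;

        // ...but it can also be rebased and copied around cheaply.
        MemoryRef page1 = ref + static_cast<u32>(0x100); // new ref at offset 0x100
        page1 += 0x10; // advance in place (bounds-checked by ASSERT)
    }

Unlike a raw u8*, the reference stores (backing_mem, offset), so it survives a save/load round trip: serialize() writes the shared_ptr and the offset, and Init() re-derives the cached pointer on load. Because Boost tracks shared_ptrs, every MemoryRef into the same BufferMem resolves to the same backing object again after deserialization.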
src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -298,7 +298,7 @@ void ARM_Dynarmic::PageTableChanged() {
 std::unique_ptr<Dynarmic::A32::Jit> ARM_Dynarmic::MakeJit() {
     Dynarmic::A32::UserConfig config;
     config.callbacks = cb.get();
-    config.page_table = &current_page_table->pointers;
+    config.page_table = &current_page_table->GetPointerArray();
     config.coprocessors[15] = std::make_shared<DynarmicCP15>(interpreter_state);
     config.define_unpredictable_behaviour = true;
     return std::make_unique<Dynarmic::A32::Jit>(config);
src/core/hle/kernel/config_mem.h
@@ -12,6 +12,7 @@
 #include <boost/serialization/binary_object.hpp>
 #include "common/common_funcs.h"
 #include "common/common_types.h"
+#include "common/memory_ref.h"
 #include "common/swap.h"
 #include "core/memory.h"

@@ -50,11 +51,19 @@ struct ConfigMemDef {
 static_assert(sizeof(ConfigMemDef) == Memory::CONFIG_MEMORY_SIZE,
               "Config Memory structure size is wrong");

-class Handler {
+class Handler : public BackingMem {
 public:
     Handler();
     ConfigMemDef& GetConfigMem();
+
+    virtual u8* GetPtr() {
+        return static_cast<u8*>(static_cast<void*>(&config_mem));
+    }
+
+    virtual u32 GetSize() const {
+        return sizeof(config_mem);
+    }
+
 private:
     ConfigMemDef config_mem;

src/core/hle/kernel/ipc.cpp
@@ -4,6 +4,7 @@

 #include <algorithm>
 #include "common/alignment.h"
+#include "common/memory_ref.h"
 #include "core/core.h"
 #include "core/hle/ipc.h"
 #include "core/hle/kernel/handle_table.h"
@@ -193,28 +194,29 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
             // TODO(Subv): Perform permission checks.

             // Reserve a page of memory before the mapped buffer
-            auto reserve_buffer = std::vector<u8>(Memory::PAGE_SIZE);
+            std::shared_ptr<BackingMem> reserve_buffer =
+                std::make_shared<BufferMem>(Memory::PAGE_SIZE);
             dst_process->vm_manager.MapBackingMemoryToBase(
-                Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer.data(),
+                Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer,
                 Memory::PAGE_SIZE, Kernel::MemoryState::Reserved);

-            auto buffer = std::vector<u8>(num_pages * Memory::PAGE_SIZE);
-            memory.ReadBlock(*src_process, source_address, buffer.data() + page_offset, size);
+            std::shared_ptr<BackingMem> buffer =
+                std::make_shared<BufferMem>(num_pages * Memory::PAGE_SIZE);
+            memory.ReadBlock(*src_process, source_address, buffer->GetPtr() + page_offset, size);

             // Map the page(s) into the target process' address space.
             target_address =
                 dst_process->vm_manager
                     .MapBackingMemoryToBase(Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE,
-                                            buffer.data(), num_pages * Memory::PAGE_SIZE,
-                                            Kernel::MemoryState::Shared)
+                                            buffer, buffer->GetSize(), Kernel::MemoryState::Shared)
                     .Unwrap();

             cmd_buf[i++] = target_address + page_offset;

             // Reserve a page of memory after the mapped buffer
             dst_process->vm_manager.MapBackingMemoryToBase(
-                Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer.data(),
-                Memory::PAGE_SIZE, Kernel::MemoryState::Reserved);
+                Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer,
+                reserve_buffer->GetSize(), Kernel::MemoryState::Reserved);

             mapped_buffer_context.push_back({permissions, size, source_address,
                                              target_address + page_offset, std::move(buffer),
src/core/hle/kernel/ipc.h
@@ -6,7 +6,7 @@

 #include <memory>
 #include <vector>
-#include <boost/serialization/vector.hpp>
+#include <boost/serialization/shared_ptr.hpp>
 #include "common/common_types.h"
 #include "core/hle/ipc.h"
 #include "core/hle/kernel/thread.h"
@@ -25,8 +25,8 @@ struct MappedBufferContext {
     VAddr source_address;
     VAddr target_address;

-    std::vector<u8> buffer;
-    std::vector<u8> reserve_buffer;
+    std::shared_ptr<BackingMem> buffer;
+    std::shared_ptr<BackingMem> reserve_buffer;

 private:
     template <class Archive>
src/core/hle/kernel/kernel.cpp
@@ -116,8 +116,8 @@ void KernelSystem::serialize(Archive& ar, const unsigned int file_version) {
     ar& process_list;
     ar& current_process;
     ar&* thread_manager.get();
-    ar&* config_mem_handler.get();
-    // Shared page data is read-only at the moment, so doesn't need serializing
+    ar& config_mem_handler;
+    ar& shared_page_handler;
     // Deliberately don't include debugger info to allow debugging through loads
 }

src/core/hle/kernel/kernel.h
@@ -282,8 +282,8 @@ private:

     std::unique_ptr<ThreadManager> thread_manager;

-    std::unique_ptr<ConfigMem::Handler> config_mem_handler;
-    std::unique_ptr<SharedPage::Handler> shared_page_handler;
+    std::shared_ptr<ConfigMem::Handler> config_mem_handler;
+    std::shared_ptr<SharedPage::Handler> shared_page_handler;

     std::unique_ptr<IPCDebugger::Recorder> ipc_recorder;

src/core/hle/kernel/memory.cpp
@@ -57,7 +57,7 @@ void KernelSystem::MemoryInit(u32 mem_type) {
     // We must've allocated the entire FCRAM by the end
     ASSERT(base == Memory::FCRAM_SIZE);

-    config_mem_handler = std::make_unique<ConfigMem::Handler>();
+    config_mem_handler = std::make_shared<ConfigMem::Handler>();
     auto& config_mem = config_mem_handler->GetConfigMem();
     config_mem.app_mem_type = mem_type;
     // app_mem_malloc does not always match the configured size for memory_region[0]: in case the
@@ -66,7 +66,7 @@ void KernelSystem::MemoryInit(u32 mem_type) {
     config_mem.sys_mem_alloc = memory_regions[1].size;
     config_mem.base_mem_alloc = memory_regions[2].size;

-    shared_page_handler = std::make_unique<SharedPage::Handler>(timing);
+    shared_page_handler = std::make_shared<SharedPage::Handler>(timing);
 }

 MemoryRegionInfo* KernelSystem::GetMemoryRegion(MemoryRegion region) {
@@ -127,7 +127,7 @@ void KernelSystem::HandleSpecialMapping(VMManager& address_space, const AddressM
         return;
     }

-    u8* target_pointer = memory.GetPhysicalPointer(area->paddr_base + offset_into_region);
+    auto target_pointer = memory.GetPhysicalRef(area->paddr_base + offset_into_region);

     // TODO(yuriks): This flag seems to have some other effect, but it's unknown what
     MemoryState memory_state = mapping.unk_flag ? MemoryState::Static : MemoryState::IO;
@@ -140,20 +140,16 @@ void KernelSystem::HandleSpecialMapping(VMManager& address_space, const AddressM
 }

 void KernelSystem::MapSharedPages(VMManager& address_space) {
-    auto cfg_mem_vma =
-        address_space
-            .MapBackingMemory(Memory::CONFIG_MEMORY_VADDR,
-                              reinterpret_cast<u8*>(&config_mem_handler->GetConfigMem()),
-                              Memory::CONFIG_MEMORY_SIZE, MemoryState::Shared)
-            .Unwrap();
+    auto cfg_mem_vma = address_space
+                           .MapBackingMemory(Memory::CONFIG_MEMORY_VADDR, {config_mem_handler},
+                                             Memory::CONFIG_MEMORY_SIZE, MemoryState::Shared)
+                           .Unwrap();
     address_space.Reprotect(cfg_mem_vma, VMAPermission::Read);

-    auto shared_page_vma =
-        address_space
-            .MapBackingMemory(Memory::SHARED_PAGE_VADDR,
-                              reinterpret_cast<u8*>(&shared_page_handler->GetSharedPage()),
-                              Memory::SHARED_PAGE_SIZE, MemoryState::Shared)
-            .Unwrap();
+    auto shared_page_vma = address_space
+                               .MapBackingMemory(Memory::SHARED_PAGE_VADDR, {shared_page_handler},
+                                                 Memory::SHARED_PAGE_SIZE, MemoryState::Shared)
+                               .Unwrap();
     address_space.Reprotect(shared_page_vma, VMAPermission::Read);
 }

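The {config_mem_handler} and {shared_page_handler} arguments above work because both handlers now derive from BackingMem: the shared_ptr<Handler> upcasts to shared_ptr<BackingMem>, which MemoryRef's converting constructor accepts. A reduced sketch of that conversion chain (the function name is illustrative; the types are the ones from this commit):

    #include <memory>
    #include "common/memory_ref.h"
    #include "core/hle/kernel/config_mem.h"

    void SketchConfigMemMapping() {
        auto handler = std::make_shared<ConfigMem::Handler>();

        // Handler : public BackingMem, so the shared_ptr upcasts implicitly.
        std::shared_ptr<BackingMem> backing = handler;

        // MemoryRef's shared_ptr<BackingMem> constructor accepts it; this is what the
        // brace-init {config_mem_handler} passed to MapBackingMemory resolves to.
        MemoryRef ref{backing};

        // The VMA now stores this serializable reference instead of the old
        // reinterpret_cast<u8*>(&handler->GetConfigMem()) raw pointer.
        u8* raw = ref.GetPtr();
        (void)raw;
    }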
src/core/hle/kernel/process.cpp
@@ -223,7 +223,7 @@ ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission per
         std::fill(kernel.memory.GetFCRAMPointer(interval.lower()),
                   kernel.memory.GetFCRAMPointer(interval.upper()), 0);
         auto vma = vm_manager.MapBackingMemory(interval_target,
-                                               kernel.memory.GetFCRAMPointer(interval.lower()),
+                                               kernel.memory.GetFCRAMRef(interval.lower()),
                                                interval_size, memory_state);
         ASSERT(vma.Succeeded());
         vm_manager.Reprotect(vma.Unwrap(), perms);
@@ -251,7 +251,7 @@ ResultCode Process::HeapFree(VAddr target, u32 size) {
     // Free heaps block by block
     CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(target, size));
     for (const auto [backing_memory, block_size] : backing_blocks) {
-        memory_region->Free(kernel.memory.GetFCRAMOffset(backing_memory), block_size);
+        memory_region->Free(kernel.memory.GetFCRAMOffset(backing_memory.GetPtr()), block_size);
     }

     ResultCode result = vm_manager.UnmapRange(target, size);
@@ -295,9 +295,9 @@ ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission p
         }
     }

-    u8* backing_memory = kernel.memory.GetFCRAMPointer(physical_offset);
+    auto backing_memory = kernel.memory.GetFCRAMRef(physical_offset);

-    std::fill(backing_memory, backing_memory + size, 0);
+    std::fill(backing_memory.GetPtr(), backing_memory.GetPtr() + size, 0);
     auto vma = vm_manager.MapBackingMemory(target, backing_memory, size, MemoryState::Continuous);
     ASSERT(vma.Succeeded());
     vm_manager.Reprotect(vma.Unwrap(), perms);
src/core/hle/kernel/shared_memory.cpp
@@ -48,7 +48,7 @@ ResultVal<std::shared_ptr<SharedMemory>> KernelSystem::CreateSharedMemory(
         ASSERT_MSG(offset, "Not enough space in region to allocate shared memory!");

         std::fill(memory.GetFCRAMPointer(*offset), memory.GetFCRAMPointer(*offset + size), 0);
-        shared_memory->backing_blocks = {{memory.GetFCRAMPointer(*offset), size}};
+        shared_memory->backing_blocks = {{memory.GetFCRAMRef(*offset), size}};
         shared_memory->holding_memory += MemoryRegionInfo::Interval(*offset, *offset + size);
         shared_memory->linear_heap_phys_offset = *offset;

@@ -90,7 +90,7 @@ std::shared_ptr<SharedMemory> KernelSystem::CreateSharedMemoryForApplet(
     shared_memory->other_permissions = other_permissions;
     for (const auto& interval : backing_blocks) {
         shared_memory->backing_blocks.push_back(
-            {memory.GetFCRAMPointer(interval.lower()), interval.upper() - interval.lower()});
+            {memory.GetFCRAMRef(interval.lower()), interval.upper() - interval.lower()});
         std::fill(memory.GetFCRAMPointer(interval.lower()),
                   memory.GetFCRAMPointer(interval.upper()), 0);
     }
src/core/hle/kernel/shared_memory.h
@@ -8,6 +8,7 @@
 #include <utility>
 #include <boost/serialization/export.hpp>
 #include "common/common_types.h"
+#include "common/memory_ref.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/result.h"
@@ -87,7 +88,7 @@ private:
     /// during creation.
     PAddr linear_heap_phys_offset = 0;
     /// Backing memory for this shared memory block.
-    std::vector<std::pair<u8*, u32>> backing_blocks;
+    std::vector<std::pair<MemoryRef, u32>> backing_blocks;
     /// Size of the memory block. Page-aligned.
     u32 size = 0;
     /// Permission restrictions applied to the process which created the block.
@@ -109,7 +110,7 @@ private:
     template <class Archive>
     void serialize(Archive& ar, const unsigned int file_version) {
         ar& linear_heap_phys_offset;
-        // TODO: backing blocks u8* (this is always FCRAM I think)
+        ar& backing_blocks;
         ar& size;
         ar& permissions;
         ar& other_permissions;
src/core/hle/kernel/shared_page.cpp
@@ -4,6 +4,7 @@

 #include <chrono>
 #include <cstring>
+#include "common/archives.h"
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/hle/kernel/shared_page.h"
@@ -13,6 +14,17 @@
 ////////////////////////////////////////////////////////////////////////////////////////////////////

+namespace boost::serialization {
+
+template <class Archive>
+void load_construct_data(Archive& ar, SharedPage::Handler* t, const unsigned int) {
+    ::new (t) SharedPage::Handler(Core::System::GetInstance().CoreTiming());
+}
+template void load_construct_data<iarchive>(iarchive& ar, SharedPage::Handler* t,
+                                            const unsigned int);
+
+} // namespace boost::serialization
+
 namespace SharedPage {

 static std::chrono::seconds GetInitTime() {
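SharedPage::Handler has no default constructor (it needs a Core::Timing&), so the commit supplies boost::serialization::load_construct_data to tell the archive how to construct one before its members are loaded; iarchive here is presumably the project's input-archive alias from common/archives.h. A generic, self-contained sketch of that Boost idiom, using an invented Widget type:

    #include <fstream>
    #include <memory>
    #include <boost/archive/text_iarchive.hpp>
    #include <boost/archive/text_oarchive.hpp>
    #include <boost/serialization/access.hpp>
    #include <boost/serialization/shared_ptr.hpp>

    // A type that cannot be default-constructed.
    class Widget {
    public:
        explicit Widget(int id_) : id(id_) {}
        int id;

    private:
        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, const unsigned int) {
            ar& id;
        }
    };

    namespace boost::serialization {

    // Called by the archive to placement-new a Widget before loading its members.
    template <class Archive>
    void load_construct_data(Archive& /*ar*/, Widget* t, const unsigned int) {
        ::new (t) Widget(0); // construct with a placeholder; serialize() then restores id
    }

    } // namespace boost::serialization

    int main() {
        const std::shared_ptr<Widget> out = std::make_shared<Widget>(42);
        {
            std::ofstream os("widget.txt");
            boost::archive::text_oarchive oa(os);
            oa << out;
        }
        std::shared_ptr<Widget> in;
        std::ifstream is("widget.txt");
        boost::archive::text_iarchive ia(is);
        ia >> in; // load_construct_data runs first, then serialize() restores id == 42
        return in->id == 42 ? 0 : 1;
    }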
src/core/hle/kernel/shared_page.h
@@ -17,6 +17,7 @@
 #include "common/bit_field.h"
 #include "common/common_funcs.h"
 #include "common/common_types.h"
+#include "common/memory_ref.h"
 #include "common/swap.h"
 #include "core/memory.h"

@@ -83,7 +84,7 @@ struct SharedPageDef {
 static_assert(sizeof(SharedPageDef) == Memory::SHARED_PAGE_SIZE,
               "Shared page structure size is wrong");

-class Handler {
+class Handler : public BackingMem {
 public:
     Handler(Core::Timing& timing);

@@ -97,6 +98,14 @@

     SharedPageDef& GetSharedPage();

+    virtual u8* GetPtr() {
+        return static_cast<u8*>(static_cast<void*>(&shared_page));
+    }
+
+    virtual u32 GetSize() const {
+        return sizeof(shared_page);
+    }
+
 private:
     u64 GetSystemTime() const;
     void UpdateTimeCallback(u64 userdata, int cycles_late);
@@ -105,6 +114,19 @@
     std::chrono::seconds init_time;

     SharedPageDef shared_page;

+    template <class Archive>
+    void serialize(Archive& ar, const unsigned int) {
+        ar& boost::serialization::make_binary_object(&shared_page, sizeof(shared_page));
+    }
+    friend class boost::serialization::access;
 };

 } // namespace SharedPage

+namespace boost::serialization {
+
+template <class Archive>
+void load_construct_data(Archive& ar, SharedPage::Handler* t, const unsigned int);
+
+} // namespace boost::serialization
src/core/hle/kernel/thread.cpp
@@ -382,7 +382,7 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread(std::string name,

         // Map the page to the current process' address space.
         vm_manager.MapBackingMemory(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE,
-                                    memory.GetFCRAMPointer(*offset), Memory::PAGE_SIZE,
+                                    memory.GetFCRAMRef(*offset), Memory::PAGE_SIZE,
                                     MemoryState::Locked);
     }

src/core/hle/kernel/vm_manager.cpp
@@ -27,7 +27,8 @@ bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
         type != next.type) {
         return false;
     }
-    if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
+    if (type == VMAType::BackingMemory &&
+        backing_memory.GetPtr() + size != next.backing_memory.GetPtr()) {
         return false;
     }
     if (type == VMAType::MMIO && paddr + size != next.paddr) {
@@ -50,8 +51,7 @@ void VMManager::Reset() {
     initial_vma.size = MAX_ADDRESS;
     vma_map.emplace(initial_vma.base, initial_vma);

-    page_table.pointers.fill(nullptr);
-    page_table.attributes.fill(Memory::PageType::Unmapped);
+    page_table.Clear();

     UpdatePageTableForVMA(initial_vma);
 }
@@ -64,7 +64,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
     }
 }

-ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size, u8* memory,
+ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size, MemoryRef memory,
                                                    u32 size, MemoryState state) {

     // Find the first Free VMA.
@@ -93,9 +93,9 @@ ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size,
     return MakeResult<VAddr>(target);
 }

-ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u32 size,
-                                                            MemoryState state) {
-    ASSERT(memory != nullptr);
+ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, MemoryRef memory,
+                                                            u32 size, MemoryState state) {
+    ASSERT(memory.GetPtr() != nullptr);

     // This is the appropriately sized VMA that will turn into our allocation.
     CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
@@ -359,9 +359,9 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
     }
 }

-ResultVal<std::vector<std::pair<u8*, u32>>> VMManager::GetBackingBlocksForRange(VAddr address,
-                                                                                u32 size) {
-    std::vector<std::pair<u8*, u32>> backing_blocks;
+ResultVal<std::vector<std::pair<MemoryRef, u32>>> VMManager::GetBackingBlocksForRange(VAddr address,
+                                                                                      u32 size) {
+    std::vector<std::pair<MemoryRef, u32>> backing_blocks;
     VAddr interval_target = address;
     while (interval_target != address + size) {
         auto vma = FindVMA(interval_target);
@@ -372,7 +372,7 @@ ResultVal<std::vector<std::pair<u8*, u32>>> VMManager::GetBackingBlocksForRange(

         VAddr interval_end = std::min(address + size, vma->second.base + vma->second.size);
         u32 interval_size = interval_end - interval_target;
-        u8* backing_memory = vma->second.backing_memory + (interval_target - vma->second.base);
+        auto backing_memory = vma->second.backing_memory + (interval_target - vma->second.base);
         backing_blocks.push_back({backing_memory, interval_size});

         interval_target += interval_size;
src/core/hle/kernel/vm_manager.h
@@ -11,6 +11,7 @@
 #include <boost/serialization/map.hpp>
 #include <boost/serialization/split_member.hpp>
 #include "common/common_types.h"
+#include "common/memory_ref.h"
 #include "core/hle/result.h"
 #include "core/memory.h"
 #include "core/mmio.h"
@@ -73,7 +74,7 @@ struct VirtualMemoryArea {

     // Settings for type = BackingMemory
     /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
-    u8* backing_memory = nullptr;
+    MemoryRef backing_memory{};

     // Settings for type = MMIO
     /// Physical address of the register area this VMA maps to.
@@ -92,8 +93,7 @@ private:
         ar& type;
         ar& permissions;
         ar& meminfo_state;
-        // TODO: backing memory ref
-        // backing memory can be: Physical/FCRAM pointer, config mem, shared page
+        ar& backing_memory;
         ar& paddr;
         ar& mmio_handler;
     }
@@ -151,7 +151,7 @@ public:
     * @param state MemoryState tag to attach to the VMA.
     * @returns The address at which the memory was mapped.
     */
-    ResultVal<VAddr> MapBackingMemoryToBase(VAddr base, u32 region_size, u8* memory, u32 size,
+    ResultVal<VAddr> MapBackingMemoryToBase(VAddr base, u32 region_size, MemoryRef memory, u32 size,
                                             MemoryState state);
    /**
     * Maps an unmanaged host memory pointer at a given address.
@@ -161,7 +161,8 @@ public:
     * @param size Size of the mapping.
     * @param state MemoryState tag to attach to the VMA.
     */
-    ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u32 size, MemoryState state);
+    ResultVal<VMAHandle> MapBackingMemory(VAddr target, MemoryRef memory, u32 size,
+                                          MemoryState state);

    /**
     * Maps a memory-mapped IO region at a given address.
@@ -203,38 +204,14 @@ public:
     void LogLayout(Log::Level log_level) const;

     /// Gets a list of backing memory blocks for the specified range
-    ResultVal<std::vector<std::pair<u8*, u32>>> GetBackingBlocksForRange(VAddr address, u32 size);
+    ResultVal<std::vector<std::pair<MemoryRef, u32>>> GetBackingBlocksForRange(VAddr address,
+                                                                               u32 size);

     /// Each VMManager has its own page table, which is set as the main one when the owning process
     /// is scheduled.
     Memory::PageTable page_table;

 private:
-    friend class boost::serialization::access;
-    template <class Archive>
-    void save(Archive& ar, const unsigned int file_version) const {
-        ar& vma_map;
-        for (int i = 0; i < page_table.pointers.size(); i++) {
-            ar << memory.GetFCRAMOffset(page_table.pointers[i]);
-        }
-        ar& page_table.special_regions;
-        ar& page_table.attributes;
-    }
-
-    template <class Archive>
-    void load(Archive& ar, const unsigned int file_version) {
-        ar& vma_map;
-        for (int i = 0; i < page_table.pointers.size(); i++) {
-            u32 offset{};
-            ar >> offset;
-            page_table.pointers[i] = memory.GetFCRAMPointer(offset);
-        }
-        ar& page_table.special_regions;
-        ar& page_table.attributes;
-    }
-
-    BOOST_SERIALIZATION_SPLIT_MEMBER()
-
     using VMAIter = decltype(vma_map)::iterator;

     /// Converts a VMAHandle to a mutable VMAIter.
@@ -271,5 +248,12 @@ private:
     void UpdatePageTableForVMA(const VirtualMemoryArea& vma);

     Memory::MemorySystem& memory;

+    template <class Archive>
+    void serialize(Archive& ar, const unsigned int) {
+        ar& vma_map;
+        ar& page_table;
+    }
+    friend class boost::serialization::access;
 };
 } // namespace Kernel
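Because backing_memory is now a MemoryRef, the VMA (and the page table below it) serializes without the hand-written FCRAM-offset save/load deleted above: the member simply participates in serialize() like any other field. A reduced sketch of the pattern, with an invented Block struct standing in for the real VMA:

    #include <boost/serialization/access.hpp>
    #include "common/memory_ref.h"

    struct Block {
        u32 base = 0;
        MemoryRef backing; // stored as (shared_ptr<BackingMem>, offset)

    private:
        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, const unsigned int) {
            ar& base;
            ar& backing; // no raw-u8* special casing needed any more
        }
    };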
src/core/memory.cpp
@@ -14,6 +14,7 @@
 #include "common/swap.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
+#include "core/global.h"
 #include "core/hle/kernel/memory.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/lock.h"
@@ -22,8 +23,19 @@
 #include "video_core/renderer_base.h"
 #include "video_core/video_core.h"

+SERIALIZE_EXPORT_IMPL(Memory::MemorySystem::BackingMemImpl<Memory::Region::FCRAM>)
+SERIALIZE_EXPORT_IMPL(Memory::MemorySystem::BackingMemImpl<Memory::Region::VRAM>)
+SERIALIZE_EXPORT_IMPL(Memory::MemorySystem::BackingMemImpl<Memory::Region::DSP>)
+SERIALIZE_EXPORT_IMPL(Memory::MemorySystem::BackingMemImpl<Memory::Region::N3DS>)
+
 namespace Memory {

+void PageTable::Clear() {
+    pointers.raw.fill(nullptr);
+    pointers.refs.fill(MemoryRef());
+    attributes.fill(PageType::Unmapped);
+}
+
 class RasterizerCacheMarker {
 public:
     void Mark(VAddr addr, bool cached) {
@@ -81,6 +93,43 @@ public:

     AudioCore::DspInterface* dsp = nullptr;

+    std::shared_ptr<BackingMem> fcram_mem;
+    std::shared_ptr<BackingMem> vram_mem;
+    std::shared_ptr<BackingMem> n3ds_extra_ram_mem;
+    std::shared_ptr<BackingMem> dsp_mem;
+
+    MemorySystem::Impl();
+
+    virtual u8* GetPtr(Region r) {
+        switch (r) {
+        case Region::VRAM:
+            return vram.get();
+        case Region::DSP:
+            return dsp->GetDspMemory().data();
+        case Region::FCRAM:
+            return fcram.get();
+        case Region::N3DS:
+            return n3ds_extra_ram.get();
+        default:
+            UNREACHABLE();
+        }
+    }
+
+    virtual u32 GetSize(Region r) const {
+        switch (r) {
+        case Region::VRAM:
+            return VRAM_SIZE;
+        case Region::DSP:
+            return DSP_RAM_SIZE;
+        case Region::FCRAM:
+            return FCRAM_N3DS_SIZE;
+        case Region::N3DS:
+            return N3DS_EXTRA_RAM_SIZE;
+        default:
+            UNREACHABLE();
+        }
+    }
+
 private:
     friend class boost::serialization::access;
     template <class Archive>
@@ -95,10 +144,41 @@ private:
         ar& cache_marker;
         ar& page_table_list;
         // dsp is set from Core::System at startup
-        // current page table set from current process?
+        // TODO: current_page_table
+        ar& fcram_mem;
+        ar& vram_mem;
+        ar& n3ds_extra_ram_mem;
+        ar& dsp_mem;
     }
 };

+// We use this rather than BufferMem because we don't want new objects to be allocated when
+// deserializing. This avoids unnecessary memory thrashing.
+template <Region R>
+class MemorySystem::BackingMemImpl : public BackingMem {
+public:
+    BackingMemImpl() : system(Core::Global<Core::System>().Memory()) {}
+    virtual u8* GetPtr() {
+        return system.impl->GetPtr(R);
+    }
+    virtual u32 GetSize() const {
+        return system.impl->GetSize(R);
+    }
+
+private:
+    MemorySystem& system;
+
+    template <class Archive>
+    void serialize(Archive& ar, const unsigned int) {}
+    friend class boost::serialization::access;
+};
+
+MemorySystem::Impl::Impl()
+    : fcram_mem(std::make_shared<BackingMemImpl<Region::FCRAM>>()),
+      vram_mem(std::make_shared<BackingMemImpl<Region::VRAM>>()),
+      n3ds_extra_ram_mem(std::make_shared<BackingMemImpl<Region::N3DS>>()),
+      dsp_mem(std::make_shared<BackingMemImpl<Region::DSP>>()) {}
+
 MemorySystem::MemorySystem() : impl(std::make_unique<Impl>()) {}
 MemorySystem::~MemorySystem() = default;

@@ -117,8 +197,9 @@ PageTable* MemorySystem::GetCurrentPageTable() const {
     return impl->current_page_table;
 }

-void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type) {
-    LOG_DEBUG(HW_Memory, "Mapping {} onto {:08X}-{:08X}", (void*)memory, base * PAGE_SIZE,
+void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, MemoryRef memory,
+                            PageType type) {
+    LOG_DEBUG(HW_Memory, "Mapping {} onto {:08X}-{:08X}", (void*)memory.GetPtr(), base * PAGE_SIZE,
               (base + size) * PAGE_SIZE);

     RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE,
@@ -143,7 +224,7 @@ void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, u8* memor
     }
 }

-void MemorySystem::MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target) {
+void MemorySystem::MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, MemoryRef target) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
@@ -164,15 +245,15 @@ void MemorySystem::UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
 }

-u8* MemorySystem::GetPointerForRasterizerCache(VAddr addr) {
+MemoryRef MemorySystem::GetPointerForRasterizerCache(VAddr addr) {
     if (addr >= LINEAR_HEAP_VADDR && addr < LINEAR_HEAP_VADDR_END) {
-        return impl->fcram.get() + (addr - LINEAR_HEAP_VADDR);
+        return {impl->fcram_mem, addr - LINEAR_HEAP_VADDR};
     }
     if (addr >= NEW_LINEAR_HEAP_VADDR && addr < NEW_LINEAR_HEAP_VADDR_END) {
-        return impl->fcram.get() + (addr - NEW_LINEAR_HEAP_VADDR);
+        return {impl->fcram_mem, addr - NEW_LINEAR_HEAP_VADDR};
     }
     if (addr >= VRAM_VADDR && addr < VRAM_VADDR_END) {
-        return impl->vram.get() + (addr - VRAM_VADDR);
+        return {impl->vram_mem, addr - VRAM_VADDR};
     }
     UNREACHABLE();
 }
@@ -271,7 +352,7 @@ void MemorySystem::Write(const VAddr vaddr, const T data) {
 bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) {
     auto& page_table = process.vm_manager.page_table;

-    const u8* page_pointer = page_table.pointers[vaddr >> PAGE_BITS];
+    auto page_pointer = page_table.pointers[vaddr >> PAGE_BITS];
     if (page_pointer)
         return true;

@@ -323,6 +404,10 @@ std::string MemorySystem::ReadCString(VAddr vaddr, std::size_t max_length) {
 }

 u8* MemorySystem::GetPhysicalPointer(PAddr address) {
+    return GetPhysicalRef(address);
+}
+
+MemoryRef MemorySystem::GetPhysicalRef(PAddr address) {
     struct MemoryArea {
         PAddr paddr_base;
         u32 size;
@@ -349,25 +434,25 @@ u8* MemorySystem::GetPhysicalPointer(PAddr address) {

     u32 offset_into_region = address - area->paddr_base;

-    u8* target_pointer = nullptr;
+    std::shared_ptr<BackingMem> target_mem = nullptr;
     switch (area->paddr_base) {
     case VRAM_PADDR:
-        target_pointer = impl->vram.get() + offset_into_region;
+        target_mem = impl->vram_mem;
         break;
     case DSP_RAM_PADDR:
-        target_pointer = impl->dsp->GetDspMemory().data() + offset_into_region;
+        target_mem = impl->dsp_mem;
        break;
     case FCRAM_PADDR:
-        target_pointer = impl->fcram.get() + offset_into_region;
+        target_mem = impl->fcram_mem;
         break;
     case N3DS_EXTRA_RAM_PADDR:
-        target_pointer = impl->n3ds_extra_ram.get() + offset_into_region;
+        target_mem = impl->n3ds_extra_ram_mem;
         break;
     default:
         UNREACHABLE();
     }

-    return target_pointer;
+    return {target_mem, offset_into_region};
 }

 /// For a rasterizer-accessible PAddr, gets a list of all possible VAddr
@@ -781,7 +866,7 @@ void WriteMMIO<u64>(MMIORegionPointer mmio_handler, VAddr addr, const u64 data)
     mmio_handler->Write64(addr, data);
 }

-u32 MemorySystem::GetFCRAMOffset(u8* pointer) {
+u32 MemorySystem::GetFCRAMOffset(const u8* pointer) {
     ASSERT(pointer >= impl->fcram.get() && pointer <= impl->fcram.get() + Memory::FCRAM_N3DS_SIZE);
     return pointer - impl->fcram.get();
 }
@@ -791,6 +876,11 @@ u8* MemorySystem::GetFCRAMPointer(u32 offset) {
     return impl->fcram.get() + offset;
 }

+MemoryRef MemorySystem::GetFCRAMRef(u32 offset) {
+    ASSERT(offset <= Memory::FCRAM_N3DS_SIZE);
+    return MemoryRef(impl->fcram_mem, offset);
+}
+
 void MemorySystem::SetDSP(AudioCore::DspInterface& dsp) {
     impl->dsp = &dsp;
 }
src/core/memory.h
@@ -13,6 +13,7 @@
 #include <boost/serialization/array.hpp>
 #include <boost/serialization/vector.hpp>
 #include "common/common_types.h"
+#include "common/memory_ref.h"
 #include "core/mmio.h"

 class ARM_Interface;
@@ -77,7 +78,48 @@ struct PageTable {
     * Array of memory pointers backing each page. An entry can only be non-null if the
     * corresponding entry in the `attributes` array is of type `Memory`.
     */
-    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;
+    // The reason for this rigmarole is to keep the 'raw' and 'refs' arrays in sync.
+    // We need 'raw' for dynarmic and 'refs' for serialization
+    struct Pointers {
+
+        struct Entry {
+            Entry(Pointers& pointers_, VAddr idx_) : pointers(pointers_), idx(idx_) {}
+
+            inline void operator=(MemoryRef value) {
+                pointers.refs[idx] = value;
+                pointers.raw[idx] = value.GetPtr();
+            }
+
+            inline operator u8*() {
+                return pointers.raw[idx];
+            }
+
+        private:
+            Pointers& pointers;
+            VAddr idx;
+        };
+
+        inline Entry operator[](VAddr idx) {
+            return Entry(*this, idx);
+        }
+
+        inline u8* operator[](VAddr idx) const {
+            return raw[idx];
+        }
+
+        inline Entry operator[](std::size_t idx) {
+            return Entry(*this, static_cast<VAddr>(idx));
+        }
+
+    private:
+        std::array<u8*, PAGE_TABLE_NUM_ENTRIES> raw;
+
+        std::array<MemoryRef, PAGE_TABLE_NUM_ENTRIES> refs;
+
+        friend struct PageTable;
+    };
+    Pointers pointers;

    /**
     * Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of
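A small usage sketch of the proxy introduced above, based only on the interface shown in this and the following hunk (the function name is illustrative): writes through pointers[] go via Entry::operator=, which updates both the MemoryRef array used for serialization and the raw u8* array consumed by dynarmic, while reads decay to the cached raw pointer.

    #include "common/memory_ref.h"
    #include "core/memory.h"

    void SketchPageTableEntry(Memory::PageTable& table, MemoryRef page, VAddr vaddr) {
        const VAddr idx = vaddr >> Memory::PAGE_BITS;

        table.pointers[idx] = page;     // Entry::operator=: stores the MemoryRef and caches page.GetPtr()
        u8* fast = table.pointers[idx]; // Entry::operator u8*(): reads the cached raw pointer
        (void)fast;

        // Consumers that need the whole raw array (e.g. the JIT) use GetPointerArray(),
        // which is added in the next hunk.
        auto& raw_array = table.GetPointerArray();
        (void)raw_array;
    }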
@ -91,12 +133,21 @@ struct PageTable {
|
|||||||
*/
|
*/
|
||||||
std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
|
std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
|
||||||
|
|
||||||
|
inline std::array<u8*, PAGE_TABLE_NUM_ENTRIES>& GetPointerArray() {
|
||||||
|
return pointers.raw;
|
||||||
|
}
|
||||||
|
|
||||||
|
void Clear();
|
||||||
|
|
||||||
private:
|
private:
|
||||||
template <class Archive>
|
template <class Archive>
|
||||||
void serialize(Archive& ar, const unsigned int) {
|
void serialize(Archive& ar, const unsigned int) {
|
||||||
// TODO: Pointers; same as VMA backing regions we need to serialize the u8*
|
ar& pointers.refs;
|
||||||
ar& special_regions;
|
ar& special_regions;
|
||||||
ar& attributes;
|
ar& attributes;
|
||||||
|
for (auto i = 0; i < PAGE_TABLE_NUM_ENTRIES; i++) {
|
||||||
|
pointers.raw[i] = pointers.refs[i].GetPtr();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
friend class boost::serialization::access;
|
friend class boost::serialization::access;
|
||||||
};
|
};
|
||||||
@ -142,6 +193,8 @@ enum : PAddr {
|
|||||||
FCRAM_N3DS_PADDR_END = FCRAM_PADDR + FCRAM_N3DS_SIZE,
|
FCRAM_N3DS_PADDR_END = FCRAM_PADDR + FCRAM_N3DS_SIZE,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
enum class Region { FCRAM, VRAM, DSP, N3DS };
|
||||||
|
|
||||||
/// Virtual user-space memory regions
|
/// Virtual user-space memory regions
|
||||||
enum : VAddr {
|
enum : VAddr {
|
||||||
/// Where the application text, data and bss reside.
|
/// Where the application text, data and bss reside.
|
||||||
@ -249,7 +302,7 @@ public:
|
|||||||
* @param size The amount of bytes to map. Must be page-aligned.
|
* @param size The amount of bytes to map. Must be page-aligned.
|
||||||
* @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
|
* @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
|
||||||
*/
|
*/
|
||||||
void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target);
|
void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, MemoryRef target);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Maps a region of the emulated process address space as a IO region.
|
* Maps a region of the emulated process address space as a IO region.
|
||||||
@ -293,16 +346,21 @@ public:
|
|||||||
*/
|
*/
|
||||||
u8* GetPhysicalPointer(PAddr address);
|
u8* GetPhysicalPointer(PAddr address);
|
||||||
|
|
||||||
|
MemoryRef GetPhysicalRef(PAddr address);
|
||||||
|
|
||||||
u8* GetPointer(VAddr vaddr);
|
u8* GetPointer(VAddr vaddr);
|
||||||
|
|
||||||
bool IsValidPhysicalAddress(PAddr paddr);
|
bool IsValidPhysicalAddress(PAddr paddr);
|
||||||
|
|
||||||
/// Gets offset in FCRAM from a pointer inside FCRAM range
|
/// Gets offset in FCRAM from a pointer inside FCRAM range
|
||||||
u32 GetFCRAMOffset(u8* pointer);
|
u32 GetFCRAMOffset(const u8* pointer);
|
||||||
|
|
||||||
/// Gets pointer in FCRAM with given offset
|
/// Gets pointer in FCRAM with given offset
|
||||||
u8* GetFCRAMPointer(u32 offset);
|
u8* GetFCRAMPointer(u32 offset);
|
||||||
|
|
||||||
|
/// Gets a serializable ref to FCRAM with the given offset
|
||||||
|
MemoryRef GetFCRAMRef(u32 offset);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Mark each page touching the region as cached.
|
* Mark each page touching the region as cached.
|
||||||
*/
|
*/
|
||||||
@@ -329,9 +387,9 @@ private:
      * Since the cache only happens on linear heap or VRAM, we know the exact physical address and
      * pointer of such virtual address
      */
-    u8* GetPointerForRasterizerCache(VAddr addr);
+    MemoryRef GetPointerForRasterizerCache(VAddr addr);
 
-    void MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type);
+    void MapPages(PageTable& page_table, u32 base, u32 size, MemoryRef memory, PageType type);
 
     class Impl;
 
@@ -340,9 +398,18 @@ private:
     friend class boost::serialization::access;
     template <class Archive>
     void serialize(Archive& ar, const unsigned int file_version);
 
+public:
+    template <Region R>
+    class BackingMemImpl;
 };
 
 /// Determines if the given VAddr is valid for the specified process.
 bool IsValidVirtualAddress(const Kernel::Process& process, VAddr vaddr);
 
 } // namespace Memory
 
+BOOST_CLASS_EXPORT_KEY(Memory::MemorySystem::BackingMemImpl<Memory::Region::FCRAM>)
+BOOST_CLASS_EXPORT_KEY(Memory::MemorySystem::BackingMemImpl<Memory::Region::VRAM>)
+BOOST_CLASS_EXPORT_KEY(Memory::MemorySystem::BackingMemImpl<Memory::Region::DSP>)
+BOOST_CLASS_EXPORT_KEY(Memory::MemorySystem::BackingMemImpl<Memory::Region::N3DS>)
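Boost.Serialization cannot restore an object through a BackingMem (and hence MemoryRef) base pointer unless the concrete type is exported by name, and class templates are not registered automatically, so each Region instantiation of BackingMemImpl gets its own BOOST_CLASS_EXPORT_KEY here, to be paired with BOOST_CLASS_EXPORT_IMPLEMENT in a single translation unit. The hunk does not show the class body; the sketch below is an assumption about its likely shape, not the real definition.

// Assumed shape, for illustration only: a BackingMem that simply exposes the
// MemorySystem's storage for one physical region.
namespace Memory {

template <Region R>
class MemorySystem::BackingMemImpl : public BackingMem {
public:
    BackingMemImpl() = default;   // Boost default-constructs it when loading

    u8* GetPtr() override;        // start of the FCRAM/VRAM/DSP/N3DS buffer
    u32 GetSize() const override; // size of that buffer

private:
    template <class Archive>
    void serialize(Archive& ar, const unsigned int) {
        // No per-instance state to archive: after load the object re-binds to
        // the storage the MemorySystem already owns for region R.
    }
    friend class boost::serialization::access;
};

} // namespace Memory

// In exactly one .cpp the keys above are matched by, e.g.:
// BOOST_CLASS_EXPORT_IMPLEMENT(Memory::MemorySystem::BackingMemImpl<Memory::Region::FCRAM>)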
@@ -22,8 +22,7 @@ TestEnvironment::TestEnvironment(bool mutable_memory_)
     kernel->SetCurrentProcess(kernel->CreateProcess(kernel->CreateCodeSet("", 0)));
     page_table = &kernel->GetCurrentProcess()->vm_manager.page_table;
 
-    page_table->pointers.fill(nullptr);
-    page_table->attributes.fill(Memory::PageType::Unmapped);
+    page_table->Clear();
 
     memory->MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory);
     memory->MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory);
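The test setup now calls page_table->Clear() instead of filling pointers and attributes by hand, which reads correctly only if Clear() resets everything the two old lines reset, plus the new refs array. A sketch of the presumed definition, as an assumption rather than the actual code:

// Presumed definition, for illustration: wipe the raw pointers, the
// serializable refs behind them, and the page attributes in one place,
// making the separate attributes.fill(...) call above redundant.
void Memory::PageTable::Clear() {
    pointers.raw.fill(nullptr);
    pointers.refs.fill(MemoryRef());
    attributes.fill(PageType::Unmapped);
}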
@@ -138,67 +138,70 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
     }
 
     SECTION("translates StaticBuffer descriptors") {
-        auto buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
-        std::fill(buffer->begin(), buffer->end(), 0xAB);
+        auto mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        MemoryRef buffer{mem};
+        std::fill(buffer.GetPtr(), buffer.GetPtr() + buffer.GetSize(), 0xAB);
 
         VAddr target_address = 0x10000000;
-        auto result = process->vm_manager.MapBackingMemory(target_address, buffer->data(),
-                                                           buffer->size(), MemoryState::Private);
+        auto result = process->vm_manager.MapBackingMemory(target_address, buffer, buffer.GetSize(),
+                                                           MemoryState::Private);
         REQUIRE(result.Code() == RESULT_SUCCESS);
 
         const u32_le input[]{
             IPC::MakeHeader(0, 0, 2),
-            IPC::StaticBufferDesc(buffer->size(), 0),
+            IPC::StaticBufferDesc(buffer.GetSize(), 0),
             target_address,
         };
 
         context.PopulateFromIncomingCommandBuffer(input, *process);
 
-        CHECK(context.GetStaticBuffer(0) == *buffer);
+        CHECK(context.GetStaticBuffer(0) == mem->Vector());
 
-        REQUIRE(process->vm_manager.UnmapRange(target_address, buffer->size()) == RESULT_SUCCESS);
+        REQUIRE(process->vm_manager.UnmapRange(target_address, buffer.GetSize()) == RESULT_SUCCESS);
     }
 
     SECTION("translates MappedBuffer descriptors") {
-        auto buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
-        std::fill(buffer->begin(), buffer->end(), 0xCD);
+        auto mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        MemoryRef buffer{mem};
+        std::fill(buffer.GetPtr(), buffer.GetPtr() + buffer.GetSize(), 0xCD);
 
         VAddr target_address = 0x10000000;
-        auto result = process->vm_manager.MapBackingMemory(target_address, buffer->data(),
-                                                           buffer->size(), MemoryState::Private);
+        auto result = process->vm_manager.MapBackingMemory(target_address, buffer, buffer.GetSize(),
+                                                           MemoryState::Private);
 
         const u32_le input[]{
             IPC::MakeHeader(0, 0, 2),
-            IPC::MappedBufferDesc(buffer->size(), IPC::R),
+            IPC::MappedBufferDesc(buffer.GetSize(), IPC::R),
             target_address,
         };
 
         context.PopulateFromIncomingCommandBuffer(input, *process);
 
-        std::vector<u8> other_buffer(buffer->size());
-        context.GetMappedBuffer(0).Read(other_buffer.data(), 0, buffer->size());
+        std::vector<u8> other_buffer(buffer.GetSize());
+        context.GetMappedBuffer(0).Read(other_buffer.data(), 0, buffer.GetSize());
 
-        CHECK(other_buffer == *buffer);
+        CHECK(other_buffer == mem->Vector());
 
-        REQUIRE(process->vm_manager.UnmapRange(target_address, buffer->size()) == RESULT_SUCCESS);
+        REQUIRE(process->vm_manager.UnmapRange(target_address, buffer.GetSize()) == RESULT_SUCCESS);
     }
 
     SECTION("translates mixed params") {
-        auto buffer_static = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
-        std::fill(buffer_static->begin(), buffer_static->end(), 0xCE);
+        auto mem_static = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        MemoryRef buffer_static{mem_static};
+        std::fill(buffer_static.GetPtr(), buffer_static.GetPtr() + buffer_static.GetSize(), 0xCE);
 
-        auto buffer_mapped = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
-        std::fill(buffer_mapped->begin(), buffer_mapped->end(), 0xDF);
+        auto mem_mapped = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        MemoryRef buffer_mapped{mem_mapped};
+        std::fill(buffer_mapped.GetPtr(), buffer_mapped.GetPtr() + buffer_mapped.GetSize(), 0xDF);
 
         VAddr target_address_static = 0x10000000;
-        auto result =
-            process->vm_manager.MapBackingMemory(target_address_static, buffer_static->data(),
-                                                 buffer_static->size(), MemoryState::Private);
+        auto result = process->vm_manager.MapBackingMemory(
+            target_address_static, buffer_static, buffer_static.GetSize(), MemoryState::Private);
         REQUIRE(result.Code() == RESULT_SUCCESS);
 
         VAddr target_address_mapped = 0x20000000;
-        result = process->vm_manager.MapBackingMemory(target_address_mapped, buffer_mapped->data(),
-                                                      buffer_mapped->size(), MemoryState::Private);
+        result = process->vm_manager.MapBackingMemory(
+            target_address_mapped, buffer_mapped, buffer_mapped.GetSize(), MemoryState::Private);
         REQUIRE(result.Code() == RESULT_SUCCESS);
 
         auto a = MakeObject(kernel);
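The tests above build their scratch blocks as std::make_shared<BufferMem>(Memory::PAGE_SIZE) and compare against mem->Vector(), i.e. a vector-backed BackingMem. A sketch consistent with those call sites follows (the interface is taken from the tests; the implementation, and the BackingMem/u8/u32 definitions from the earlier sketch, are assumptions):

#include <vector>

// Sketch of the vector-backed BackingMem used by the tests; only the
// constructor-from-size and the Vector() accessor are visible in this diff,
// the rest is assumed.
class BufferMem : public BackingMem {
public:
    BufferMem() = default;
    explicit BufferMem(u32 size) : data(size) {}

    u8* GetPtr() override {
        return data.data();
    }
    u32 GetSize() const override {
        return static_cast<u32>(data.size());
    }

    // Lets tests compare the whole backing block against a std::vector<u8>.
    std::vector<u8>& Vector() {
        return data;
    }

private:
    std::vector<u8> data; // the block itself, serializable as plain data
};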
@@ -210,9 +213,9 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
             process->handle_table.Create(a).Unwrap(),
             IPC::CallingPidDesc(),
             0,
-            IPC::StaticBufferDesc(buffer_static->size(), 0),
+            IPC::StaticBufferDesc(buffer_static.GetSize(), 0),
             target_address_static,
-            IPC::MappedBufferDesc(buffer_mapped->size(), IPC::R),
+            IPC::MappedBufferDesc(buffer_mapped.GetSize(), IPC::R),
             target_address_mapped,
         };
 
@@ -223,14 +226,14 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
         CHECK(output[2] == 0xABCDEF00);
         CHECK(context.GetIncomingHandle(output[4]) == a);
         CHECK(output[6] == process->process_id);
-        CHECK(context.GetStaticBuffer(0) == *buffer_static);
-        std::vector<u8> other_buffer(buffer_mapped->size());
-        context.GetMappedBuffer(0).Read(other_buffer.data(), 0, buffer_mapped->size());
-        CHECK(other_buffer == *buffer_mapped);
+        CHECK(context.GetStaticBuffer(0) == mem_static->Vector());
+        std::vector<u8> other_buffer(buffer_mapped.GetSize());
+        context.GetMappedBuffer(0).Read(other_buffer.data(), 0, buffer_mapped.GetSize());
+        CHECK(other_buffer == mem_mapped->Vector());
 
-        REQUIRE(process->vm_manager.UnmapRange(target_address_static, buffer_static->size()) ==
+        REQUIRE(process->vm_manager.UnmapRange(target_address_static, buffer_static.GetSize()) ==
                 RESULT_SUCCESS);
-        REQUIRE(process->vm_manager.UnmapRange(target_address_mapped, buffer_mapped->size()) ==
+        REQUIRE(process->vm_manager.UnmapRange(target_address_mapped, buffer_mapped.GetSize()) ==
                 RESULT_SUCCESS);
     }
 }
@@ -317,10 +320,12 @@ TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
 
         context.AddStaticBuffer(0, input_buffer);
 
-        auto output_buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
+        auto output_mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        MemoryRef output_buffer{output_mem};
 
         VAddr target_address = 0x10000000;
         auto result = process->vm_manager.MapBackingMemory(
-            target_address, output_buffer->data(), output_buffer->size(), MemoryState::Private);
+            target_address, output_buffer, output_buffer.GetSize(), MemoryState::Private);
         REQUIRE(result.Code() == RESULT_SUCCESS);
 
         input[0] = IPC::MakeHeader(0, 0, 2);
@@ -332,13 +337,13 @@ TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
         std::array<u32_le, IPC::COMMAND_BUFFER_LENGTH + 2> output_cmdbuff;
         // Set up the output StaticBuffer
         output_cmdbuff[IPC::COMMAND_BUFFER_LENGTH] =
-            IPC::StaticBufferDesc(output_buffer->size(), 0);
+            IPC::StaticBufferDesc(output_buffer.GetSize(), 0);
         output_cmdbuff[IPC::COMMAND_BUFFER_LENGTH + 1] = target_address;
 
         context.WriteToOutgoingCommandBuffer(output_cmdbuff.data(), *process);
 
-        CHECK(*output_buffer == input_buffer);
-        REQUIRE(process->vm_manager.UnmapRange(target_address, output_buffer->size()) ==
+        CHECK(output_mem->Vector() == input_buffer);
+        REQUIRE(process->vm_manager.UnmapRange(target_address, output_buffer.GetSize()) ==
                 RESULT_SUCCESS);
     }
 
@@ -346,15 +351,17 @@ TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
         std::vector<u8> input_buffer(Memory::PAGE_SIZE);
         std::fill(input_buffer.begin(), input_buffer.end(), 0xAB);
 
-        auto output_buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
+        auto output_mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        MemoryRef output_buffer{output_mem};
 
         VAddr target_address = 0x10000000;
         auto result = process->vm_manager.MapBackingMemory(
-            target_address, output_buffer->data(), output_buffer->size(), MemoryState::Private);
+            target_address, output_buffer, output_buffer.GetSize(), MemoryState::Private);
         REQUIRE(result.Code() == RESULT_SUCCESS);
 
         const u32_le input_cmdbuff[]{
             IPC::MakeHeader(0, 0, 2),
-            IPC::MappedBufferDesc(output_buffer->size(), IPC::W),
+            IPC::MappedBufferDesc(output_buffer.GetSize(), IPC::W),
             target_address,
         };
 
@@ -363,15 +370,15 @@ TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
         context.GetMappedBuffer(0).Write(input_buffer.data(), 0, input_buffer.size());
 
         input[0] = IPC::MakeHeader(0, 0, 2);
-        input[1] = IPC::MappedBufferDesc(output_buffer->size(), IPC::W);
+        input[1] = IPC::MappedBufferDesc(output_buffer.GetSize(), IPC::W);
         input[2] = 0;
 
         context.WriteToOutgoingCommandBuffer(output, *process);
 
-        CHECK(output[1] == IPC::MappedBufferDesc(output_buffer->size(), IPC::W));
+        CHECK(output[1] == IPC::MappedBufferDesc(output_buffer.GetSize(), IPC::W));
         CHECK(output[2] == target_address);
-        CHECK(*output_buffer == input_buffer);
-        REQUIRE(process->vm_manager.UnmapRange(target_address, output_buffer->size()) ==
+        CHECK(output_mem->Vector() == input_buffer);
+        REQUIRE(process->vm_manager.UnmapRange(target_address, output_buffer.GetSize()) ==
                 RESULT_SUCCESS);
     }
 }
|
@ -10,47 +10,48 @@
|
|||||||
#include "core/memory.h"
|
#include "core/memory.h"
|
||||||
|
|
||||||
TEST_CASE("Memory Basics", "[kernel][memory]") {
|
TEST_CASE("Memory Basics", "[kernel][memory]") {
|
||||||
auto block = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
|
auto mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
|
||||||
|
MemoryRef block{mem};
|
||||||
Memory::MemorySystem memory;
|
Memory::MemorySystem memory;
|
||||||
SECTION("mapping memory") {
|
SECTION("mapping memory") {
|
||||||
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
||||||
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
||||||
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block->data(), block->size(),
|
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block, block.GetSize(),
|
||||||
Kernel::MemoryState::Private);
|
Kernel::MemoryState::Private);
|
||||||
REQUIRE(result.Code() == RESULT_SUCCESS);
|
REQUIRE(result.Code() == RESULT_SUCCESS);
|
||||||
|
|
||||||
auto vma = manager->FindVMA(Memory::HEAP_VADDR);
|
auto vma = manager->FindVMA(Memory::HEAP_VADDR);
|
||||||
CHECK(vma != manager->vma_map.end());
|
CHECK(vma != manager->vma_map.end());
|
||||||
CHECK(vma->second.size == block->size());
|
CHECK(vma->second.size == block.GetSize());
|
||||||
CHECK(vma->second.type == Kernel::VMAType::BackingMemory);
|
CHECK(vma->second.type == Kernel::VMAType::BackingMemory);
|
||||||
CHECK(vma->second.backing_memory == block->data());
|
CHECK(vma->second.backing_memory.GetPtr() == block.GetPtr());
|
||||||
CHECK(vma->second.meminfo_state == Kernel::MemoryState::Private);
|
CHECK(vma->second.meminfo_state == Kernel::MemoryState::Private);
|
||||||
}
|
}
|
||||||
|
|
||||||
SECTION("unmapping memory") {
|
SECTION("unmapping memory") {
|
||||||
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
||||||
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
||||||
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block->data(), block->size(),
|
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block, block.GetSize(),
|
||||||
Kernel::MemoryState::Private);
|
Kernel::MemoryState::Private);
|
||||||
REQUIRE(result.Code() == RESULT_SUCCESS);
|
REQUIRE(result.Code() == RESULT_SUCCESS);
|
||||||
|
|
||||||
ResultCode code = manager->UnmapRange(Memory::HEAP_VADDR, block->size());
|
ResultCode code = manager->UnmapRange(Memory::HEAP_VADDR, block.GetSize());
|
||||||
REQUIRE(code == RESULT_SUCCESS);
|
REQUIRE(code == RESULT_SUCCESS);
|
||||||
|
|
||||||
auto vma = manager->FindVMA(Memory::HEAP_VADDR);
|
auto vma = manager->FindVMA(Memory::HEAP_VADDR);
|
||||||
CHECK(vma != manager->vma_map.end());
|
CHECK(vma != manager->vma_map.end());
|
||||||
CHECK(vma->second.type == Kernel::VMAType::Free);
|
CHECK(vma->second.type == Kernel::VMAType::Free);
|
||||||
CHECK(vma->second.backing_memory == nullptr);
|
CHECK(vma->second.backing_memory.GetPtr() == nullptr);
|
||||||
}
|
}
|
||||||
|
|
||||||
SECTION("changing memory permissions") {
|
SECTION("changing memory permissions") {
|
||||||
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
||||||
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
||||||
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block->data(), block->size(),
|
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block, block.GetSize(),
|
||||||
Kernel::MemoryState::Private);
|
Kernel::MemoryState::Private);
|
||||||
REQUIRE(result.Code() == RESULT_SUCCESS);
|
REQUIRE(result.Code() == RESULT_SUCCESS);
|
||||||
|
|
||||||
ResultCode code = manager->ReprotectRange(Memory::HEAP_VADDR, block->size(),
|
ResultCode code = manager->ReprotectRange(Memory::HEAP_VADDR, block.GetSize(),
|
||||||
Kernel::VMAPermission::Execute);
|
Kernel::VMAPermission::Execute);
|
||||||
CHECK(code == RESULT_SUCCESS);
|
CHECK(code == RESULT_SUCCESS);
|
||||||
|
|
||||||
@@ -58,24 +59,24 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
         CHECK(vma != manager->vma_map.end());
         CHECK(vma->second.permissions == Kernel::VMAPermission::Execute);
 
-        code = manager->UnmapRange(Memory::HEAP_VADDR, block->size());
+        code = manager->UnmapRange(Memory::HEAP_VADDR, block.GetSize());
         REQUIRE(code == RESULT_SUCCESS);
     }
 
     SECTION("changing memory state") {
         // Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
         auto manager = std::make_unique<Kernel::VMManager>(memory);
-        auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block->data(), block->size(),
+        auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block, block.GetSize(),
                                                 Kernel::MemoryState::Private);
         REQUIRE(result.Code() == RESULT_SUCCESS);
 
-        ResultCode code = manager->ReprotectRange(Memory::HEAP_VADDR, block->size(),
+        ResultCode code = manager->ReprotectRange(Memory::HEAP_VADDR, block.GetSize(),
                                                   Kernel::VMAPermission::ReadWrite);
         REQUIRE(code == RESULT_SUCCESS);
 
         SECTION("with invalid address") {
             ResultCode code = manager->ChangeMemoryState(
-                0xFFFFFFFF, block->size(), Kernel::MemoryState::Locked,
+                0xFFFFFFFF, block.GetSize(), Kernel::MemoryState::Locked,
                 Kernel::VMAPermission::ReadWrite, Kernel::MemoryState::Aliased,
                 Kernel::VMAPermission::Execute);
             CHECK(code == Kernel::ERR_INVALID_ADDRESS);
@@ -83,7 +84,7 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
 
         SECTION("ignoring the original permissions") {
             ResultCode code = manager->ChangeMemoryState(
-                Memory::HEAP_VADDR, block->size(), Kernel::MemoryState::Private,
+                Memory::HEAP_VADDR, block.GetSize(), Kernel::MemoryState::Private,
                 Kernel::VMAPermission::None, Kernel::MemoryState::Locked,
                 Kernel::VMAPermission::Write);
             CHECK(code == RESULT_SUCCESS);
@@ -96,7 +97,7 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
 
         SECTION("enforcing the original permissions with correct expectations") {
             ResultCode code = manager->ChangeMemoryState(
-                Memory::HEAP_VADDR, block->size(), Kernel::MemoryState::Private,
+                Memory::HEAP_VADDR, block.GetSize(), Kernel::MemoryState::Private,
                 Kernel::VMAPermission::ReadWrite, Kernel::MemoryState::Aliased,
                 Kernel::VMAPermission::Execute);
             CHECK(code == RESULT_SUCCESS);
@@ -109,7 +110,7 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
 
         SECTION("with incorrect permission expectations") {
             ResultCode code = manager->ChangeMemoryState(
-                Memory::HEAP_VADDR, block->size(), Kernel::MemoryState::Private,
+                Memory::HEAP_VADDR, block.GetSize(), Kernel::MemoryState::Private,
                 Kernel::VMAPermission::Execute, Kernel::MemoryState::Aliased,
                 Kernel::VMAPermission::Execute);
             CHECK(code == Kernel::ERR_INVALID_ADDRESS_STATE);
@@ -122,7 +123,7 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
 
         SECTION("with incorrect state expectations") {
             ResultCode code = manager->ChangeMemoryState(
-                Memory::HEAP_VADDR, block->size(), Kernel::MemoryState::Locked,
+                Memory::HEAP_VADDR, block.GetSize(), Kernel::MemoryState::Locked,
                 Kernel::VMAPermission::ReadWrite, Kernel::MemoryState::Aliased,
                 Kernel::VMAPermission::Execute);
             CHECK(code == Kernel::ERR_INVALID_ADDRESS_STATE);
@@ -133,7 +134,7 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
             CHECK(vma->second.meminfo_state == Kernel::MemoryState::Private);
         }
 
-        code = manager->UnmapRange(Memory::HEAP_VADDR, block->size());
+        code = manager->UnmapRange(Memory::HEAP_VADDR, block.GetSize());
         REQUIRE(code == RESULT_SUCCESS);
     }
 }