// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <cinttypes>
#include <vector>

#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/scope_exit.h"
#include "common/string_util.h"
#include "core/arm/arm_interface.h"
#include "core/core_timing.h"
#include "core/hle/function_wrappers.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/semaphore.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/timer.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/kernel/wait_object.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
#include "core/hle/service/service.h"

////////////////////////////////////////////////////////////////////////////////////////////////////
// Namespace SVC

using Kernel::ERR_INVALID_HANDLE;
using Kernel::Handle;
using Kernel::SharedPtr;

namespace SVC {

enum ControlMemoryOperation {
    MEMOP_FREE = 1,
    MEMOP_RESERVE = 2, // This operation seems to be unsupported in the kernel
    MEMOP_COMMIT = 3,
    MEMOP_MAP = 4,
    MEMOP_UNMAP = 5,
    MEMOP_PROTECT = 6,
    MEMOP_OPERATION_MASK = 0xFF,

    MEMOP_REGION_APP = 0x100,
    MEMOP_REGION_SYSTEM = 0x200,
    MEMOP_REGION_BASE = 0x300,
    MEMOP_REGION_MASK = 0xF00,

    MEMOP_LINEAR = 0x10000,
};

/// Map application or GSP heap memory
static ResultCode ControlMemory(u32* out_addr, u32 operation, u32 addr0, u32 addr1, u32 size,
                                u32 permissions) {
    using namespace Kernel;

    LOG_DEBUG(Kernel_SVC,
              "called operation=0x%08X, addr0=0x%08X, addr1=0x%08X, size=0x%X, permissions=0x%08X",
              operation, addr0, addr1, size, permissions);

    if ((addr0 & Memory::PAGE_MASK) != 0 || (addr1 & Memory::PAGE_MASK) != 0) {
        return ERR_MISALIGNED_ADDRESS;
    }
    if ((size & Memory::PAGE_MASK) != 0) {
        return ERR_MISALIGNED_SIZE;
    }

    u32 region = operation & MEMOP_REGION_MASK;
    operation &= ~MEMOP_REGION_MASK;

    if (region != 0) {
        LOG_WARNING(Kernel_SVC, "ControlMemory with specified region not supported, region=%X",
                    region);
    }

    if ((permissions & (u32)MemoryPermission::ReadWrite) != permissions) {
        return ERR_INVALID_COMBINATION;
    }
    VMAPermission vma_permissions = (VMAPermission)permissions;

    auto& process = *g_current_process;

    switch (operation & MEMOP_OPERATION_MASK) {
    case MEMOP_FREE: {
        // TODO(Subv): What happens if an application tries to FREE a block of memory that has a
        // SharedMemory pointing to it?
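        // The address is checked against the regular heap range first, then against the process's
        // linear heap range; anything outside both ranges is rejected with ERR_INVALID_ADDRESS.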
        if (addr0 >= Memory::HEAP_VADDR && addr0 < Memory::HEAP_VADDR_END) {
            ResultCode result = process.HeapFree(addr0, size);
            if (result.IsError())
                return result;
        } else if (addr0 >= process.GetLinearHeapBase() && addr0 < process.GetLinearHeapLimit()) {
            ResultCode result = process.LinearFree(addr0, size);
            if (result.IsError())
                return result;
        } else {
            return ERR_INVALID_ADDRESS;
        }
        *out_addr = addr0;
        break;
    }

    case MEMOP_COMMIT: {
        if (operation & MEMOP_LINEAR) {
            CASCADE_RESULT(*out_addr, process.LinearAllocate(addr0, size, vma_permissions));
        } else {
            CASCADE_RESULT(*out_addr, process.HeapAllocate(addr0, size, vma_permissions));
        }
        break;
    }

    case MEMOP_MAP: // TODO: This is just a hack to avoid regressions until memory aliasing is
                    // implemented
    {
        CASCADE_RESULT(*out_addr, process.HeapAllocate(addr0, size, vma_permissions));
        break;
    }

    case MEMOP_UNMAP: // TODO: This is just a hack to avoid regressions until memory aliasing is
                      // implemented
    {
        ResultCode result = process.HeapFree(addr0, size);
        if (result.IsError())
            return result;
        break;
    }

    case MEMOP_PROTECT: {
        ResultCode result = process.vm_manager.ReprotectRange(addr0, size, vma_permissions);
        if (result.IsError())
            return result;
        break;
    }

    default:
        LOG_ERROR(Kernel_SVC, "unknown operation=0x%08X", operation);
        return ERR_INVALID_COMBINATION;
    }

    process.vm_manager.LogLayout(Log::Level::Trace);

    return RESULT_SUCCESS;
}

/// Maps a memory block to the specified address
static ResultCode MapMemoryBlock(Kernel::Handle handle, u32 addr, u32 permissions,
                                 u32 other_permissions) {
    using Kernel::SharedMemory;
    using Kernel::MemoryPermission;

    LOG_TRACE(Kernel_SVC,
              "called memblock=0x%08X, addr=0x%08X, mypermissions=0x%08X, otherpermission=%d",
              handle, addr, permissions, other_permissions);

    SharedPtr<SharedMemory> shared_memory = Kernel::g_handle_table.Get<SharedMemory>(handle);
    if (shared_memory == nullptr)
        return ERR_INVALID_HANDLE;

    MemoryPermission permissions_type = static_cast<MemoryPermission>(permissions);
    switch (permissions_type) {
    case MemoryPermission::Read:
    case MemoryPermission::Write:
    case MemoryPermission::ReadWrite:
    case MemoryPermission::Execute:
    case MemoryPermission::ReadExecute:
    case MemoryPermission::WriteExecute:
    case MemoryPermission::ReadWriteExecute:
    case MemoryPermission::DontCare:
        return shared_memory->Map(Kernel::g_current_process.get(), addr, permissions_type,
                                  static_cast<MemoryPermission>(other_permissions));
    default:
        LOG_ERROR(Kernel_SVC, "unknown permissions=0x%08X", permissions);
    }

    return Kernel::ERR_INVALID_COMBINATION;
}

static ResultCode UnmapMemoryBlock(Kernel::Handle handle, u32 addr) {
    using Kernel::SharedMemory;

    LOG_TRACE(Kernel_SVC, "called memblock=0x%08X, addr=0x%08X", handle, addr);

    // TODO(Subv): Return E0A01BF5 if the address is not in the application's heap

    SharedPtr<SharedMemory> shared_memory = Kernel::g_handle_table.Get<SharedMemory>(handle);
    if (shared_memory == nullptr)
        return ERR_INVALID_HANDLE;

    return shared_memory->Unmap(Kernel::g_current_process.get(), addr);
}

/// Connect to an OS service given the port name, returns the handle to the port to out
static ResultCode ConnectToPort(Kernel::Handle* out_handle, VAddr port_name_address) {
    if (!Memory::IsValidVirtualAddress(port_name_address))
        return Kernel::ERR_NOT_FOUND;

    static constexpr std::size_t PortNameMaxLength = 11;
    // Read 1 char beyond the max allowed port name to detect names that are too long.
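    // A name longer than PortNameMaxLength is rejected with ERR_PORT_NAME_TOO_LONG rather than
    // being silently truncated.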
    std::string port_name = Memory::ReadCString(port_name_address, PortNameMaxLength + 1);
    if (port_name.size() > PortNameMaxLength)
        return Kernel::ERR_PORT_NAME_TOO_LONG;

    LOG_TRACE(Kernel_SVC, "called port_name=%s", port_name.c_str());

    auto it = Service::g_kernel_named_ports.find(port_name);
    if (it == Service::g_kernel_named_ports.end()) {
        LOG_WARNING(Kernel_SVC, "tried to connect to unknown port: %s", port_name.c_str());
        return Kernel::ERR_NOT_FOUND;
    }

    auto client_port = it->second;

    SharedPtr<Kernel::ClientSession> client_session;
    CASCADE_RESULT(client_session, client_port->Connect());

    // Return the client session
    CASCADE_RESULT(*out_handle, Kernel::g_handle_table.Create(client_session));
    return RESULT_SUCCESS;
}

/// Makes a blocking IPC call to an OS service.
static ResultCode SendSyncRequest(Kernel::Handle handle) {
    SharedPtr<Kernel::ClientSession> session =
        Kernel::g_handle_table.Get<Kernel::ClientSession>(handle);
    if (session == nullptr) {
        return ERR_INVALID_HANDLE;
    }

    LOG_TRACE(Kernel_SVC, "called handle=0x%08X(%s)", handle, session->GetName().c_str());

    Core::System::GetInstance().PrepareReschedule();

    return session->SendSyncRequest(Kernel::GetCurrentThread());
}

/// Close a handle
static ResultCode CloseHandle(Kernel::Handle handle) {
    LOG_TRACE(Kernel_SVC, "Closing handle 0x%08X", handle);
    return Kernel::g_handle_table.Close(handle);
}

/// Wait for a handle to synchronize, timeout after the specified nanoseconds
static ResultCode WaitSynchronization1(Kernel::Handle handle, s64 nano_seconds) {
    auto object = Kernel::g_handle_table.Get<Kernel::WaitObject>(handle);
    Kernel::Thread* thread = Kernel::GetCurrentThread();

    if (object == nullptr)
        return ERR_INVALID_HANDLE;

    LOG_TRACE(Kernel_SVC, "called handle=0x%08X(%s:%s), nanoseconds=%lld", handle,
              object->GetTypeName().c_str(), object->GetName().c_str(), nano_seconds);

    if (object->ShouldWait(thread)) {
        if (nano_seconds == 0)
            return Kernel::RESULT_TIMEOUT;

        thread->wait_objects = {object};
        object->AddWaitingThread(thread);
        thread->status = THREADSTATUS_WAIT_SYNCH_ANY;

        // Create an event to wake the thread up after the specified nanosecond delay has passed
        thread->WakeAfterDelay(nano_seconds);

        thread->wakeup_callback = [](ThreadWakeupReason reason,
                                     Kernel::SharedPtr<Kernel::Thread> thread,
                                     Kernel::SharedPtr<Kernel::WaitObject> object) {
            ASSERT(thread->status == THREADSTATUS_WAIT_SYNCH_ANY);

            if (reason == ThreadWakeupReason::Timeout) {
                thread->SetWaitSynchronizationResult(Kernel::RESULT_TIMEOUT);
                return;
            }

            ASSERT(reason == ThreadWakeupReason::Signal);
            thread->SetWaitSynchronizationResult(RESULT_SUCCESS);

            // WaitSynchronization1 doesn't have an output index like WaitSynchronizationN, so we
            // don't have to do anything else here.
        };

        Core::System::GetInstance().PrepareReschedule();

        // Note: The output of this SVC will be set to RESULT_SUCCESS if the thread
        // resumes due to a signal in its wait objects.
        // Otherwise we retain the default value of timeout.
        return Kernel::RESULT_TIMEOUT;
    }

    object->Acquire(thread);

    return RESULT_SUCCESS;
}

/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
static ResultCode WaitSynchronizationN(s32* out, VAddr handles_address, s32 handle_count,
                                       bool wait_all, s64 nano_seconds) {
    Kernel::Thread* thread = Kernel::GetCurrentThread();

    if (!Memory::IsValidVirtualAddress(handles_address))
        return Kernel::ERR_INVALID_POINTER;

    // NOTE: on real hardware, there is no nullptr check for 'out' (tested with firmware 4.4). If
    // this happens, the running application will crash.
    ASSERT_MSG(out != nullptr, "invalid output pointer specified!");

    // Check if 'handle_count' is invalid
    if (handle_count < 0)
        return Kernel::ERR_OUT_OF_RANGE;

    using ObjectPtr = Kernel::SharedPtr<Kernel::WaitObject>;
    std::vector<ObjectPtr> objects(handle_count);

    for (int i = 0; i < handle_count; ++i) {
        Kernel::Handle handle = Memory::Read32(handles_address + i * sizeof(Kernel::Handle));
        auto object = Kernel::g_handle_table.Get<Kernel::WaitObject>(handle);
        if (object == nullptr)
            return ERR_INVALID_HANDLE;
        objects[i] = object;
    }

    if (wait_all) {
        bool all_available =
            std::all_of(objects.begin(), objects.end(),
                        [thread](const ObjectPtr& object) { return !object->ShouldWait(thread); });
        if (all_available) {
            // We can acquire all objects right now, do so.
            for (auto& object : objects)
                object->Acquire(thread);
            // Note: In this case, the `out` parameter is not set,
            // and retains whatever value it had before.
            return RESULT_SUCCESS;
        }

        // Not all objects were available right now, prepare to suspend the thread.

        // If a timeout value of 0 was provided, just return the Timeout error code instead of
        // suspending the thread.
        if (nano_seconds == 0)
            return Kernel::RESULT_TIMEOUT;

        // Put the thread to sleep
        thread->status = THREADSTATUS_WAIT_SYNCH_ALL;

        // Add the thread to each of the objects' waiting threads.
        for (auto& object : objects) {
            object->AddWaitingThread(thread);
        }

        thread->wait_objects = std::move(objects);

        // Create an event to wake the thread up after the specified nanosecond delay has passed
        thread->WakeAfterDelay(nano_seconds);

        thread->wakeup_callback = [](ThreadWakeupReason reason,
                                     Kernel::SharedPtr<Kernel::Thread> thread,
                                     Kernel::SharedPtr<Kernel::WaitObject> object) {
            ASSERT(thread->status == THREADSTATUS_WAIT_SYNCH_ALL);

            if (reason == ThreadWakeupReason::Timeout) {
                thread->SetWaitSynchronizationResult(Kernel::RESULT_TIMEOUT);
                return;
            }

            ASSERT(reason == ThreadWakeupReason::Signal);
            thread->SetWaitSynchronizationResult(RESULT_SUCCESS);

            // The wait_all case does not update the output index.
        };

        Core::System::GetInstance().PrepareReschedule();

        // This value gets set to -1 by default in this case, it is not modified after this.
        *out = -1;
        // Note: The output of this SVC will be set to RESULT_SUCCESS if the thread resumes due to
        // a signal in one of its wait objects.
        return Kernel::RESULT_TIMEOUT;
    } else {
        // Find the first object that is acquirable in the provided list of objects
        auto itr = std::find_if(objects.begin(), objects.end(), [thread](const ObjectPtr& object) {
            return !object->ShouldWait(thread);
        });

        if (itr != objects.end()) {
            // We found a ready object, acquire it and set the result value
            Kernel::WaitObject* object = itr->get();
            object->Acquire(thread);
            *out = static_cast<s32>(std::distance(objects.begin(), itr));
            return RESULT_SUCCESS;
        }

        // No objects were ready to be acquired, prepare to suspend the thread.

        // If a timeout value of 0 was provided, just return the Timeout error code instead of
        // suspending the thread.
        if (nano_seconds == 0)
            return Kernel::RESULT_TIMEOUT;

        // Put the thread to sleep
        thread->status = THREADSTATUS_WAIT_SYNCH_ANY;

        // Add the thread to each of the objects' waiting threads.
        for (size_t i = 0; i < objects.size(); ++i) {
            Kernel::WaitObject* object = objects[i].get();
            object->AddWaitingThread(thread);
        }

        thread->wait_objects = std::move(objects);

        // Note: If no handles and no timeout were given, then the thread will deadlock, this is
        // consistent with hardware behavior.
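        // Note: WakeAfterDelay is expected to treat a negative timeout (-1) as wait-forever and
        // schedule no wakeup event, so in that case only a signal on one of the objects resumes
        // the thread.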
        // Create an event to wake the thread up after the specified nanosecond delay has passed
        thread->WakeAfterDelay(nano_seconds);

        thread->wakeup_callback = [](ThreadWakeupReason reason,
                                     Kernel::SharedPtr<Kernel::Thread> thread,
                                     Kernel::SharedPtr<Kernel::WaitObject> object) {
            ASSERT(thread->status == THREADSTATUS_WAIT_SYNCH_ANY);

            if (reason == ThreadWakeupReason::Timeout) {
                thread->SetWaitSynchronizationResult(Kernel::RESULT_TIMEOUT);
                return;
            }

            ASSERT(reason == ThreadWakeupReason::Signal);

            thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
            thread->SetWaitSynchronizationOutput(thread->GetWaitObjectIndex(object.get()));
        };

        Core::System::GetInstance().PrepareReschedule();

        // Note: The output of this SVC will be set to RESULT_SUCCESS if the thread resumes due to
        // a signal in one of its wait objects.
        // Otherwise we retain the default value of timeout, and -1 in the out parameter
        *out = -1;
        return Kernel::RESULT_TIMEOUT;
    }
}

/// In a single operation, sends an IPC reply and waits for a new request.
static ResultCode ReplyAndReceive(s32* index, VAddr handles_address, s32 handle_count,
                                  Kernel::Handle reply_target) {
    if (!Memory::IsValidVirtualAddress(handles_address))
        return Kernel::ERR_INVALID_POINTER;

    // Check if 'handle_count' is invalid
    if (handle_count < 0)
        return Kernel::ERR_OUT_OF_RANGE;

    using ObjectPtr = SharedPtr<Kernel::WaitObject>;
    std::vector<ObjectPtr> objects(handle_count);

    for (int i = 0; i < handle_count; ++i) {
        Kernel::Handle handle = Memory::Read32(handles_address + i * sizeof(Kernel::Handle));
        auto object = Kernel::g_handle_table.Get<Kernel::WaitObject>(handle);
        if (object == nullptr)
            return ERR_INVALID_HANDLE;
        objects[i] = object;
    }

    // We are also sending a command reply.
    // Do not send a reply if the command id in the command buffer is 0xFFFF.
    u32* cmd_buff = Kernel::GetCommandBuffer();
    IPC::Header header{cmd_buff[0]};
    if (reply_target != 0 && header.command_id != 0xFFFF) {
        auto session = Kernel::g_handle_table.Get<Kernel::ServerSession>(reply_target);
        if (session == nullptr)
            return ERR_INVALID_HANDLE;

        auto request_thread = std::move(session->currently_handling);

        // Mark the request as "handled".
        session->currently_handling = nullptr;

        // Error out if there's no request thread or the session was closed.
        // TODO(Subv): Is the same error code (ClosedByRemote) returned for both of these cases?
        if (request_thread == nullptr || session->parent->client == nullptr) {
            *index = -1;
            return Kernel::ERR_SESSION_CLOSED_BY_REMOTE;
        }

        // TODO(Subv): Perform IPC translation from the current thread to request_thread.

        // Note: The scheduler is not invoked here.
        request_thread->ResumeFromWait();
    }

    if (handle_count == 0) {
        *index = 0;
        // The kernel uses this value as a placeholder for the real error, and returns it when we
        // pass no handles and do not perform any reply.
        if (reply_target == 0 || header.command_id == 0xFFFF)
            return ResultCode(0xE7E3FFFF);

        return RESULT_SUCCESS;
    }

    auto thread = Kernel::GetCurrentThread();

    // Find the first object that is acquirable in the provided list of objects
    auto itr = std::find_if(objects.begin(), objects.end(), [thread](const ObjectPtr& object) {
        return !object->ShouldWait(thread);
    });

    if (itr != objects.end()) {
        // We found a ready object, acquire it and set the result value
        Kernel::WaitObject* object = itr->get();
        object->Acquire(thread);
        *index = static_cast<s32>(std::distance(objects.begin(), itr));

        if (object->GetHandleType() == Kernel::HandleType::ServerSession) {
            auto server_session = static_cast<Kernel::ServerSession*>(object);
            if (server_session->parent->client == nullptr)
                return Kernel::ERR_SESSION_CLOSED_BY_REMOTE;

            // TODO(Subv): Perform IPC translation from the ServerSession to the current thread.
        }
        return RESULT_SUCCESS;
    }

    // No objects were ready to be acquired, prepare to suspend the thread.

    // Put the thread to sleep
    thread->status = THREADSTATUS_WAIT_SYNCH_ANY;

    // Add the thread to each of the objects' waiting threads.
    for (size_t i = 0; i < objects.size(); ++i) {
        Kernel::WaitObject* object = objects[i].get();
        object->AddWaitingThread(thread);
    }

    thread->wait_objects = std::move(objects);

    thread->wakeup_callback = [](ThreadWakeupReason reason,
                                 Kernel::SharedPtr<Kernel::Thread> thread,
                                 Kernel::SharedPtr<Kernel::WaitObject> object) {
        ASSERT(thread->status == THREADSTATUS_WAIT_SYNCH_ANY);
        ASSERT(reason == ThreadWakeupReason::Signal);

        thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
        thread->SetWaitSynchronizationOutput(thread->GetWaitObjectIndex(object.get()));

        // TODO(Subv): Perform IPC translation upon wakeup.
    };

    Core::System::GetInstance().PrepareReschedule();

    // Note: The output of this SVC will be set to RESULT_SUCCESS if the thread resumes due to a
    // signal in one of its wait objects, or to 0xC8A01836 if there was a translation error.
    // By default the index is set to -1.
    *index = -1;
    return RESULT_SUCCESS;
}

/// Create an address arbiter (to allocate access to shared resources)
static ResultCode CreateAddressArbiter(Kernel::Handle* out_handle) {
    using Kernel::AddressArbiter;

    SharedPtr<AddressArbiter> arbiter = AddressArbiter::Create();
    CASCADE_RESULT(*out_handle, Kernel::g_handle_table.Create(std::move(arbiter)));
    LOG_TRACE(Kernel_SVC, "returned handle=0x%08X", *out_handle);
    return RESULT_SUCCESS;
}

/// Arbitrate address
static ResultCode ArbitrateAddress(Kernel::Handle handle, u32 address, u32 type, u32 value,
                                   s64 nanoseconds) {
    using Kernel::AddressArbiter;

    LOG_TRACE(Kernel_SVC, "called handle=0x%08X, address=0x%08X, type=0x%08X, value=0x%08X", handle,
              address, type, value);

    SharedPtr<AddressArbiter> arbiter = Kernel::g_handle_table.Get<AddressArbiter>(handle);
    if (arbiter == nullptr)
        return ERR_INVALID_HANDLE;

    auto res = arbiter->ArbitrateAddress(static_cast<Kernel::ArbitrationType>(type), address, value,
                                         nanoseconds);

    // TODO(Subv): Identify in which specific cases this call should cause a reschedule.
    Core::System::GetInstance().PrepareReschedule();

    return res;
}

static void Break(u8 break_reason) {
    LOG_CRITICAL(Debug_Emulated, "Emulated program broke execution!");
    std::string reason_str;
    switch (break_reason) {
    case 0:
        reason_str = "PANIC";
        break;
    case 1:
        reason_str = "ASSERT";
        break;
    case 2:
        reason_str = "USER";
        break;
    default:
        reason_str = "UNKNOWN";
        break;
    }
    LOG_CRITICAL(Debug_Emulated, "Break reason: %s", reason_str.c_str());
}

/// Used to output a message on a debug hardware unit - does nothing on a retail unit
static void OutputDebugString(VAddr address, int len) {
    std::vector<char> string(len);
    Memory::ReadBlock(address, string.data(), len);
    LOG_DEBUG(Debug_Emulated, "%.*s", len, string.data());
}

/// Get resource limit
static ResultCode GetResourceLimit(Kernel::Handle* resource_limit, Kernel::Handle process_handle) {
    LOG_TRACE(Kernel_SVC, "called process=0x%08X", process_handle);

    SharedPtr<Kernel::Process> process =
        Kernel::g_handle_table.Get<Kernel::Process>(process_handle);
    if (process == nullptr)
        return ERR_INVALID_HANDLE;

    CASCADE_RESULT(*resource_limit, Kernel::g_handle_table.Create(process->resource_limit));

    return RESULT_SUCCESS;
}

/// Get resource limit current values
static ResultCode GetResourceLimitCurrentValues(VAddr values, Kernel::Handle resource_limit_handle,
                                                VAddr names, u32 name_count) {
    LOG_TRACE(Kernel_SVC, "called resource_limit=%08X, names=%08X, name_count=%d",
              resource_limit_handle, names, name_count);

    SharedPtr<Kernel::ResourceLimit> resource_limit =
        Kernel::g_handle_table.Get<Kernel::ResourceLimit>(resource_limit_handle);
    if (resource_limit == nullptr)
        return ERR_INVALID_HANDLE;

    for (unsigned int i = 0; i < name_count; ++i) {
        u32 name = Memory::Read32(names + i * sizeof(u32));
        s64 value = resource_limit->GetCurrentResourceValue(name);
        Memory::Write64(values + i * sizeof(u64), value);
    }

    return RESULT_SUCCESS;
}

/// Get resource limit max values
static ResultCode GetResourceLimitLimitValues(VAddr values, Kernel::Handle resource_limit_handle,
                                              VAddr names, u32 name_count) {
    LOG_TRACE(Kernel_SVC, "called resource_limit=%08X, names=%08X, name_count=%d",
              resource_limit_handle, names, name_count);

    SharedPtr<Kernel::ResourceLimit> resource_limit =
        Kernel::g_handle_table.Get<Kernel::ResourceLimit>(resource_limit_handle);
    if (resource_limit == nullptr)
        return ERR_INVALID_HANDLE;

    for (unsigned int i = 0; i < name_count; ++i) {
        u32 name = Memory::Read32(names + i * sizeof(u32));
        // Look up the limit for the resource name read above (not the raw 'names' address).
        s64 value = resource_limit->GetMaxResourceValue(name);
        Memory::Write64(values + i * sizeof(u64), value);
    }

    return RESULT_SUCCESS;
}

/// Creates a new thread
static ResultCode CreateThread(Kernel::Handle* out_handle, u32 priority, u32 entry_point, u32 arg,
                               u32 stack_top, s32 processor_id) {
    using Kernel::Thread;

    std::string name = Common::StringFromFormat("unknown-%08" PRIX32, entry_point);

    if (priority > THREADPRIO_LOWEST) {
        return Kernel::ERR_OUT_OF_RANGE;
    }

    using Kernel::ResourceLimit;
    Kernel::SharedPtr<ResourceLimit>& resource_limit = Kernel::g_current_process->resource_limit;
    if (resource_limit->GetMaxResourceValue(Kernel::ResourceTypes::PRIORITY) > priority) {
        return Kernel::ERR_NOT_AUTHORIZED;
    }

    switch (processor_id) {
    case THREADPROCESSORID_ALL:
    case THREADPROCESSORID_DEFAULT:
    case THREADPROCESSORID_0:
    case THREADPROCESSORID_1:
        break;
    default:
        // TODO(bunnei): Implement support for other processor IDs
        ASSERT_MSG(false, "Unsupported thread processor ID: %d", processor_id);
        break;
    }

    if (processor_id == THREADPROCESSORID_ALL) {
        LOG_INFO(Kernel_SVC,
                 "Newly created thread is allowed to be run in any Core, unimplemented.");
    }

    if (processor_id == THREADPROCESSORID_DEFAULT &&
        Kernel::g_current_process->ideal_processor ==
            THREADPROCESSORID_1) {
        LOG_WARNING(
            Kernel_SVC,
            "Newly created thread is allowed to be run in the SysCore (Core1), unimplemented.");
    }

    if (processor_id == THREADPROCESSORID_1) {
        LOG_ERROR(Kernel_SVC,
                  "Newly created thread must run in the SysCore (Core1), unimplemented.");
    }

    CASCADE_RESULT(SharedPtr<Thread> thread,
                   Kernel::Thread::Create(name, entry_point, priority, arg, processor_id, stack_top,
                                          Kernel::g_current_process));

    thread->context.fpscr =
        FPSCR_DEFAULT_NAN | FPSCR_FLUSH_TO_ZERO | FPSCR_ROUND_TOZERO; // 0x03C00000

    CASCADE_RESULT(*out_handle, Kernel::g_handle_table.Create(std::move(thread)));

    Core::System::GetInstance().PrepareReschedule();

    LOG_TRACE(Kernel_SVC,
              "called entrypoint=0x%08X (%s), arg=0x%08X, stacktop=0x%08X, "
              "threadpriority=0x%08X, processorid=0x%08X : created handle=0x%08X",
              entry_point, name.c_str(), arg, stack_top, priority, processor_id, *out_handle);

    return RESULT_SUCCESS;
}

/// Called when a thread exits
static void ExitThread() {
    LOG_TRACE(Kernel_SVC, "called, pc=0x%08X", Core::CPU().GetPC());

    Kernel::ExitCurrentThread();
    Core::System::GetInstance().PrepareReschedule();
}

/// Gets the priority for the specified thread
static ResultCode GetThreadPriority(u32* priority, Kernel::Handle handle) {
    const SharedPtr<Kernel::Thread> thread = Kernel::g_handle_table.Get<Kernel::Thread>(handle);
    if (thread == nullptr)
        return ERR_INVALID_HANDLE;

    *priority = thread->GetPriority();
    return RESULT_SUCCESS;
}

/// Sets the priority for the specified thread
static ResultCode SetThreadPriority(Kernel::Handle handle, u32 priority) {
    if (priority > THREADPRIO_LOWEST) {
        return Kernel::ERR_OUT_OF_RANGE;
    }

    SharedPtr<Kernel::Thread> thread = Kernel::g_handle_table.Get<Kernel::Thread>(handle);
    if (thread == nullptr)
        return ERR_INVALID_HANDLE;

    using Kernel::ResourceLimit;
    // Note: The kernel uses the current process's resource limit instead of
    // the one from the thread owner's resource limit.
    Kernel::SharedPtr<ResourceLimit>& resource_limit = Kernel::g_current_process->resource_limit;
    if (resource_limit->GetMaxResourceValue(Kernel::ResourceTypes::PRIORITY) > priority) {
        return Kernel::ERR_NOT_AUTHORIZED;
    }

    thread->SetPriority(priority);
    thread->UpdatePriority();

    // Update the mutexes that this thread is waiting for
    for (auto& mutex : thread->pending_mutexes)
        mutex->UpdatePriority();

    Core::System::GetInstance().PrepareReschedule();
    return RESULT_SUCCESS;
}

/// Create a mutex
static ResultCode CreateMutex(Kernel::Handle* out_handle, u32 initial_locked) {
    using Kernel::Mutex;

    SharedPtr<Mutex> mutex = Mutex::Create(initial_locked != 0);
    mutex->name = Common::StringFromFormat("mutex-%08x", Core::CPU().GetReg(14));
    CASCADE_RESULT(*out_handle, Kernel::g_handle_table.Create(std::move(mutex)));

    LOG_TRACE(Kernel_SVC, "called initial_locked=%s : created handle=0x%08X",
              initial_locked ?
"true" : "false", *out_handle); return RESULT_SUCCESS; } /// Release a mutex static ResultCode ReleaseMutex(Kernel::Handle handle) { using Kernel::Mutex; LOG_TRACE(Kernel_SVC, "called handle=0x%08X", handle); SharedPtr mutex = Kernel::g_handle_table.Get(handle); if (mutex == nullptr) return ERR_INVALID_HANDLE; return mutex->Release(Kernel::GetCurrentThread()); } /// Get the ID of the specified process static ResultCode GetProcessId(u32* process_id, Kernel::Handle process_handle) { LOG_TRACE(Kernel_SVC, "called process=0x%08X", process_handle); const SharedPtr process = Kernel::g_handle_table.Get(process_handle); if (process == nullptr) return ERR_INVALID_HANDLE; *process_id = process->process_id; return RESULT_SUCCESS; } /// Get the ID of the process that owns the specified thread static ResultCode GetProcessIdOfThread(u32* process_id, Kernel::Handle thread_handle) { LOG_TRACE(Kernel_SVC, "called thread=0x%08X", thread_handle); const SharedPtr thread = Kernel::g_handle_table.Get(thread_handle); if (thread == nullptr) return ERR_INVALID_HANDLE; const SharedPtr process = thread->owner_process; ASSERT_MSG(process != nullptr, "Invalid parent process for thread=0x%08X", thread_handle); *process_id = process->process_id; return RESULT_SUCCESS; } /// Get the ID for the specified thread. static ResultCode GetThreadId(u32* thread_id, Kernel::Handle handle) { LOG_TRACE(Kernel_SVC, "called thread=0x%08X", handle); const SharedPtr thread = Kernel::g_handle_table.Get(handle); if (thread == nullptr) return ERR_INVALID_HANDLE; *thread_id = thread->GetThreadId(); return RESULT_SUCCESS; } /// Creates a semaphore static ResultCode CreateSemaphore(Kernel::Handle* out_handle, s32 initial_count, s32 max_count) { using Kernel::Semaphore; CASCADE_RESULT(SharedPtr semaphore, Semaphore::Create(initial_count, max_count)); semaphore->name = Common::StringFromFormat("semaphore-%08x", Core::CPU().GetReg(14)); CASCADE_RESULT(*out_handle, Kernel::g_handle_table.Create(std::move(semaphore))); LOG_TRACE(Kernel_SVC, "called initial_count=%d, max_count=%d, created handle=0x%08X", initial_count, max_count, *out_handle); return RESULT_SUCCESS; } /// Releases a certain number of slots in a semaphore static ResultCode ReleaseSemaphore(s32* count, Kernel::Handle handle, s32 release_count) { using Kernel::Semaphore; LOG_TRACE(Kernel_SVC, "called release_count=%d, handle=0x%08X", release_count, handle); SharedPtr semaphore = Kernel::g_handle_table.Get(handle); if (semaphore == nullptr) return ERR_INVALID_HANDLE; CASCADE_RESULT(*count, semaphore->Release(release_count)); return RESULT_SUCCESS; } /// Query process memory static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* page_info, Kernel::Handle process_handle, u32 addr) { using Kernel::Process; Kernel::SharedPtr process = Kernel::g_handle_table.Get(process_handle); if (process == nullptr) return ERR_INVALID_HANDLE; auto vma = process->vm_manager.FindVMA(addr); if (vma == Kernel::g_current_process->vm_manager.vma_map.end()) return Kernel::ERR_INVALID_ADDRESS; memory_info->base_address = vma->second.base; memory_info->permission = static_cast(vma->second.permissions); memory_info->size = vma->second.size; memory_info->state = static_cast(vma->second.meminfo_state); page_info->flags = 0; LOG_TRACE(Kernel_SVC, "called process=0x%08X addr=0x%08X", process_handle, addr); return RESULT_SUCCESS; } /// Query memory static ResultCode QueryMemory(MemoryInfo* memory_info, PageInfo* page_info, u32 addr) { return QueryProcessMemory(memory_info, page_info, 
                              Kernel::CurrentProcess, addr);
}

/// Create an event
static ResultCode CreateEvent(Kernel::Handle* out_handle, u32 reset_type) {
    using Kernel::Event;

    SharedPtr<Event> evt = Event::Create(static_cast<Kernel::ResetType>(reset_type));
    evt->name = Common::StringFromFormat("event-%08x", Core::CPU().GetReg(14));
    CASCADE_RESULT(*out_handle, Kernel::g_handle_table.Create(std::move(evt)));

    LOG_TRACE(Kernel_SVC, "called reset_type=0x%08X : created handle=0x%08X", reset_type,
              *out_handle);
    return RESULT_SUCCESS;
}

/// Duplicates a kernel handle
static ResultCode DuplicateHandle(Kernel::Handle* out, Kernel::Handle handle) {
    CASCADE_RESULT(*out, Kernel::g_handle_table.Duplicate(handle));
    LOG_TRACE(Kernel_SVC, "duplicated 0x%08X to 0x%08X", handle, *out);
    return RESULT_SUCCESS;
}

/// Signals an event
static ResultCode SignalEvent(Kernel::Handle handle) {
    using Kernel::Event;
    LOG_TRACE(Kernel_SVC, "called event=0x%08X", handle);

    SharedPtr<Event> evt = Kernel::g_handle_table.Get<Event>(handle);
    if (evt == nullptr)
        return ERR_INVALID_HANDLE;

    evt->Signal();

    return RESULT_SUCCESS;
}

/// Clears an event
static ResultCode ClearEvent(Kernel::Handle handle) {
    using Kernel::Event;
    LOG_TRACE(Kernel_SVC, "called event=0x%08X", handle);

    SharedPtr<Event> evt = Kernel::g_handle_table.Get<Event>(handle);
    if (evt == nullptr)
        return ERR_INVALID_HANDLE;

    evt->Clear();
    return RESULT_SUCCESS;
}

/// Creates a timer
static ResultCode CreateTimer(Kernel::Handle* out_handle, u32 reset_type) {
    using Kernel::Timer;

    SharedPtr<Timer> timer = Timer::Create(static_cast<Kernel::ResetType>(reset_type));
    timer->name = Common::StringFromFormat("timer-%08x", Core::CPU().GetReg(14));
    CASCADE_RESULT(*out_handle, Kernel::g_handle_table.Create(std::move(timer)));

    LOG_TRACE(Kernel_SVC, "called reset_type=0x%08X : created handle=0x%08X", reset_type,
              *out_handle);
    return RESULT_SUCCESS;
}

/// Clears a timer
static ResultCode ClearTimer(Kernel::Handle handle) {
    using Kernel::Timer;

    LOG_TRACE(Kernel_SVC, "called timer=0x%08X", handle);

    SharedPtr<Timer> timer = Kernel::g_handle_table.Get<Timer>(handle);
    if (timer == nullptr)
        return ERR_INVALID_HANDLE;

    timer->Clear();
    return RESULT_SUCCESS;
}

/// Starts a timer
static ResultCode SetTimer(Kernel::Handle handle, s64 initial, s64 interval) {
    using Kernel::Timer;

    LOG_TRACE(Kernel_SVC, "called timer=0x%08X", handle);

    if (initial < 0 || interval < 0) {
        return Kernel::ERR_OUT_OF_RANGE_KERNEL;
    }

    SharedPtr<Timer> timer = Kernel::g_handle_table.Get<Timer>(handle);
    if (timer == nullptr)
        return ERR_INVALID_HANDLE;

    timer->Set(initial, interval);

    return RESULT_SUCCESS;
}

/// Cancels a timer
static ResultCode CancelTimer(Kernel::Handle handle) {
    using Kernel::Timer;

    LOG_TRACE(Kernel_SVC, "called timer=0x%08X", handle);

    SharedPtr<Timer> timer = Kernel::g_handle_table.Get<Timer>(handle);
    if (timer == nullptr)
        return ERR_INVALID_HANDLE;

    timer->Cancel();

    return RESULT_SUCCESS;
}

/// Sleep the current thread
static void SleepThread(s64 nanoseconds) {
    LOG_TRACE(Kernel_SVC, "called nanoseconds=%lld", nanoseconds);

    // Don't attempt to yield execution if there are no available threads to run,
    // this way we avoid a useless reschedule to the idle thread.
    if (nanoseconds == 0 && !Kernel::HaveReadyThreads())
        return;

    // Sleep current thread and check for next thread to schedule
    Kernel::WaitCurrentThread_Sleep();

    // Create an event to wake the thread up after the specified nanosecond delay has passed
    Kernel::GetCurrentThread()->WakeAfterDelay(nanoseconds);

    Core::System::GetInstance().PrepareReschedule();
}

/// This returns the total CPU ticks elapsed since the CPU was powered-on
static s64 GetSystemTick() {
    s64 result = CoreTiming::GetTicks();
    // Advance time to defeat dumb games (like Cubic Ninja) that busy-wait for the frame to end.
    CoreTiming::AddTicks(150); // Measured time between two calls on a 9.2 o3DS with Ninjhax 1.1b
    return result;
}

/// Creates a memory block at the specified address with the specified permissions and size
static ResultCode CreateMemoryBlock(Kernel::Handle* out_handle, u32 addr, u32 size,
                                    u32 my_permission, u32 other_permission) {
    using Kernel::SharedMemory;

    if (size % Memory::PAGE_SIZE != 0)
        return Kernel::ERR_MISALIGNED_SIZE;

    SharedPtr<SharedMemory> shared_memory = nullptr;

    using Kernel::MemoryPermission;
    auto VerifyPermissions = [](MemoryPermission permission) {
        // SharedMemory blocks can not be created with Execute permissions
        switch (permission) {
        case MemoryPermission::None:
        case MemoryPermission::Read:
        case MemoryPermission::Write:
        case MemoryPermission::ReadWrite:
        case MemoryPermission::DontCare:
            return true;
        default:
            return false;
        }
    };

    if (!VerifyPermissions(static_cast<MemoryPermission>(my_permission)) ||
        !VerifyPermissions(static_cast<MemoryPermission>(other_permission)))
        return Kernel::ERR_INVALID_COMBINATION;

    // TODO(Subv): Processes with memory type APPLICATION are not allowed
    // to create memory blocks with addr = 0, any attempts to do so
    // should return error 0xD92007EA.
    if ((addr < Memory::PROCESS_IMAGE_VADDR || addr + size > Memory::SHARED_MEMORY_VADDR_END) &&
        addr != 0) {
        return Kernel::ERR_INVALID_ADDRESS;
    }

    // When trying to create a memory block with address = 0,
    // if the process has the Shared Device Memory flag in the exheader,
    // then we have to allocate from the same region as the caller process instead of the BASE
    // region.
    Kernel::MemoryRegion region = Kernel::MemoryRegion::BASE;
    if (addr == 0 && Kernel::g_current_process->flags.shared_device_mem)
        region = Kernel::g_current_process->flags.memory_region;

    shared_memory =
        SharedMemory::Create(Kernel::g_current_process, size,
                             static_cast<MemoryPermission>(my_permission),
                             static_cast<MemoryPermission>(other_permission), addr, region);
    CASCADE_RESULT(*out_handle, Kernel::g_handle_table.Create(std::move(shared_memory)));

    LOG_WARNING(Kernel_SVC, "called addr=0x%08X", addr);
    return RESULT_SUCCESS;
}

static ResultCode CreatePort(Kernel::Handle* server_port, Kernel::Handle* client_port,
                             VAddr name_address, u32 max_sessions) {
    // TODO(Subv): Implement named ports.
    ASSERT_MSG(name_address == 0, "Named ports are currently unimplemented");

    using Kernel::ServerPort;
    using Kernel::ClientPort;

    auto ports = ServerPort::CreatePortPair(max_sessions);
    CASCADE_RESULT(*client_port,
                   Kernel::g_handle_table.Create(
                       std::move(std::get<SharedPtr<ClientPort>>(ports))));
    // Note: The 3DS kernel also leaks the client port handle if the server port handle fails to be
    // created.
    CASCADE_RESULT(*server_port,
                   Kernel::g_handle_table.Create(
                       std::move(std::get<SharedPtr<ServerPort>>(ports))));

    LOG_TRACE(Kernel_SVC, "called max_sessions=%u", max_sessions);
    return RESULT_SUCCESS;
}

static ResultCode CreateSessionToPort(Handle* out_client_session, Handle client_port_handle) {
    using Kernel::ClientPort;
    SharedPtr<ClientPort> client_port = Kernel::g_handle_table.Get<ClientPort>(client_port_handle);
    if (client_port == nullptr)
        return ERR_INVALID_HANDLE;

    CASCADE_RESULT(auto session, client_port->Connect());
    CASCADE_RESULT(*out_client_session, Kernel::g_handle_table.Create(std::move(session)));
    return RESULT_SUCCESS;
}

static ResultCode CreateSession(Handle* server_session, Handle* client_session) {
    auto sessions = Kernel::ServerSession::CreateSessionPair();

    auto& server = std::get<SharedPtr<Kernel::ServerSession>>(sessions);
    CASCADE_RESULT(*server_session, Kernel::g_handle_table.Create(std::move(server)));

    auto& client = std::get<SharedPtr<Kernel::ClientSession>>(sessions);
    CASCADE_RESULT(*client_session, Kernel::g_handle_table.Create(std::move(client)));

    LOG_TRACE(Kernel_SVC, "called");
    return RESULT_SUCCESS;
}

static ResultCode AcceptSession(Handle* out_server_session, Handle server_port_handle) {
    using Kernel::ServerPort;
    SharedPtr<ServerPort> server_port = Kernel::g_handle_table.Get<ServerPort>(server_port_handle);
    if (server_port == nullptr)
        return ERR_INVALID_HANDLE;

    CASCADE_RESULT(auto session, server_port->Accept());
    CASCADE_RESULT(*out_server_session, Kernel::g_handle_table.Create(std::move(session)));
    return RESULT_SUCCESS;
}

static ResultCode GetSystemInfo(s64* out, u32 type, s32 param) {
    using Kernel::MemoryRegion;

    LOG_TRACE(Kernel_SVC, "called type=%u param=%d", type, param);

    switch ((SystemInfoType)type) {
    case SystemInfoType::REGION_MEMORY_USAGE:
        switch ((SystemInfoMemUsageRegion)param) {
        case SystemInfoMemUsageRegion::ALL:
            *out = Kernel::GetMemoryRegion(Kernel::MemoryRegion::APPLICATION)->used +
                   Kernel::GetMemoryRegion(Kernel::MemoryRegion::SYSTEM)->used +
                   Kernel::GetMemoryRegion(Kernel::MemoryRegion::BASE)->used;
            break;
        case SystemInfoMemUsageRegion::APPLICATION:
            *out = Kernel::GetMemoryRegion(Kernel::MemoryRegion::APPLICATION)->used;
            break;
        case SystemInfoMemUsageRegion::SYSTEM:
            *out = Kernel::GetMemoryRegion(Kernel::MemoryRegion::SYSTEM)->used;
            break;
        case SystemInfoMemUsageRegion::BASE:
            *out = Kernel::GetMemoryRegion(Kernel::MemoryRegion::BASE)->used;
            break;
        default:
            LOG_ERROR(Kernel_SVC, "unknown GetSystemInfo type=0 region: param=%d", param);
            *out = 0;
            break;
        }
        break;
    case SystemInfoType::KERNEL_ALLOCATED_PAGES:
        LOG_ERROR(Kernel_SVC, "unimplemented GetSystemInfo type=2 param=%d", param);
        *out = 0;
        break;
    case SystemInfoType::KERNEL_SPAWNED_PIDS:
        *out = 5;
        break;
    default:
        LOG_ERROR(Kernel_SVC, "unknown GetSystemInfo type=%u param=%d", type, param);
        *out = 0;
        break;
    }

    // This function never returns an error, even if invalid parameters were passed.
    return RESULT_SUCCESS;
}

static ResultCode GetProcessInfo(s64* out, Kernel::Handle process_handle, u32 type) {
    LOG_TRACE(Kernel_SVC, "called process=0x%08X type=%u", process_handle, type);

    using Kernel::Process;
    Kernel::SharedPtr<Process> process = Kernel::g_handle_table.Get<Process>(process_handle);
    if (process == nullptr)
        return ERR_INVALID_HANDLE;

    switch (type) {
    case 0:
    case 2:
        // TODO(yuriks): Type 0 returns a slightly higher number than type 2, but I'm not sure
        // what's the difference between them.
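        // Both types report the process's committed memory; allocations happen in page-sized
        // chunks, so a sum that is not page-aligned indicates a bookkeeping error.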
        *out = process->heap_used + process->linear_heap_used + process->misc_memory_used;
        if (*out % Memory::PAGE_SIZE != 0) {
            LOG_ERROR(Kernel_SVC, "called, memory size not page-aligned");
            return Kernel::ERR_MISALIGNED_SIZE;
        }
        break;
    case 1:
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
    case 8:
        // These are valid, but not implemented yet
        LOG_ERROR(Kernel_SVC, "unimplemented GetProcessInfo type=%u", type);
        break;
    case 20:
        *out = Memory::FCRAM_PADDR - process->GetLinearHeapBase();
        break;
    case 21:
    case 22:
    case 23:
        // These return a different error value than higher invalid values
        LOG_ERROR(Kernel_SVC, "unknown GetProcessInfo type=%u", type);
        return Kernel::ERR_NOT_IMPLEMENTED;
    default:
        LOG_ERROR(Kernel_SVC, "unknown GetProcessInfo type=%u", type);
        return Kernel::ERR_INVALID_ENUM_VALUE;
    }

    return RESULT_SUCCESS;
}

namespace {
struct FunctionDef {
    using Func = void();

    u32 id;
    Func* func;
    const char* name;
};
} // namespace

static const FunctionDef SVC_Table[] = {
    {0x00, nullptr, "Unknown"},
    {0x01, HLE::Wrap<ControlMemory>, "ControlMemory"},
    {0x02, HLE::Wrap<QueryMemory>, "QueryMemory"},
    {0x03, nullptr, "ExitProcess"},
    {0x04, nullptr, "GetProcessAffinityMask"},
    {0x05, nullptr, "SetProcessAffinityMask"},
    {0x06, nullptr, "GetProcessIdealProcessor"},
    {0x07, nullptr, "SetProcessIdealProcessor"},
    {0x08, HLE::Wrap<CreateThread>, "CreateThread"},
    {0x09, ExitThread, "ExitThread"},
    {0x0A, HLE::Wrap<SleepThread>, "SleepThread"},
    {0x0B, HLE::Wrap<GetThreadPriority>, "GetThreadPriority"},
    {0x0C, HLE::Wrap<SetThreadPriority>, "SetThreadPriority"},
    {0x0D, nullptr, "GetThreadAffinityMask"},
    {0x0E, nullptr, "SetThreadAffinityMask"},
    {0x0F, nullptr, "GetThreadIdealProcessor"},
    {0x10, nullptr, "SetThreadIdealProcessor"},
    {0x11, nullptr, "GetCurrentProcessorNumber"},
    {0x12, nullptr, "Run"},
    {0x13, HLE::Wrap<CreateMutex>, "CreateMutex"},
    {0x14, HLE::Wrap<ReleaseMutex>, "ReleaseMutex"},
    {0x15, HLE::Wrap<CreateSemaphore>, "CreateSemaphore"},
    {0x16, HLE::Wrap<ReleaseSemaphore>, "ReleaseSemaphore"},
    {0x17, HLE::Wrap<CreateEvent>, "CreateEvent"},
    {0x18, HLE::Wrap<SignalEvent>, "SignalEvent"},
    {0x19, HLE::Wrap<ClearEvent>, "ClearEvent"},
    {0x1A, HLE::Wrap<CreateTimer>, "CreateTimer"},
    {0x1B, HLE::Wrap<SetTimer>, "SetTimer"},
    {0x1C, HLE::Wrap<CancelTimer>, "CancelTimer"},
    {0x1D, HLE::Wrap<ClearTimer>, "ClearTimer"},
    {0x1E, HLE::Wrap<CreateMemoryBlock>, "CreateMemoryBlock"},
    {0x1F, HLE::Wrap<MapMemoryBlock>, "MapMemoryBlock"},
    {0x20, HLE::Wrap<UnmapMemoryBlock>, "UnmapMemoryBlock"},
    {0x21, HLE::Wrap<CreateAddressArbiter>, "CreateAddressArbiter"},
    {0x22, HLE::Wrap<ArbitrateAddress>, "ArbitrateAddress"},
    {0x23, HLE::Wrap<CloseHandle>, "CloseHandle"},
    {0x24, HLE::Wrap<WaitSynchronization1>, "WaitSynchronization1"},
    {0x25, HLE::Wrap<WaitSynchronizationN>, "WaitSynchronizationN"},
    {0x26, nullptr, "SignalAndWait"},
    {0x27, HLE::Wrap<DuplicateHandle>, "DuplicateHandle"},
    {0x28, HLE::Wrap<GetSystemTick>, "GetSystemTick"},
    {0x29, nullptr, "GetHandleInfo"},
    {0x2A, HLE::Wrap<GetSystemInfo>, "GetSystemInfo"},
    {0x2B, HLE::Wrap<GetProcessInfo>, "GetProcessInfo"},
    {0x2C, nullptr, "GetThreadInfo"},
    {0x2D, HLE::Wrap<ConnectToPort>, "ConnectToPort"},
    {0x2E, nullptr, "SendSyncRequest1"},
    {0x2F, nullptr, "SendSyncRequest2"},
    {0x30, nullptr, "SendSyncRequest3"},
    {0x31, nullptr, "SendSyncRequest4"},
    {0x32, HLE::Wrap<SendSyncRequest>, "SendSyncRequest"},
    {0x33, nullptr, "OpenProcess"},
    {0x34, nullptr, "OpenThread"},
    {0x35, HLE::Wrap<GetProcessId>, "GetProcessId"},
    {0x36, HLE::Wrap<GetProcessIdOfThread>, "GetProcessIdOfThread"},
    {0x37, HLE::Wrap<GetThreadId>, "GetThreadId"},
    {0x38, HLE::Wrap<GetResourceLimit>, "GetResourceLimit"},
    {0x39, HLE::Wrap<GetResourceLimitLimitValues>, "GetResourceLimitLimitValues"},
    {0x3A, HLE::Wrap<GetResourceLimitCurrentValues>, "GetResourceLimitCurrentValues"},
    {0x3B, nullptr, "GetThreadContext"},
    {0x3C, HLE::Wrap<Break>, "Break"},
    {0x3D, HLE::Wrap<OutputDebugString>, "OutputDebugString"},
    {0x3E, nullptr, "ControlPerformanceCounter"},
    {0x3F, nullptr, "Unknown"},
    {0x40, nullptr, "Unknown"},
    {0x41, nullptr, "Unknown"},
    {0x42, nullptr, "Unknown"},
    {0x43, nullptr, "Unknown"},
    {0x44, nullptr, "Unknown"},
    {0x45, nullptr, "Unknown"},
    {0x46, nullptr, "Unknown"},
    {0x47, HLE::Wrap<CreatePort>, "CreatePort"},
    {0x48, HLE::Wrap<CreateSessionToPort>, "CreateSessionToPort"},
    {0x49, HLE::Wrap<CreateSession>, "CreateSession"},
    {0x4A, HLE::Wrap<AcceptSession>, "AcceptSession"},
    {0x4B, nullptr, "ReplyAndReceive1"},
    {0x4C, nullptr, "ReplyAndReceive2"},
    {0x4D, nullptr, "ReplyAndReceive3"},
    {0x4E, nullptr, "ReplyAndReceive4"},
    {0x4F, HLE::Wrap<ReplyAndReceive>, "ReplyAndReceive"},
    {0x50, nullptr, "BindInterrupt"},
    {0x51, nullptr, "UnbindInterrupt"},
    {0x52, nullptr, "InvalidateProcessDataCache"},
    {0x53, nullptr, "StoreProcessDataCache"},
    {0x54, nullptr, "FlushProcessDataCache"},
    {0x55, nullptr, "StartInterProcessDma"},
    {0x56, nullptr, "StopDma"},
    {0x57, nullptr, "GetDmaState"},
    {0x58, nullptr, "RestartDma"},
    {0x59, nullptr, "Unknown"},
    {0x5A, nullptr, "Unknown"},
    {0x5B, nullptr, "Unknown"},
    {0x5C, nullptr, "Unknown"},
    {0x5D, nullptr, "Unknown"},
    {0x5E, nullptr, "Unknown"},
    {0x5F, nullptr, "Unknown"},
    {0x60, nullptr, "DebugActiveProcess"},
    {0x61, nullptr, "BreakDebugProcess"},
    {0x62, nullptr, "TerminateDebugProcess"},
    {0x63, nullptr, "GetProcessDebugEvent"},
    {0x64, nullptr, "ContinueDebugEvent"},
    {0x65, nullptr, "GetProcessList"},
    {0x66, nullptr, "GetThreadList"},
    {0x67, nullptr, "GetDebugThreadContext"},
    {0x68, nullptr, "SetDebugThreadContext"},
    {0x69, nullptr, "QueryDebugProcessMemory"},
    {0x6A, nullptr, "ReadProcessMemory"},
    {0x6B, nullptr, "WriteProcessMemory"},
    {0x6C, nullptr, "SetHardwareBreakPoint"},
    {0x6D, nullptr, "GetDebugThreadParam"},
    {0x6E, nullptr, "Unknown"},
    {0x6F, nullptr, "Unknown"},
    {0x70, nullptr, "ControlProcessMemory"},
    {0x71, nullptr, "MapProcessMemory"},
    {0x72, nullptr, "UnmapProcessMemory"},
    {0x73, nullptr, "CreateCodeSet"},
    {0x74, nullptr, "RandomStub"},
    {0x75, nullptr, "CreateProcess"},
    {0x76, nullptr, "TerminateProcess"},
    {0x77, nullptr, "SetProcessResourceLimits"},
    {0x78, nullptr, "CreateResourceLimit"},
    {0x79, nullptr, "SetResourceLimitValues"},
    {0x7A, nullptr, "AddCodeSegment"},
    {0x7B, nullptr, "Backdoor"},
    {0x7C, nullptr, "KernelSetState"},
    {0x7D, HLE::Wrap<QueryProcessMemory>, "QueryProcessMemory"},
};

static const FunctionDef* GetSVCInfo(u32 func_num) {
    if (func_num >= ARRAY_SIZE(SVC_Table)) {
        LOG_ERROR(Kernel_SVC, "unknown svc=0x%02X", func_num);
        return nullptr;
    }
    return &SVC_Table[func_num];
}

MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));

void CallSVC(u32 immediate) {
    MICROPROFILE_SCOPE(Kernel_SVC);

    // Lock the global kernel mutex when we enter the kernel HLE.
    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);

    const FunctionDef* info = GetSVCInfo(immediate);
    if (info) {
        if (info->func) {
            info->func();
        } else {
            LOG_ERROR(Kernel_SVC, "unimplemented SVC function %s(..)", info->name);
        }
    }
}

} // namespace SVC