2019-09-24 22:54:27 +02:00
|
|
|
#include <sched.h>
|
2020-01-11 05:52:25 +01:00
|
|
|
#include <unistd.h>
|
2019-12-05 16:35:34 +01:00
|
|
|
#include "os.h"
|
|
|
|
#include "jvm.h"
|
2020-01-07 03:36:08 +01:00
|
|
|
#include "nce/guest.h"
|
|
|
|
#include "nce/instr.h"
|
|
|
|
#include "kernel/svc.h"
|
2020-01-21 08:16:57 +01:00
|
|
|
#include "nce.h"
|
2019-09-24 22:54:27 +02:00
|
|
|
|
|
|
|
extern bool Halt;
|
2020-01-11 05:52:25 +01:00
|
|
|
extern skyline::GroupMutex jniMtx;
|
2019-09-24 22:54:27 +02:00
|
|
|
|
|
|
|
namespace skyline {
|
2020-01-07 03:36:08 +01:00
|
|
|
/**
 * @brief Entry point of a host "kernel" thread that services a single guest thread
 * @param thread The PID of the guest thread this kernel thread corresponds to
 *
 * Loops servicing the guest thread's context: dispatches SVCs when the guest traps
 * into the kernel and dumps state on a guest crash. Exits when Halt is set or the
 * guest thread crashes; if this is the process's main thread, it halts the emulator.
 */
void NCE::KernelThread(pid_t thread) {
    try {
        state.thread = state.process->threads.at(thread);
        state.ctx = reinterpret_cast<ThreadContext *>(state.thread->ctxMemory->kernel.address);
        while (true) {
            // Hold the JNI group mutex for the duration of one service iteration
            std::lock_guard jniGd(jniMtx);
            if (Halt)
                break;
            if (state.ctx->state == ThreadState::WaitKernel) {
                // The guest trapped into the kernel; the SVC ID was stashed in commandId
                const u16 svc = static_cast<const u16>(state.ctx->commandId);
                try {
                    if (kernel::svc::SvcTable[svc]) {
                        state.logger->Debug("SVC called 0x{:X}", svc);
                        (*kernel::svc::SvcTable[svc])(state);
                    } else
                        throw exception("Unimplemented SVC 0x{:X}", svc);
                } catch (const std::exception &e) {
                    // Re-throw with the SVC ID attached for easier diagnosis
                    throw exception("{} (SVC: 0x{:X})", e.what(), svc);
                }
                state.ctx->state = ThreadState::WaitRun; // Hand control back to the guest
            } else if (state.ctx->state == ThreadState::GuestCrash) {
                state.logger->Warn("Thread with PID {} has crashed due to signal: {}", thread, strsignal(state.ctx->commandId));
                ThreadTrace();
                state.ctx->state = ThreadState::WaitRun;
                break; // A crashed guest thread cannot be serviced any further
            }
        }
    } catch (const std::exception &e) {
        state.logger->Error(e.what());
    } catch (...) {
        state.logger->Error("An unknown exception has occurred");
    }
    // If the main thread exits, halt the entire emulator
    if (thread == state.process->pid) {
        jniMtx.lock(GroupMutex::Group::Group2);
        state.os->KillThread(thread);
        Halt = true;
        jniMtx.unlock();
    }
}
|
|
|
|
|
2020-01-07 03:36:08 +01:00
|
|
|
/**
 * @brief Constructs the NCE (Native Code Execution) instance bound to a device state
 * @param state The device state to operate on
 */
NCE::NCE(DeviceState &state) : state(state) {}
|
2019-09-24 22:54:27 +02:00
|
|
|
|
2020-01-09 02:37:54 +01:00
|
|
|
/**
 * @brief Joins every spawned kernel thread so none of them outlives the NCE instance
 */
NCE::~NCE() {
    for (auto &[pid, kernelThread] : threadMap)
        kernelThread->join();
}
|
|
|
|
|
2019-09-24 22:54:27 +02:00
|
|
|
/**
 * @brief The main emulation loop: pumps the service manager and GPU event loops
 * until Halt is raised, then signals every other thread to halt as well
 */
void NCE::Execute() {
    while (true) {
        // Each iteration runs under the JNI group mutex so the JVM side can pause us
        std::lock_guard jniGd(jniMtx);
        if (Halt)
            break;
        state.os->serviceManager.Loop();
        state.gpu->Loop();
    }
    // Propagate the halt to all kernel threads
    jniMtx.lock(GroupMutex::Group::Group2);
    Halt = true;
    jniMtx.unlock();
}
|
|
|
|
|
2020-01-11 05:52:25 +01:00
|
|
|
/**
 * @brief Submits a function call to a guest thread's context and spins until it completes
 * @param call The call type, written into the context's command ID
 * @param funcRegs The registers passed into the call; overwritten with the call's resulting registers
 * @param ctx The guest thread context to execute the call on
 *
 * This function will not work if optimizations are enabled as ThreadContext isn't volatile
 * and due to that is not read on every iteration of the while loop.
 * However, making ThreadContext or parts of it volatile slows down the applications as a whole.
 * So, we opted to use the hacky solution and disable optimizations for this single function.
 */
void ExecuteFunctionCtx(ThreadCall call, Registers &funcRegs, ThreadContext *ctx) __attribute__ ((optnone)) {
    ctx->commandId = static_cast<u32>(call);
    // Preserve the thread's current registers so they can be restored after the call
    Registers registers = ctx->registers;
    // Wait until the thread is idle (in init or kernel wait) before hijacking its registers
    while (ctx->state != ThreadState::WaitInit && ctx->state != ThreadState::WaitKernel);
    ctx->registers = funcRegs;
    ctx->state = ThreadState::WaitFunc;
    // Wait for the call to finish executing on the guest side
    while (ctx->state != ThreadState::WaitInit && ctx->state != ThreadState::WaitKernel);
    // Return the call's resulting registers and restore the originals
    funcRegs = ctx->registers;
    ctx->registers = registers;
}
|
|
|
|
|
2020-01-09 02:37:54 +01:00
|
|
|
/**
 * @brief Executes a function call on a specific guest thread
 * @param call The call type to dispatch
 * @param funcRegs The register state passed into the call and returned from it
 * @param thread The guest thread whose context the call runs on
 */
void NCE::ExecuteFunction(ThreadCall call, Registers &funcRegs, std::shared_ptr<kernel::type::KThread> &thread) {
    auto threadCtx = reinterpret_cast<ThreadContext *>(thread->ctxMemory->kernel.address);
    ExecuteFunctionCtx(call, funcRegs, threadCtx);
}
|
|
|
|
|
2020-01-21 08:16:57 +01:00
|
|
|
/**
 * @brief Executes a function call on the current guest thread, falling back to the main thread
 * @param call The call type to dispatch
 * @param funcRegs The register state passed into the call and returned from it
 * @throws exception if the process is currently exiting
 */
void NCE::ExecuteFunction(ThreadCall call, Registers &funcRegs) {
    if (state.process->status == kernel::type::KProcess::Status::Exiting)
        throw exception("Executing function on Exiting process");

    std::shared_ptr<kernel::type::KThread> thread;
    if (state.thread)
        thread = state.thread;
    else
        thread = state.process->threads.at(state.process->pid);

    ExecuteFunctionCtx(call, funcRegs, reinterpret_cast<ThreadContext *>(thread->ctxMemory->kernel.address));
}
|
|
|
|
|
2020-01-11 05:52:25 +01:00
|
|
|
/**
 * @brief Busy-waits until the guest thread's context leaves the NotReady state
 * @param thread The guest thread to wait on
 * @note optnone is required here: ThreadContext isn't volatile, so with optimizations
 * enabled the load in the spin loop would be hoisted and the loop would never exit
 */
void NCE::WaitThreadInit(std::shared_ptr<kernel::type::KThread> &thread) __attribute__ ((optnone)) {
    auto ctx = reinterpret_cast<ThreadContext *>(thread->ctxMemory->kernel.address);
    while (ctx->state == ThreadState::NotReady);
}
|
|
|
|
|
2020-01-07 03:36:08 +01:00
|
|
|
/**
 * @brief Initializes a guest thread's context and spawns the host kernel thread that services it
 * @param entryArg The argument placed in X0 at guest entry
 * @param handle The thread handle placed in X1 at guest entry
 * @param thread The guest thread to start
 */
void NCE::StartThread(u64 entryArg, u32 handle, std::shared_ptr<kernel::type::KThread> &thread) {
    auto ctx = reinterpret_cast<ThreadContext *>(thread->ctxMemory->kernel.address);
    // Spin until the guest thread signals it is ready for initialization
    while (ctx->state != ThreadState::WaitInit);
    ctx->tpidrroEl0 = thread->tls;
    ctx->registers.x0 = entryArg;
    ctx->registers.x1 = handle;
    // Setting the state last releases the guest thread to run
    ctx->state = ThreadState::WaitRun;
    state.logger->Debug("Starting kernel thread for guest thread: {}", thread->pid);
    threadMap[thread->pid] = std::make_shared<std::thread>(&NCE::KernelThread, this, thread->pid);
}
|
|
|
|
|
2020-01-07 03:36:08 +01:00
|
|
|
/**
 * @brief Dumps a thread's CPU state and, optionally, the instructions surrounding its PC
 * @param numHist The number of instructions to dump around PC (0 skips the instruction dump)
 * @param ctx The context to dump; defaults to the current thread's context when nullptr
 */
void NCE::ThreadTrace(u16 numHist, ThreadContext *ctx) {
    std::string raw;    // Hex dump of the raw instruction words
    std::string trace;  // Per-instruction listing with the current PC marked
    std::string regStr; // Register dump
    ctx = ctx ? ctx : state.ctx;
    if (numHist) {
        std::vector<u32> instrs(numHist);
        u64 size = sizeof(u32) * numHist;
        // Start the window so that PC sits near its end, with two instructions of lookahead
        u64 offset = ctx->pc - size + (2 * sizeof(u32));
        state.process->ReadMemory(instrs.data(), offset, size);
        for (auto &instr : instrs) {
            // Byte-swap so the word prints in memory byte order
            instr = __builtin_bswap32(instr);
            if (offset == ctx->pc)
                trace += fmt::format("\n-> 0x{:X} : 0x{:08X}", offset, instr);
            else
                trace += fmt::format("\n 0x{:X} : 0x{:08X}", offset, instr);
            raw += fmt::format("{:08X}", instr);
            offset += sizeof(u32);
        }
    }
    if (ctx->faultAddress)
        regStr += fmt::format("\nFault Address: 0x{:X}", ctx->faultAddress);
    if (ctx->sp)
        regStr += fmt::format("\nStack Pointer: 0x{:X}", ctx->sp);
    // Print the general-purpose registers two per line, left-padded for alignment
    for (u16 index = 0; index < constant::NumRegs - 1; index += 2) {
        auto xStr = index < 10 ? " X" : "X";
        regStr += fmt::format("\n{}{}: 0x{:<16X} {}{}: 0x{:X}", xStr, index, ctx->registers.regs[index], xStr, index + 1, ctx->registers.regs[index + 1]);
    }
    if (numHist) {
        state.logger->Debug("Process Trace:{}", trace);
        state.logger->Debug("Raw Instructions: 0x{}", raw);
        state.logger->Debug("CPU Context:{}", regStr);
    } else
        state.logger->Debug("CPU Context:{}", regStr);
}
|
|
|
|
|
2020-01-11 05:52:25 +01:00
|
|
|
/**
 * @brief Patch stub substituted for "MRS X0, CNTPCT_EL0": reads CNTVCT_EL0 and scales it
 * by CNTFRQ_EL0 / 19200000.0 (0x4172... is the IEEE-754 bit pattern of 19200000.0),
 * leaving the result in X0. X1/X2 and Q0-Q2 are preserved on the stack.
 * NOTE(review): the scale direction (freq / 19200000 rather than 19200000 / freq) is only
 * an identity when the host counter frequency is exactly 19.2 MHz — verify the intent.
 */
const std::array<u32, 18> cntpctEl0X0 = {
    0xA9BF0BE1, // STP X1, X2, [SP, #-16]!
    0x3C9F0FE0, // STR Q0, [SP, #-16]!
    0x3C9F0FE1, // STR Q1, [SP, #-16]!
    0x3C9F0FE2, // STR Q2, [SP, #-16]!
    0xD53BE001, // MRS X1, CNTFRQ_EL0
    0xD53BE042, // MRS X2, CNTVCT_EL0
    0x9E630020, // UCVTF D0, X1 (the frequency; encoding reads X1, not X0 as previously commented)
    0xD2C9F001, // MOV X1, 87411174408192
    0xF2E82E41, // MOVK X1, 0x4172, LSL 48
    0x9E670022, // FMOV D2, X1
    0x9E630041, // UCVTF D1, X2 (the counter; encoding reads X2, not X1 as previously commented)
    0x1E621800, // FDIV D0, D0, D2
    0x1E610800, // FMUL D0, D0, D1
    0x9E790000, // FCVTZU X0, D0
    0x3CC107E2, // LDR Q2, [SP], #16
    0x3CC107E1, // LDR Q1, [SP], #16
    0x3CC107E0, // LDR Q0, [SP], #16
    0xA8C10BE1, // LDP X1, X2, [SP], #16
};
|
2019-09-24 22:54:27 +02:00
|
|
|
|
2020-01-11 05:52:25 +01:00
|
|
|
/**
 * @brief Patch stub substituted for "MRS X1, CNTPCT_EL0": reads CNTVCT_EL0 and scales it
 * by CNTFRQ_EL0 / 19200000.0 (0x4172... is the IEEE-754 bit pattern of 19200000.0),
 * leaving the result in X1. X0/X2 and Q0-Q2 are preserved on the stack.
 * Mirrors cntpctEl0X0 with the X0/X1 roles swapped.
 */
const std::array<u32, 18> cntpctEl0X1 = {
    0xA9BF0BE0, // STP X0, X2, [SP, #-16]!
    0x3C9F0FE0, // STR Q0, [SP, #-16]!
    0x3C9F0FE1, // STR Q1, [SP, #-16]!
    0x3C9F0FE2, // STR Q2, [SP, #-16]!
    0xD53BE000, // MRS X0, CNTFRQ_EL0
    0xD53BE042, // MRS X2, CNTVCT_EL0
    0x9E630000, // UCVTF D0, X0 (was 0x9E630020 = UCVTF D0, X1, which read a stale caller register instead of CNTFRQ in X0)
    0xD2C9F000, // MOV X0, 87411174408192
    0xF2E82E40, // MOVK X0, 0x4172, LSL 48
    0x9E670002, // FMOV D2, X0
    0x9E630041, // UCVTF D1, X2
    0x1E621800, // FDIV D0, D0, D2
    0x1E610800, // FMUL D0, D0, D1
    0x9E790001, // FCVTZU X1, D0 (encoding writes X1, the destination of this variant)
    0x3CC107E2, // LDR Q2, [SP], #16
    0x3CC107E1, // LDR Q1, [SP], #16
    0x3CC107E0, // LDR Q0, [SP], #16
    0xA8C10BE0, // LDP X0, X2, [SP], #16
};
|
2019-09-24 22:54:27 +02:00
|
|
|
|
2020-01-11 05:52:25 +01:00
|
|
|
/**
 * @brief Patch stub substituted for "MRS Xn, CNTPCT_EL0" (n >= 2): reads CNTVCT_EL0 and
 * scales it by CNTFRQ_EL0 / 19200000.0, leaving the result in Xn. X0/X1 and Q0-Q2 are
 * preserved on the stack. Non-const on purpose: PatchCode fills in element 13 (the FCVTZU)
 * with the actual destination register at patch time.
 */
std::array<u32, 18> cntpctEl0Xn = {
    0xA9BF07E0, // STP X0, X1, [SP, #-16]!
    0x3C9F0FE0, // STR Q0, [SP, #-16]!
    0x3C9F0FE1, // STR Q1, [SP, #-16]!
    0x3C9F0FE2, // STR Q2, [SP, #-16]!
    0xD53BE000, // MRS X0, CNTFRQ_EL0
    0xD53BE041, // MRS X1, CNTVCT_EL0
    0x9E630000, // UCVTF D0, X0
    0xD2C9F000, // MOV X0, 87411174408192
    0xF2E82E40, // MOVK X0, 0x4172, LSL 48
    0x9E670002, // FMOV D2, X0
    0x9E630021, // UCVTF D1, X1
    0x1E621800, // FDIV D0, D0, D2
    0x1E610800, // FMUL D0, D0, D1
    0x00000000, // FCVTZU Xn, D0 (Set at runtime)
    0x3CC107E2, // LDR Q2, [SP], #16
    0x3CC107E1, // LDR Q1, [SP], #16
    0x3CC107E0, // LDR Q0, [SP], #16
    0xA8C107E0, // LDP X0, X1, [SP], #16
};
|
2019-12-25 20:03:57 +01:00
|
|
|
|
2020-01-07 03:36:08 +01:00
|
|
|
/**
 * @brief Scans guest code and patches privileged instructions so they can run in userspace
 * @param code The guest code to patch in place (SVC/MRS instructions become branches)
 * @param baseAddress The address the code will be loaded at in the guest address space
 * @param offset The initial distance (in bytes) from the first instruction to the patch section
 * @return The patch section: the context save/load routines, the SVC handler, and one
 * trampoline per patched instruction, appended in scan order
 *
 * Patched instructions: SVC (trapped into the host kernel thread), and MRS reads of
 * TPIDRRO_EL0, CNTPCT_EL0 and CNTFRQ_EL0 (emulated inline).
 */
std::vector<u32> NCE::PatchCode(std::vector<u8> &code, u64 baseAddress, i64 offset) {
    u32 *start = reinterpret_cast<u32 *>(code.data());
    u32 *end = start + (code.size() / sizeof(u32));
    // Distance from the current instruction to the start of the patch section;
    // both offsets shrink by one instruction per loop iteration (see loop tail)
    i64 patchOffset = offset;

    // Reserve the head of the patch for the guest context save/load routines and the SVC handler
    std::vector<u32> patch((guest::saveCtxSize + guest::loadCtxSize + guest::svcHandlerSize) / sizeof(u32));

    std::memcpy(patch.data(), reinterpret_cast<void *>(&guest::saveCtx), guest::saveCtxSize);
    offset += guest::saveCtxSize;

    std::memcpy(reinterpret_cast<u8 *>(patch.data()) + guest::saveCtxSize,
                reinterpret_cast<void *>(&guest::loadCtx), guest::loadCtxSize);
    offset += guest::loadCtxSize;

    std::memcpy(reinterpret_cast<u8 *>(patch.data()) + guest::saveCtxSize + guest::loadCtxSize,
                reinterpret_cast<void *>(&guest::svcHandler), guest::svcHandlerSize);
    offset += guest::svcHandlerSize;

    for (u32 *address = start; address < end; address++) {
        auto instrSvc = reinterpret_cast<instr::Svc *>(address);
        auto instrMrs = reinterpret_cast<instr::Mrs *>(address);

        if (instrSvc->Verify()) {
            // SVC: branch to a trampoline that saves the context, invokes the SVC handler
            // with the PC and SVC ID, reloads the context and branches back
            instr::B bjunc(offset);
            constexpr u32 strLr = 0xF81F0FFE; // STR LR, [SP, #-16]!
            offset += sizeof(strLr);
            instr::BL bSvCtx(patchOffset - offset);
            offset += sizeof(bSvCtx);

            // NOTE(review): (address - start) is an instruction index, not a byte offset —
            // confirm the consumer of X0 expects index-relative PCs
            auto movPc = instr::MoveU64Reg(regs::X0, baseAddress + (address - start));
            offset += sizeof(u32) * movPc.size();
            instr::Movz movCmd(regs::W1, static_cast<u16>(instrSvc->value));
            offset += sizeof(movCmd);
            instr::BL bSvcHandler((patchOffset + guest::saveCtxSize + guest::loadCtxSize) - offset);
            offset += sizeof(bSvcHandler);

            instr::BL bLdCtx((patchOffset + guest::saveCtxSize) - offset);
            offset += sizeof(bLdCtx);
            constexpr u32 ldrLr = 0xF84107FE; // LDR LR, [SP], #16
            offset += sizeof(ldrLr);
            // Branch back to the instruction following the patched one
            instr::B bret(-offset + sizeof(u32));
            offset += sizeof(bret);

            *address = bjunc.raw;
            patch.push_back(strLr);
            patch.push_back(bSvCtx.raw);
            for (auto &instr : movPc)
                patch.push_back(instr);
            patch.push_back(movCmd.raw);
            patch.push_back(bSvcHandler.raw);
            patch.push_back(bLdCtx.raw);
            patch.push_back(ldrLr);
            patch.push_back(bret.raw);
        } else if (instrMrs->Verify()) {
            if (instrMrs->srcReg == constant::TpidrroEl0) {
                // MRS Xn, TPIDRRO_EL0: emulated by loading the TLS pointer via TPIDR_EL0;
                // X0 is only spilled/restored when it isn't the destination itself
                instr::B bjunc(offset);
                u32 strX0{};
                if (instrMrs->destReg != regs::X0) {
                    strX0 = 0xF81F0FE0; // STR X0, [SP, #-16]!
                    offset += sizeof(strX0);
                }
                u32 mrsX0 = 0xD53BD040; // MRS X0, TPIDR_EL0
                offset += sizeof(mrsX0);
                u32 ldrTls = 0xF9408000; // LDR X0, [X0, #256]
                offset += sizeof(ldrTls);
                u32 movXn{};
                u32 ldrX0{};
                if (instrMrs->destReg != regs::X0) {
                    movXn = instr::Mov(regs::X(instrMrs->destReg), regs::X0).raw;
                    offset += sizeof(movXn);
                    ldrX0 = 0xF84107E0; // LDR X0, [SP], #16
                    offset += sizeof(ldrX0);
                }
                instr::B bret(-offset + sizeof(u32));
                offset += sizeof(bret);

                *address = bjunc.raw;
                if (strX0)
                    patch.push_back(strX0);
                patch.push_back(mrsX0);
                patch.push_back(ldrTls);
                if (movXn)
                    patch.push_back(movXn);
                if (ldrX0)
                    patch.push_back(ldrX0);
                patch.push_back(bret.raw);
            } else if (instrMrs->srcReg == constant::CntpctEl0) {
                // MRS Xn, CNTPCT_EL0: substitute one of the pre-assembled counter-scaling
                // stubs, selected by the destination register
                instr::B bjunc(offset);
                if (instrMrs->destReg == 0)
                    offset += cntpctEl0X0.size() * sizeof(u32);
                else if (instrMrs->destReg == 1)
                    offset += cntpctEl0X1.size() * sizeof(u32);
                else
                    offset += cntpctEl0Xn.size() * sizeof(u32);
                instr::B bret(-offset + sizeof(u32));
                offset += sizeof(bret);

                *address = bjunc.raw;
                if (instrMrs->destReg == 0)
                    for (auto &instr : cntpctEl0X0)
                        patch.push_back(instr);
                else if (instrMrs->destReg == 1)
                    for (auto &instr : cntpctEl0X1)
                        patch.push_back(instr);
                else {
                    // Fill the FCVTZU placeholder (element 13) with the real destination register
                    // NOTE(review): mutating the shared cntpctEl0Xn template is not thread-safe
                    // if PatchCode can run concurrently — confirm single-threaded use
                    cntpctEl0Xn[13] = instr::Fcvtzu(regs::X(instrMrs->destReg), 0).raw;
                    for (auto &instr : cntpctEl0Xn)
                        patch.push_back(instr);
                }
                patch.push_back(bret.raw);
            } else if (instrMrs->srcReg == constant::CntfrqEl0) {
                // MRS Xn, CNTFRQ_EL0: replace with an immediate move of the Tegra X1 frequency
                instr::B bjunc(offset);
                auto movFreq = instr::MoveU32Reg(static_cast<regs::X>(instrMrs->destReg), constant::TegraX1Freq);
                offset += sizeof(u32) * movFreq.size();
                instr::B bret(-offset + sizeof(u32));
                offset += sizeof(bret);

                *address = bjunc.raw;
                for (auto &instr : movFreq)
                    patch.push_back(instr);
                patch.push_back(bret.raw);
            }
        }

        // Moving one instruction forward brings us one instruction closer to the patch section
        offset -= sizeof(u32);
        patchOffset -= sizeof(u32);
    }
    return patch;
}
|
2019-09-24 22:54:27 +02:00
|
|
|
}
|
2019-12-25 20:03:57 +01:00
|
|
|
|