// Copyright 2008 Dolphin Emulator Project / 2017 Citra Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
|
2013-10-02 01:10:47 +02:00
|
|
|
|
2017-11-25 14:56:57 +01:00
|
|
|
/**
 * This is a system to schedule events into the emulated machine's future. Time is measured
 * in main CPU clock cycles.
 *
 * To schedule an event, you first have to register its type. This is where you pass in the
 * callback. You then schedule events using the type id you get back.
 *
 * The int cyclesLate that the callbacks get is how many cycles late it was.
 * So to schedule a new event on a regular basis:
 * inside callback:
 *   ScheduleEvent(periodInCycles - cyclesLate, callback, "whatever")
 */
|
|
|
|
|
2018-08-06 04:07:28 +02:00
|
|
|
#include <chrono>
|
2016-09-20 17:21:23 +02:00
|
|
|
#include <functional>
|
2017-11-25 14:56:57 +01:00
|
|
|
#include <limits>
|
2015-01-06 02:17:49 +01:00
|
|
|
#include <string>
|
2018-10-27 21:53:20 +02:00
|
|
|
#include <unordered_map>
|
|
|
|
#include <vector>
|
2020-01-12 21:01:29 +01:00
|
|
|
#include <boost/serialization/split_member.hpp>
|
|
|
|
#include <boost/serialization/vector.hpp>
|
2016-09-20 17:21:23 +02:00
|
|
|
#include "common/common_types.h"
|
2017-11-25 14:56:57 +01:00
|
|
|
#include "common/logging/log.h"
|
2018-10-27 21:53:20 +02:00
|
|
|
#include "common/threadsafe_queue.h"
|
2020-03-28 16:47:36 +01:00
|
|
|
#include "core/global.h"
|
2015-01-06 02:17:49 +01:00
|
|
|
|
2017-11-25 14:56:57 +01:00
|
|
|
// The timing we get from the assembly is 268,111,855.956 Hz.
// It is possible that this number isn't just an integer because the compiler could have
// optimized the multiplication by a multiply-by-constant division.
// Rounding to the nearest integer should be fine.
constexpr u64 BASE_CLOCK_RATE_ARM11 = 268111856;
// Largest magnitude that can be multiplied by BASE_CLOCK_RATE_ARM11 without overflowing
// a signed 64-bit result; used by the saturating conversion helpers below.
constexpr u64 MAX_VALUE_TO_MULTIPLY = std::numeric_limits<s64>::max() / BASE_CLOCK_RATE_ARM11;
|
2013-10-02 01:10:47 +02:00
|
|
|
|
2020-04-18 20:52:56 +02:00
|
|
|
/// Converts a millisecond count to ARM11 clock cycles.
constexpr s64 msToCycles(int ms) {
    // An int millisecond count is small enough that the 64-bit product below cannot overflow.
    const s64 milliseconds = static_cast<s64>(ms);
    return BASE_CLOCK_RATE_ARM11 * milliseconds / 1000;
}
|
|
|
|
|
2020-04-18 20:52:56 +02:00
|
|
|
/// Converts a (possibly fractional) millisecond count to ARM11 clock cycles.
constexpr s64 msToCycles(float ms) {
    // Evaluate the scale factor first, matching the original left-to-right float arithmetic.
    const float cycles_per_ms = BASE_CLOCK_RATE_ARM11 * 0.001f;
    return static_cast<s64>(cycles_per_ms * ms);
}
|
|
|
|
|
2020-04-18 20:52:56 +02:00
|
|
|
/// Converts a (possibly fractional) millisecond count to ARM11 clock cycles.
constexpr s64 msToCycles(double ms) {
    // Evaluate the scale factor first, matching the original left-to-right double arithmetic.
    const double cycles_per_ms = BASE_CLOCK_RATE_ARM11 * 0.001;
    return static_cast<s64>(cycles_per_ms * ms);
}
|
|
|
|
|
2020-04-18 20:52:56 +02:00
|
|
|
/// Converts a (possibly fractional) microsecond count to ARM11 clock cycles.
constexpr s64 usToCycles(float us) {
    // Evaluate the scale factor first, matching the original left-to-right float arithmetic.
    const float cycles_per_us = BASE_CLOCK_RATE_ARM11 * 0.000001f;
    return static_cast<s64>(cycles_per_us * us);
}
|
|
|
|
|
2020-04-18 20:52:56 +02:00
|
|
|
/// Converts a microsecond count to ARM11 clock cycles.
constexpr s64 usToCycles(int us) {
    // An int microsecond count is small enough that the 64-bit product below cannot overflow.
    const s64 microseconds = static_cast<s64>(us);
    return BASE_CLOCK_RATE_ARM11 * microseconds / 1000000;
}
|
|
|
|
|
|
|
|
/// Converts a microsecond count to ARM11 clock cycles, saturating on overflow.
inline s64 usToCycles(s64 us) {
    const s64 max_multiplier = static_cast<s64>(MAX_VALUE_TO_MULTIPLY);
    // Even the whole-second part would overflow when multiplied: saturate.
    if (us / 1000000 > max_multiplier) {
        LOG_ERROR(Core_Timing, "Integer overflow, use max value");
        return std::numeric_limits<s64>::max();
    }
    // Multiplying directly would overflow: divide first and accept the lost sub-second precision.
    if (us > max_multiplier) {
        LOG_DEBUG(Core_Timing, "Time very big, do rounding");
        return BASE_CLOCK_RATE_ARM11 * (us / 1000000);
    }
    return us * BASE_CLOCK_RATE_ARM11 / 1000000;
}
|
|
|
|
|
|
|
|
/// Converts an unsigned microsecond count to ARM11 clock cycles, saturating on overflow.
inline s64 usToCycles(u64 us) {
    // Even the whole-second part would overflow when multiplied: saturate.
    if (us / 1000000 > MAX_VALUE_TO_MULTIPLY) {
        LOG_ERROR(Core_Timing, "Integer overflow, use max value");
        return std::numeric_limits<s64>::max();
    }
    // Multiplying directly would overflow: divide first and accept the lost sub-second precision.
    if (us > MAX_VALUE_TO_MULTIPLY) {
        LOG_DEBUG(Core_Timing, "Time very big, do rounding");
        return BASE_CLOCK_RATE_ARM11 * static_cast<s64>(us / 1000000);
    }
    const s64 microseconds = static_cast<s64>(us);
    return BASE_CLOCK_RATE_ARM11 * microseconds / 1000000;
}
|
|
|
|
|
2020-04-18 20:52:56 +02:00
|
|
|
/// Converts a (possibly fractional) nanosecond count to ARM11 clock cycles.
constexpr s64 nsToCycles(float ns) {
    // Evaluate the scale factor first, matching the original left-to-right float arithmetic.
    const float cycles_per_ns = BASE_CLOCK_RATE_ARM11 * 0.000000001f;
    return static_cast<s64>(cycles_per_ns * ns);
}
|
|
|
|
|
2020-04-18 20:52:56 +02:00
|
|
|
/// Converts a nanosecond count to ARM11 clock cycles.
constexpr s64 nsToCycles(int ns) {
    // An int nanosecond count is small enough that the 64-bit product below cannot overflow.
    const s64 nanoseconds = static_cast<s64>(ns);
    return BASE_CLOCK_RATE_ARM11 * nanoseconds / 1000000000;
}
|
|
|
|
|
|
|
|
/// Converts a nanosecond count to ARM11 clock cycles, saturating on overflow.
inline s64 nsToCycles(s64 ns) {
    const s64 max_multiplier = static_cast<s64>(MAX_VALUE_TO_MULTIPLY);
    // Even the whole-second part would overflow when multiplied: saturate.
    if (ns / 1000000000 > max_multiplier) {
        LOG_ERROR(Core_Timing, "Integer overflow, use max value");
        return std::numeric_limits<s64>::max();
    }
    // Multiplying directly would overflow: divide first and accept the lost sub-second precision.
    if (ns > max_multiplier) {
        LOG_DEBUG(Core_Timing, "Time very big, do rounding");
        return BASE_CLOCK_RATE_ARM11 * (ns / 1000000000);
    }
    return ns * BASE_CLOCK_RATE_ARM11 / 1000000000;
}
|
|
|
|
|
|
|
|
/// Converts an unsigned nanosecond count to ARM11 clock cycles, saturating on overflow.
inline s64 nsToCycles(u64 ns) {
    // Even the whole-second part would overflow when multiplied: saturate.
    if (ns / 1000000000 > MAX_VALUE_TO_MULTIPLY) {
        LOG_ERROR(Core_Timing, "Integer overflow, use max value");
        return std::numeric_limits<s64>::max();
    }
    // Multiplying directly would overflow: divide first and accept the lost
    // sub-second precision.
    if (ns > MAX_VALUE_TO_MULTIPLY) {
        LOG_DEBUG(Core_Timing, "Time very big, do rounding");
        // Divide in u64 BEFORE casting to s64 (matches usToCycles(u64)). In this
        // branch ns may exceed s64 max (it is only bounded by
        // MAX_VALUE_TO_MULTIPLY * 1e9 ~= 3.4e19), so casting first would wrap to a
        // negative value and produce a garbage cycle count.
        return BASE_CLOCK_RATE_ARM11 * static_cast<s64>(ns / 1000000000);
    }
    // Here ns <= MAX_VALUE_TO_MULTIPLY, so the cast and the product are both safe.
    return (BASE_CLOCK_RATE_ARM11 * static_cast<s64>(ns)) / 1000000000;
}
|
|
|
|
|
2020-04-18 20:52:56 +02:00
|
|
|
/// Converts ARM11 clock cycles to nanoseconds.
constexpr u64 cyclesToNs(s64 cycles) {
    // NOTE(review): cycles * 1000000000 is evaluated in s64 and can overflow for very
    // large cycle counts — presumably callers pass bounded deltas; confirm at call sites.
    const s64 scaled = cycles * 1000000000;
    return scaled / BASE_CLOCK_RATE_ARM11;
}
|
|
|
|
|
2020-04-18 20:52:56 +02:00
|
|
|
/// Converts ARM11 clock cycles to microseconds.
constexpr s64 cyclesToUs(s64 cycles) {
    const s64 scaled = cycles * 1000000;
    return scaled / BASE_CLOCK_RATE_ARM11;
}
|
|
|
|
|
2020-04-18 20:52:56 +02:00
|
|
|
/// Converts ARM11 clock cycles to milliseconds.
constexpr u64 cyclesToMs(s64 cycles) {
    const s64 scaled = cycles * 1000;
    return scaled / BASE_CLOCK_RATE_ARM11;
}
|
2013-10-02 01:10:47 +02:00
|
|
|
|
2018-10-27 21:53:20 +02:00
|
|
|
namespace Core {
|
2018-08-06 03:27:11 +02:00
|
|
|
|
|
|
|
/// Callback invoked when a scheduled event fires. `userdata` is the value given to
/// ScheduleEvent; `cycles_late` is how many cycles past the scheduled time it ran.
using TimedCallback = std::function<void(u64 userdata, int cycles_late)>;

/// A registered event type: the callback to run plus a non-owning pointer to the
/// event's name (presumably the key stored in Timing::event_types, which stays
/// stable across rehashes — see the note on event_types).
struct TimingEventType {
    TimedCallback callback;
    const std::string* name;
};
|
|
|
|
|
|
|
|
/**
 * Owns per-core Timers, the registry of event types, and the scheduling interface
 * used to queue events into the emulated machine's future.
 */
class Timing {
public:
    /// A single scheduled event instance; ordered by time, with fifo_order as tie-breaker.
    struct Event {
        s64 time;       // tick at which the event fires (see ScheduleEvent)
        u64 fifo_order; // monotonically increasing id so same-tick events keep schedule order
        u64 userdata;   // opaque value forwarded to the event callback
        const TimingEventType* type;

        bool operator>(const Event& right) const;
        bool operator<(const Event& right) const;

    private:
        // Events are serialized by their type *name* rather than the TimingEventType
        // pointer (which is not stable across runs), hence the split save/load.
        template <class Archive>
        void save(Archive& ar, const unsigned int) const {
            ar& time;
            ar& fifo_order;
            ar& userdata;
            std::string name = *(type->name);
            ar << name;
        }

        template <class Archive>
        void load(Archive& ar, const unsigned int) {
            ar& time;
            ar& fifo_order;
            ar& userdata;
            std::string name;
            ar >> name;
            // Re-resolve the type pointer by name on load. NOTE(review): passing a
            // nullptr callback presumably returns the already-registered type —
            // confirm against RegisterEvent's implementation.
            type = Global<Timing>().RegisterEvent(name, nullptr);
        }
        friend class boost::serialization::access;

        BOOST_SERIALIZATION_SPLIT_MEMBER()
    };

    // Currently Service::HID::pad_update_ticks is the smallest interval for an event
    // that gets always scheduled. Therefore we use this as orientation for
    // MAX_SLICE_LENGTH. For performance, bigger slice lengths are desired, though this
    // will lead to cores desyncing. But we never want to schedule events into the
    // current slice, because then cores might have to run small slices to sync up
    // again. This is especially important for events that are always scheduled and
    // repeated.
    static constexpr int MAX_SLICE_LENGTH = BASE_CLOCK_RATE_ARM11 / 234;

    /// Per-core timer: owns that core's event queue, slice length and downcount.
    class Timer {
    public:
        Timer();
        ~Timer();

        s64 GetMaxSliceLength() const;

        // Called at slice boundaries; see the is_timer_sane note below for the
        // contract dispatcher loops must follow.
        void Advance();

        void SetNextSlice(s64 max_slice_length = MAX_SLICE_LENGTH);

        void Idle();

        u64 GetTicks() const;
        u64 GetIdleTicks() const;

        void AddTicks(u64 ticks);

        s64 GetDowncount() const;

        void ForceExceptionCheck(s64 cycles);

        // Drains ts_queue into event_queue; intended to run on the emu thread
        // (see the ts_queue comment below).
        void MoveEvents();

        // Use these two functions to adjust the guest system tick on host blocking
        // operations, so that the guest can tell how much time passed during the
        // host call.
        u32 StartAdjust();
        void EndAdjust(u32 start_adjust_handle);

    private:
        friend class Timing;
        // The queue is a min-heap using std::make_heap/push_heap/pop_heap.
        // We don't use std::priority_queue because we need to be able to serialize,
        // unserialize and erase arbitrary events (RemoveEvent()) regardless of the
        // queue order. These aren't accommodated by the standard adaptor class.
        std::vector<Event> event_queue;
        u64 event_fifo_id = 0;
        // The queue for storing events from other threads threadsafely until they
        // are added to event_queue by the emu thread (see MoveEvents()).
        Common::MPSCQueue<Event> ts_queue;
        // Are we in a function that has been called from Advance()?
        // If events are scheduled from a function that gets called from Advance(),
        // don't change slice_length and downcount.
        // The time between CoreTiming being initialized and the first call to
        // Advance() is considered the slice boundary between slice -1 and slice 0.
        // Dispatcher loops must call Advance() before executing the first cycle of
        // each slice to prepare the slice length and downcount for that slice.
        bool is_timer_sane = true;

        s64 slice_length = MAX_SLICE_LENGTH;
        s64 downcount = MAX_SLICE_LENGTH;
        s64 executed_ticks = 0;
        u64 idled_cycles = 0;

        // Host-clock state backing StartAdjust()/EndAdjust().
        std::chrono::time_point<std::chrono::steady_clock> adjust_value_last;
        u32 adjust_value_curr_handle = 0;
        // Stores a scaling for the internal clockspeed. Changing this number results
        // in under/overclocking the guest cpu.
        double cpu_clock_scale = 1.0;

        template <class Archive>
        void serialize(Archive& ar, const unsigned int) {
            MoveEvents();
            // NOTE: ts_queue should be empty now
            // TODO(SaveState): Remove the next two lines when we break compatibility
            s64 x;
            ar& x; // to keep compatibility with old save states that stored global_timer
            ar& event_queue;
            ar& event_fifo_id;
            ar& slice_length;
            ar& downcount;
            ar& executed_ticks;
            ar& idled_cycles;
        }
        friend class boost::serialization::access;
    };

    explicit Timing(std::size_t num_cores, u32 cpu_clock_percentage);

    ~Timing(){};

    /**
     * Returns the event_type identifier. If name is not unique, it will assert.
     */
    TimingEventType* RegisterEvent(const std::string& name, TimedCallback callback);

    void ScheduleEvent(s64 cycles_into_future, const TimingEventType* event_type, u64 userdata = 0,
                       std::size_t core_id = std::numeric_limits<std::size_t>::max());

    void UnscheduleEvent(const TimingEventType* event_type, u64 userdata);

    /// We only permit one event of each type in the queue at a time.
    void RemoveEvent(const TimingEventType* event_type);

    void SetCurrentTimer(std::size_t core_id);

    s64 GetTicks() const;

    s64 GetGlobalTicks() const;

    /**
     * Updates the value of the cpu clock scaling to the new percentage.
     */
    void UpdateClockSpeed(u32 cpu_clock_percentage);

    std::chrono::microseconds GetGlobalTimeUs() const;

    std::shared_ptr<Timer> GetTimer(std::size_t cpu_id);

    // Used after deserializing to unprotect the event queue.
    void UnlockEventQueue() {
        event_queue_locked = false;
    }

private:
    // unordered_map stores each element separately as a linked list node so pointers
    // to elements remain stable regardless of rehashes/resizing.
    std::unordered_map<std::string, TimingEventType> event_types = {};

    std::vector<std::shared_ptr<Timer>> timers;
    // Non-owning; selected via SetCurrentTimer().
    Timer* current_timer = nullptr;

    // When true, the event queue can't be modified. Used while deserializing to
    // workaround destructor side effects.
    bool event_queue_locked = false;

    template <class Archive>
    void serialize(Archive& ar, const unsigned int file_version) {
        // event_types set during initialization of other things
        ar& timers;
        if (file_version == 0) {
            // Version-0 save states stored the current timer as a shared_ptr.
            std::shared_ptr<Timer> x;
            ar& x;
            current_timer = x.get();
        } else {
            ar& current_timer;
        }
        if (Archive::is_loading::value) {
            event_queue_locked = true;
        }
    }
    friend class boost::serialization::access;
};
|
|
|
|
|
|
|
|
} // namespace Core
|
2020-04-22 07:44:58 +02:00
|
|
|
|
|
|
|
BOOST_CLASS_VERSION(Core::Timing, 1)
|