Rename RangeTable to SegmentTable

It was determined that `RangeTable` was too ambiguous a name, as it could be interpreted as holding ranges rather than looking them up. To avoid confusion, the terminology has been changed from `range` to `segment`: "segment table" more clearly describes a table comprised of descriptors regarding segments, and it avoids any overlap with terminology concerning "pages" (which would be overly specific for this data structure) or the ambiguous "ranges".
PixelyIon 2022-08-06 16:02:05 +05:30
parent 5398eff045
commit c72316d9f6
2 changed files with 49 additions and 49 deletions
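
For context, here is a minimal sketch of how the renamed class is used, mirroring the `bufferTable` declaration in the second changed file below; the `Buffer` stand-in type and the addresses are illustrative placeholders, not part of the commit:

    #include <common/segment_table.h>

    struct Buffer; // stand-in for the GPU buffer type tracked by bufferTable

    // A 2^39-byte address space tracked at 4 KiB (L1Bits = 12) granularity,
    // with each L2 entry covering 512 KiB (L2Bits = 19), as declared below
    skyline::SegmentTable<Buffer *, 1ULL << 39, 12, 19> table;

    void Example(Buffer *buffer) {
        table.Set(0x100000, 0x110000, buffer); // mark [0x100000, 0x110000) as belonging to buffer
        Buffer *hit{table[0x100000]};          // O(1) lookup, returns nullptr (0'd out) when unset
    }

Pointer-keyed `Set`/`operator[]` overloads also exist for tables where host addresses are the key, which is what the `EnablePointerAccess` template parameter is for.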

common/segment_table.h (renamed from common/range_table.h)

@@ -8,50 +8,50 @@
 namespace skyline {
     /**
-     * @brief A two-level range table implementation that utilizes kernel-backed demand paging, the multi-level aspect is to allow for a RangeType that applies to a large number of ranges to be mapped at a lower granularity (L2) and go to a higher granularity (L1) as needed
-     * @tparam RangeType The type of range to use for the range table entries, this must be a trivial type as it'll be returned filled with 0s when a range is unset
-     * @tparam Size The size of the table in terms of units (not ranges unless L1Bits = 1), this represents size in bytes for a range table covering address space
-     * @tparam L1Bits The size of an L1 range as a power of 2, this should be lower than L2Bits and will determine the minimum granularity of the table
-     * @tparam L2Bits The size of an L2 range as a power of 2, this should be higher than L1Bits and will determine the maximum granularity of the table
+     * @brief A two-level segment table implementation that utilizes kernel-backed demand paging, the multi-level aspect is to allow for a SegmentType that applies to a large number of segments to be mapped at a lower granularity (L2) and go to a higher granularity (L1) as needed
+     * @tparam SegmentType The type of segment to use for the segment table entries, this must be a trivial type as it'll be returned filled with 0s when a segment is unset
+     * @tparam Size The size of the table in terms of units (not segments unless L1Bits = 1), this represents size in bytes for a segment table covering address space
+     * @tparam L1Bits The size of an L1 segment as a power of 2, this should be lower than L2Bits and will determine the minimum granularity of the table
+     * @tparam L2Bits The size of an L2 segment as a power of 2, this should be higher than L1Bits and will determine the maximum granularity of the table
      * @tparam EnablePointerAccess Whether or not to enable pointer access to the table, this is useful when host addresses are used as the key for the table
      * @note This class is **NOT** thread-safe, any access to the table must be protected by a mutex
      */
-    template<typename RangeType, size_t Size, size_t L1Bits, size_t L2Bits, bool EnablePointerAccess = false> requires std::is_trivial_v<RangeType>
-    class RangeTable {
+    template<typename SegmentType, size_t Size, size_t L1Bits, size_t L2Bits, bool EnablePointerAccess = false> requires std::is_trivial_v<SegmentType>
+    class SegmentTable {
      private:
        static constexpr size_t L1Size{1 << L1Bits}, L1Entries{util::DivideCeil(Size, L1Size)};
-       span<RangeType, L1Entries> level1Table; //!< The first level of the range table, this is the highest granularity of the table and contains only the range
+       span<SegmentType, L1Entries> level1Table; //!< The first level of the segment table, this is the highest granularity of the table and contains only the segment
        /**
-        * @brief An entry in a range table level aside from the lowest level which directly holds the type, this has an associated range and a flag for whether the lookup should move to a higher granularity (and the corresponding lower level)
+        * @brief An entry in a segment table level aside from the lowest level which directly holds the type, this has an associated segment and a flag for whether the lookup should move to a higher granularity (and the corresponding lower level)
         */
        struct alignas(8) RangeEntry {
-           RangeType range; //!< The range associated with the entry, this is 0'd out if the entry is unset
-           bool valid; //!< If the associated range is valid, the entry must not be accessed without checking validity first
+           SegmentType segment; //!< The segment associated with the entry, this is 0'd out if the entry is unset
+           bool valid; //!< If the associated segment is valid, the entry must not be accessed without checking validity first
            bool level1Set; //!< Whether to ignore this level and look in the next level table instead
        };
        static constexpr size_t L2Size{1 << L2Bits}, L2Entries{util::DivideCeil(Size, L2Size)}, L1inL2Count{L2Size / L1Size};
-       span<RangeEntry, L2Entries> level2Table; //!< The second level of the range table, this is the lowest granularity of the table
+       span<RangeEntry, L2Entries> level2Table; //!< The second level of the segment table, this is the lowest granularity of the table
        template<typename Type, size_t Amount>
        static span<Type, Amount> AllocateTable() {
            void *ptr{mmap(nullptr, Amount * sizeof(Type), PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0)};
            if (ptr == MAP_FAILED)
-               throw exception{"Failed to allocate 0x{:X} bytes of memory for range table: {}", Amount * sizeof(Type), strerror(errno)};
+               throw exception{"Failed to allocate 0x{:X} bytes of memory for segment table: {}", Amount * sizeof(Type), strerror(errno)};
            return span<Type, Amount>(static_cast<Type *>(ptr), Amount);
        }
      public:
-       RangeTable() : level1Table{AllocateTable<RangeType, L1Entries>()}, level2Table{AllocateTable<RangeEntry, L2Entries>()} {}
+       SegmentTable() : level1Table{AllocateTable<SegmentType, L1Entries>()}, level2Table{AllocateTable<RangeEntry, L2Entries>()} {}
-       RangeTable(const RangeTable &other) : level1Table{AllocateTable<RangeType, L1Entries>()}, level2Table{AllocateTable<RangeEntry, L2Entries>()} {
+       SegmentTable(const SegmentTable &other) : level1Table{AllocateTable<SegmentType, L1Entries>()}, level2Table{AllocateTable<RangeEntry, L2Entries>()} {
            level1Table.copy_from(other.level1Table);
            level2Table.copy_from(other.level2Table);
        }
-       RangeTable &operator=(const RangeTable &other) {
-           level1Table = AllocateTable<RangeType, L1Entries>();
+       SegmentTable &operator=(const SegmentTable &other) {
+           level1Table = AllocateTable<SegmentType, L1Entries>();
            level2Table = AllocateTable<RangeEntry, L2Entries>();
            level1Table.copy_from(other.level1Table);
@@ -59,15 +59,15 @@ namespace skyline {
            return *this;
        }
-       RangeTable(RangeTable &&other) : level1Table{std::exchange(other.level1Table, nullptr)}, level2Table{std::exchange(other.level2Table, nullptr)} {}
+       SegmentTable(SegmentTable &&other) : level1Table{std::exchange(other.level1Table, nullptr)}, level2Table{std::exchange(other.level2Table, nullptr)} {}
-       RangeTable &operator=(RangeTable &&other) {
+       SegmentTable &operator=(SegmentTable &&other) {
            level1Table = std::exchange(other.level1Table, nullptr);
            level2Table = std::exchange(other.level2Table, nullptr);
            return *this;
        }
-       ~RangeTable() {
+       ~SegmentTable() {
            if (level1Table.valid())
                munmap(level1Table.data(), level1Table.size_bytes());
            if (level2Table.valid())
@@ -75,44 +75,44 @@ namespace skyline {
        }
        /**
-        * @return A read-only reference to the range at the given index, this'll return a 0'd out range if the range is unset
+        * @return A read-only reference to the segment at the given index, this'll return a 0'd out segment if the segment is unset
         */
-       const RangeType &operator[](size_t index) const {
+       const SegmentType &operator[](size_t index) const {
            auto &l2Entry{level2Table[index >> L2Bits]};
            if (l2Entry.valid)
-               return l2Entry.range;
+               return l2Entry.segment;
            return level1Table[index >> L1Bits];
        }
        /**
-        * @brief Sets a single range at the specified index to the supplied value
+        * @brief Sets a single segment at the specified index to the supplied value
         */
-       void Set(size_t index, RangeType range) {
+       void Set(size_t index, SegmentType segment) {
            auto &l2Entry{level2Table[index >> L2Bits]};
            if (l2Entry.valid) {
-               if (l2Entry.range == range)
+               if (l2Entry.segment == segment)
                    return;
                l2Entry.valid = false;
                l2Entry.level1Set = true;
                size_t l1L2Start{(index >> L2Bits) << (L2Bits - L1Bits)};
                for (size_t i{l1L2Start}; i < l1L2Start + L1inL2Count; i++)
-                   level1Table[i] = l2Entry.range;
-               level1Table[index >> L1Bits] = range;
+                   level1Table[i] = l2Entry.segment;
+               level1Table[index >> L1Bits] = segment;
            } else if (l2Entry.level1Set) {
-               level1Table[index >> L1Bits] = range;
+               level1Table[index >> L1Bits] = segment;
            } else {
-               l2Entry.range = range;
+               l2Entry.segment = segment;
                l2Entry.valid = true;
                l2Entry.level1Set = true;
            }
        }
        /**
-        * @brief Sets a range of ranges between the start and end to the supplied value
+        * @brief Sets a range of segments between the start and end to the supplied value
         */
-       void Set(size_t start, size_t end, RangeType range) {
+       void Set(size_t start, size_t end, SegmentType segment) {
            size_t l2AlignedAddress{util::AlignUp(start, L2Size)};
            size_t l1StartPaddingStart{start >> L1Bits};
@@ -125,17 +125,17 @@ namespace skyline {
                size_t l1L2Start{(start >> L2Bits) << (L2Bits - L1Bits)};
                for (size_t i{l1L2Start}; i < l1StartPaddingStart; i++)
-                   level1Table[i] = l2Entry.range;
+                   level1Table[i] = l2Entry.segment;
                for (size_t i{l1StartPaddingStart}; i < l1StartPaddingEnd; i++)
-                   level1Table[i] = range;
+                   level1Table[i] = segment;
            } else if (!l2Entry.level1Set) {
-               l2Entry.range = range;
+               l2Entry.segment = segment;
                l2Entry.valid = true;
                l2Entry.level1Set = false;
            } else {
                for (size_t i{l1StartPaddingStart}; i < l1StartPaddingEnd; i++)
-                   level1Table[i] = range;
+                   level1Table[i] = segment;
            }
        }
@@ -143,7 +143,7 @@ namespace skyline {
            size_t l2IndexEnd{end >> L2Bits};
            for (size_t i{l2IndexStart}; i < l2IndexEnd; i++) {
                auto &l2Entry{level2Table[i]};
-               l2Entry.range = range;
+               l2Entry.segment = segment;
                l2Entry.valid = true;
                l2Entry.level1Set = false;
            }
@@ -157,17 +157,17 @@ namespace skyline {
                l2Entry.level1Set = true;
                for (size_t i{l1EndPaddingStart}; i < l1EndPaddingEnd; i++)
-                   level1Table[i] = range;
+                   level1Table[i] = segment;
                for (size_t i{l1EndPaddingEnd}; i < l1EndPaddingStart + L1inL2Count; i++)
-                   level1Table[i] = l2Entry.range;
+                   level1Table[i] = l2Entry.segment;
            } else if (!l2Entry.level1Set) {
-               l2Entry.range = range;
+               l2Entry.segment = segment;
                l2Entry.valid = true;
                l2Entry.level1Set = false;
            } else {
                for (size_t i{l1EndPaddingStart}; i < l1EndPaddingEnd; i++)
-                   level1Table[i] = range;
+                   level1Table[i] = segment;
            }
        }
    }
@@ -176,23 +176,23 @@ namespace skyline {
        template<typename T>
        requires std::is_pointer_v<T>
-       const RangeType &operator[](T pointer) const {
+       const SegmentType &operator[](T pointer) const {
            return (*this)[reinterpret_cast<size_t>(pointer)];
        }
        template<typename T>
        requires std::is_pointer_v<T>
-       void Set(T pointer, RangeType range) {
-           Set(reinterpret_cast<size_t>(pointer), range);
+       void Set(T pointer, SegmentType segment) {
+           Set(reinterpret_cast<size_t>(pointer), segment);
        }
        template<typename T>
        requires std::is_pointer_v<T>
-       void Set(T start, T end, RangeType range) {
-           Set(reinterpret_cast<size_t>(start), reinterpret_cast<size_t>(end), range);
+       void Set(T start, T end, SegmentType segment) {
+           Set(reinterpret_cast<size_t>(start), reinterpret_cast<size_t>(end), segment);
        }
-       void Set(span<u8> span, RangeType range) {
-           Set(reinterpret_cast<size_t>(span.begin().base()), reinterpret_cast<size_t>(span.end().base()), range);
+       void Set(span<u8> span, SegmentType segment) {
+           Set(reinterpret_cast<size_t>(span.begin().base()), reinterpret_cast<size_t>(span.end().base()), segment);
        }
    };
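
To make the two-level lookup concrete, the index arithmetic works out as follows for the granularities used by `bufferTable` in the next file (L1Bits = 12, L2Bits = 19); the index value here is only an example:

    constexpr size_t L1Bits{12}, L2Bits{19};
    constexpr size_t index{0x12345678};        // a byte address within the covered range

    constexpr size_t l1Index{index >> L1Bits}; // the exact 4 KiB granule the index falls in
    constexpr size_t l2Index{index >> L2Bits}; // the 512 KiB region the index falls in
    static_assert((1ULL << (L2Bits - L1Bits)) == 128, "L1 entries covered by one L2 entry");

`operator[]` consults level2Table[l2Index] first; when that entry is valid, the entire 512 KiB region shares a single segment and the lookup ends there, otherwise it falls back to the exact entry at level1Table[l1Index].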

gpu/buffer_manager.h

@@ -3,7 +3,7 @@
 #pragma once
-#include <common/range_table.h>
+#include <common/segment_table.h>
 #include "buffer.h"
 namespace skyline::gpu {
@@ -21,7 +21,7 @@ namespace skyline::gpu {
        static constexpr size_t AddressSpaceSize{1ULL << 39}; //!< The size of the guest CPU AS in bytes
        static constexpr size_t PageSizeBits{12}; //!< The size of a single page of the guest CPU AS as a power of two (4 KiB == 1 << 12)
        static constexpr size_t L2EntryGranularity{19}; //!< The amount of AS (in bytes) a single L2 PTE covers (512 KiB == 1 << 19)
-       RangeTable<Buffer*, AddressSpaceSize, PageSizeBits, L2EntryGranularity> bufferTable; //!< A page table of all buffer mappings for O(1) lookups on full matches
+       SegmentTable<Buffer*, AddressSpaceSize, PageSizeBits, L2EntryGranularity> bufferTable; //!< A page table of all buffer mappings for O(1) lookups on full matches
        std::mutex megaBufferMutex; //!< Synchronizes access to the allocated megabuffers
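
For a sense of scale: the two tables behind this `bufferTable` declaration are huge in virtual address space but cheap physically, since `AllocateTable()` maps them with `MAP_NORESERVE` and relies on kernel demand paging. A sketch of the sizes, assuming 8-byte pointers and the 16 bytes that `alignas(8) RangeEntry` (an 8-byte segment plus two bools) pads out to:

    constexpr size_t AddressSpaceSize{1ULL << 39};
    constexpr size_t L1Entries{AddressSpaceSize >> 12}; // 2^27 4 KiB pages
    constexpr size_t L2Entries{AddressSpaceSize >> 19}; // 2^20 512 KiB regions

    constexpr size_t l1Bytes{L1Entries * 8};  // 1 GiB of virtual space for level1Table
    constexpr size_t l2Bytes{L2Entries * 16}; // 16 MiB of virtual space for level2Table

Only the pages of these mappings that are actually written get backed by physical memory, which is what makes the 1 GiB level-one reservation practical.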