Account for SegmentTable insertions entirely within an L2 entry

We would previously write all L1 entries corresponding to an L2 entry, even when the input range being set ended before the end of that L2 entry. This effectively reduced the atomicity of the segment table to that of an entire L2 range and broke API guarantees, since lookups past the end of the set range would return entirely wrong segment values after being spuriously overwritten.
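To make the failure mode concrete, here is a minimal, self-contained sketch of the clamping logic the fix introduces. It is not Skyline's actual SegmentTable: the sizes, the `SketchTable` type, and the L1-only layout are hypothetical, and the L2/`level1Set` handling is omitted. It only models the start-padding fill, which is the step the commit changes.

```cpp
#include <array>
#include <cassert>
#include <cstddef>

constexpr size_t L1Bits{4};                   // 16-byte L1 granularity (hypothetical)
constexpr size_t L2Bits{8};                   // 256-byte L2 granularity (hypothetical)
constexpr size_t L2Size{size_t{1} << L2Bits};
constexpr size_t TableSize{size_t{1} << 12};  // 4 KiB of address space for the demo

constexpr size_t AlignUp(size_t value, size_t align) {
    return (value + align - 1) & ~(align - 1);
}

struct SketchTable {
    std::array<int, (TableSize >> L1Bits)> level1{};

    // Writes `segment` over [start, end); only the start-padding step of the real
    // algorithm is modelled, which is where the over-write happened
    void Set(size_t start, size_t end, int segment) {
        size_t l2AlignedAddress{AlignUp(start, L2Size)};
        size_t l1StartPaddingStart{start >> L1Bits};
        // The fix: when the range ends inside this L2 block, stop the fill at `end`
        // rather than at the next L2 boundary
        size_t l1StartPaddingEnd{l2AlignedAddress < end ? (l2AlignedAddress >> L1Bits)
                                                        : (end >> L1Bits)};
        for (size_t i{l1StartPaddingStart}; i < l1StartPaddingEnd; i++)
            level1[i] = segment;
        // ... the real code goes on to fill whole L2 entries and the end padding ...
    }

    int Lookup(size_t address) const {
        return level1[address >> L1Bits];
    }
};

int main() {
    SketchTable table;
    for (auto &entry : table.level1)
        entry = 1;                    // Pretend the whole table currently maps to segment 1

    table.Set(16, 48, 2);             // Overwrite a small range entirely within one L2 block

    assert(table.Lookup(16) == 2);    // Inside [16, 48): the new segment
    assert(table.Lookup(64) == 1);    // Past `end` but in the same L2 block: the unclamped
                                      // code would have clobbered this and returned 2
}
```

Without the `l2AlignedAddress < end` clamp, the padding fill would run all the way to the next L2 boundary, so the final lookup would incorrectly report segment 2.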
PixelyIon 2022-08-06 16:20:40 +05:30
parent c72316d9f6
commit 36b8d3c445


@@ -116,7 +116,7 @@ namespace skyline {
     size_t l2AlignedAddress{util::AlignUp(start, L2Size)};
     size_t l1StartPaddingStart{start >> L1Bits};
-    size_t l1StartPaddingEnd{l2AlignedAddress >> L1Bits};
+    size_t l1StartPaddingEnd{l2AlignedAddress < end ? (l2AlignedAddress >> L1Bits) : (end >> L1Bits)};
     if (l1StartPaddingStart != l1StartPaddingEnd) {
         auto &l2Entry{level2Table[start >> L2Bits]};
         if (l2Entry.valid) {
@@ -129,6 +129,10 @@ namespace skyline {
             for (size_t i{l1StartPaddingStart}; i < l1StartPaddingEnd; i++)
                 level1Table[i] = segment;
+            size_t l1L2End{l2AlignedAddress >> L1Bits};
+            for (size_t i{l1StartPaddingEnd}; i < l1L2End; i++)
+                level1Table[i] = l2Entry.segment;
         } else if (!l2Entry.level1Set) {
             l2Entry.segment = segment;
             l2Entry.valid = true;
@@ -139,6 +143,9 @@ namespace skyline {
         }
     }
+    if (end <= l2AlignedAddress)
+        return;
     size_t l2IndexStart{l2AlignedAddress >> L2Bits};
     size_t l2IndexEnd{end >> L2Bits};
     for (size_t i{l2IndexStart}; i < l2IndexEnd; i++) {