Use small_vector for VMM TranslateRange results

This was the source of a lot of heap allocations; moving to small_vector helps to avoid most of them.
This commit is contained in:
Billy Laws 2022-07-31 13:11:57 +01:00
parent 1fe6d92970
commit b6b04fa6c5
2 changed files with 6 additions and 3 deletions

View File

@ -3,6 +3,7 @@
#pragma once
#include <boost/container/small_vector.hpp>
#include <concepts>
#include <common.h>
@ -10,6 +11,8 @@ namespace skyline {
template<typename VaType, size_t AddressSpaceBits>
concept AddressSpaceValid = std::is_unsigned_v<VaType> && sizeof(VaType) * 8 >= AddressSpaceBits;
using TranslatedAddressRange = boost::container::small_vector<span<u8>, 1>;
struct EmptyStruct {};
/**
@ -116,7 +119,7 @@ namespace skyline {
/**
* @return A vector of all physical ranges inside of the given virtual range
*/
std::vector<span<u8>> TranslateRange(VaType virt, VaType size, std::function<void(span<u8>)> cpuAccessCallback = {});
TranslatedAddressRange TranslateRange(VaType virt, VaType size, std::function<void(span<u8>)> cpuAccessCallback = {});
void Read(u8 *destination, VaType virt, VaType size, std::function<void(span<u8>)> cpuAccessCallback = {});

View File

@ -233,7 +233,7 @@ namespace skyline {
munmap(sparseMap, SparseMapSize);
}
MM_MEMBER(std::vector<span<u8>>)::TranslateRange(VaType virt, VaType size, std::function<void(span<u8>)> cpuAccessCallback) {
MM_MEMBER(TranslatedAddressRange)::TranslateRange(VaType virt, VaType size, std::function<void(span<u8>)> cpuAccessCallback) {
TRACE_EVENT("containers", "FlatMemoryManager::TranslateRange");
std::scoped_lock lock(this->blockMutex);
@ -247,7 +247,7 @@ namespace skyline {
u8 *blockPhys{predecessor->phys + (virt - predecessor->virt)};
VaType blockSize{std::min(successor->virt - virt, size)};
std::vector<span<u8>> ranges;
TranslatedAddressRange ranges;
while (size) {
// Return a zeroed out map to emulate sparse mappings