From bdd28c0edf986244fc7a2845df5537b5bc4bcf7a Mon Sep 17 00:00:00 2001
From: JosJuice <josjuice@gmail.com>
Date: Mon, 31 Aug 2020 14:46:13 +0200
Subject: [PATCH] DiscIO: Expand WIA/RVZ header size heuristic comments

---
 Source/Core/DiscIO/WIABlob.cpp | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
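
The hunk below only expands comments around the header size estimate. As a rough,
illustrative sketch of the scheme those comments describe (estimate an upper bound,
reserve that much space at the start of the file, and fall back to writing the headers
at the end if they outgrow the reservation), something along these lines could be
written. None of the names below exist in WIABlob.cpp; the real code uses
sizeof(WIAHeader1)/sizeof(WIAHeader2), the entry vectors, Common::AlignUp and
VolumeWii::BLOCK_TOTAL_SIZE instead of the loose parameters used here:

    #include <cstdint>

    // Illustrative helper mirroring Common::AlignUp: round value up to a multiple of alignment.
    constexpr uint64_t AlignUp(uint64_t value, uint64_t alignment)
    {
      return ((value + alignment - 1) / alignment) * alignment;
    }

    // Illustrative version of the upper-bound estimate described by the new comments.
    constexpr uint64_t EstimateHeadersUpperBound(uint64_t fixed_headers_size,
                                                 uint64_t partition_entries_size,
                                                 uint64_t raw_data_entries_size,
                                                 uint64_t group_entries_size,
                                                 bool rvz_with_real_compressor,
                                                 uint64_t alignment)
    {
      // 0x100 absorbs per-method compression overhead (mainly relevant for Purge).
      uint64_t upper_bound =
          fixed_headers_size + partition_entries_size + raw_data_entries_size + 0x100;

      // Only RVZ with an actual compressor gets the 9/16 assumption; None, Purge and
      // plain WIA group entries are counted at full size.
      upper_bound += rvz_with_real_compressor ? group_entries_size * 9 / 16 : group_entries_size;

      // Aligned to e.g. VolumeWii::BLOCK_TOTAL_SIZE in the real code.
      return AlignUp(upper_bound, alignment);
    }

    // If the finished headers are larger than the reserved estimate, they have to be
    // written after the data instead of in the reserved region at the start of the file.
    constexpr bool HeadersFitInReservation(uint64_t actual_headers_size, uint64_t reserved_size)
    {
      return actual_headers_size <= reserved_size;
    }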

diff --git a/Source/Core/DiscIO/WIABlob.cpp b/Source/Core/DiscIO/WIABlob.cpp
index 3faa4c38c2..30294d2817 100644
--- a/Source/Core/DiscIO/WIABlob.cpp
+++ b/Source/Core/DiscIO/WIABlob.cpp
@@ -1743,19 +1743,24 @@ WIARVZFileReader<RVZ>::Convert(BlobReader* infile, const VolumeDisc* infile_volu
   const size_t raw_data_entries_size = raw_data_entries.size() * sizeof(RawDataEntry);
   const size_t group_entries_size = group_entries.size() * sizeof(GroupEntry);
 
-  // Conservative estimate for how much space will be taken up by headers.
-  // The compression methods None and Purge have very predictable overhead,
-  // and the other methods are able to compress group entries well
+  // An estimate for how much space will be taken up by headers.
+  // We will reserve this much space at the beginning of the file, and if the headers don't
+  // fit in that space, we will need to write them at the end of the file instead.
   const u64 headers_size_upper_bound = [&] {
+    // 0x100 is added to account for compression overhead (in particular for Purge).
     u64 upper_bound = sizeof(WIAHeader1) + sizeof(WIAHeader2) + partition_entries_size +
                       raw_data_entries_size + 0x100;
 
-    // RVZ's added data in GroupEntry usually compresses well
+    // RVZ's added data in GroupEntry usually compresses well, so we'll assume the compression ratio
+    // for RVZ GroupEntries is 9 / 16 or better. This constant is somewhat arbitrarily chosen, but
+    // no games were found that get a worse compression ratio than that. There are some games that
+    // get a worse ratio than 1 / 2, such as Metroid: Other M (PAL) with the default settings.
     if (RVZ && compression_type > WIARVZCompressionType::Purge)
       upper_bound += static_cast<u64>(group_entries_size) * 9 / 16;
     else
       upper_bound += group_entries_size;
 
+    // This alignment is also somewhat arbitrary.
     return Common::AlignUp(upper_bound, VolumeWii::BLOCK_TOTAL_SIZE);
   }();