Optimize the hash checks when decrypting a non-hash file

This commit is contained in:
Maschell 2019-04-30 14:31:53 +02:00
parent da196e845f
commit 4b9ef5da70

View File

@ -60,12 +60,10 @@ public class NUSDecryption extends AESDecryption {
public void decryptFileStream(InputStream inputStream, OutputStream outputStream, long fileOffset, long filesize, short contentIndex, byte[] h3hash, public void decryptFileStream(InputStream inputStream, OutputStream outputStream, long fileOffset, long filesize, short contentIndex, byte[] h3hash,
long expectedSizeForHash) throws IOException, CheckSumWrongException { long expectedSizeForHash) throws IOException, CheckSumWrongException {
MessageDigest sha1 = null; MessageDigest sha1 = null;
MessageDigest sha1fallback = null;
if (h3hash != null) { if (h3hash != null) {
try { try {
sha1 = MessageDigest.getInstance("SHA1"); sha1 = MessageDigest.getInstance("SHA1");
sha1fallback = MessageDigest.getInstance("SHA1");
} catch (NoSuchAlgorithmException e) { } catch (NoSuchAlgorithmException e) {
e.printStackTrace(); e.printStackTrace();
} }
@ -86,9 +84,7 @@ public class NUSDecryption extends AESDecryption {
int inBlockBuffer; int inBlockBuffer;
long written = 0; long written = 0;
long writtenFallback = 0; long writtenHash = 0;
int skipoffset = (int) (fileOffset % 0x8000);
try { try {
// The input stream has been prepared to start 16 bytes earlier on this case. // The input stream has been prepared to start 16 bytes earlier on this case.
@ -139,35 +135,30 @@ public class NUSDecryption extends AESDecryption {
outputStream.write(output, 0, toWrite); outputStream.write(output, 0, toWrite);
if (sha1 != null && sha1fallback != null) { if (sha1 != null) {
sha1.update(output, 0, toWrite);
// In some cases it's using the hash of the whole .app file instead of the part // In some cases it's using the hash of the whole .app file instead of the part
// that's been actually used. // that's been actually used.
long toFallback = inBlockBuffer; long toFallback = inBlockBuffer;
if (writtenFallback + toFallback > expectedSizeForHash) { if (written + toFallback > expectedSizeForHash) {
toFallback = expectedSizeForHash - writtenFallback; toFallback = expectedSizeForHash - written;
} }
sha1fallback.update(output, 0, (int) toFallback); sha1.update(output, 0, (int) toFallback);
writtenFallback += toFallback; writtenHash += toFallback;
} }
if (written >= filesize && h3hash == null) { if (written >= filesize && h3hash == null) {
break; break;
} }
} while (inBlockBuffer == BLOCKSIZE); } while (inBlockBuffer == BLOCKSIZE);
if (sha1 != null && sha1fallback != null) { if (sha1 != null) {
long missingInHash = expectedSizeForHash - writtenHash;
long missingInHash = expectedSizeForHash - writtenFallback;
if (missingInHash > 0) { if (missingInHash > 0) {
sha1fallback.update(new byte[(int) missingInHash]); sha1.update(new byte[(int) missingInHash]);
} }
byte[] calculated_hash1 = sha1.digest(); byte[] calculated_hash1 = sha1.digest();
byte[] calculated_hash2 = sha1fallback.digest(); if (!Arrays.equals(calculated_hash1, h3hash)) {
byte[] expected_hash = h3hash; throw new CheckSumWrongException("hash checksum failed", calculated_hash1, h3hash);
if (!Arrays.equals(calculated_hash1, expected_hash) && !Arrays.equals(calculated_hash2, expected_hash)) {
throw new CheckSumWrongException("hash checksum failed", calculated_hash1, expected_hash);
} else { } else {
log.finest("Hash DOES match saves output stream."); log.finest("Hash DOES match saves output stream.");
} }