Add retry limit for failed chunk repairs

If repairing a chunk failed, the downloader could get stuck in an infinite loop, retrying the same chunk forever.
This commit fixes the issue by adding a retry limit (three attempts per chunk) for failed chunk repairs.
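
In isolation, the retry pattern this commit introduces looks roughly like the sketch below. This is a minimal, hedged illustration rather than the actual lgogdownloader code: downloadChunk and verifyChunk are hypothetical stand-ins for the curl transfer and MD5 check seen in the diff, and the counter names mirror those added by the commit.

    #include <iostream>

    // Hypothetical stand-ins for the curl transfer and MD5 check in the
    // diff below; chunk 2 never verifies, so it exercises the retry limit.
    static bool downloadChunk(int chunk) { return true; }
    static bool verifyChunk(int chunk) { return chunk != 2; }

    // Returns true if every chunk verifies; returns false once a chunk
    // exhausts its retry budget (previously this could loop forever).
    static bool repairChunks(int chunks)
    {
        int iChunkRetryCount = 0;
        int iChunkRetryLimit = 3;

        for (int i = 0; i < chunks; i++)
        {
            if (verifyChunk(i))
            {
                iChunkRetryCount = 0; // chunk is good: reset the budget
                continue;
            }
            if (iChunkRetryCount >= iChunkRetryLimit)
            {
                std::cerr << "Chunk " << i << ": retry limit reached" << std::endl;
                return false; // bail out instead of retrying forever
            }
            downloadChunk(i);
            iChunkRetryCount++;
            i--; // re-verify the chunk we just downloaded
        }
        return true;
    }

    int main()
    {
        std::cout << (repairChunks(5) ? "repaired" : "repair failed") << std::endl;
        return 0;
    }

The key detail is resetting the counter whenever a chunk verifies, so the limit applies per chunk rather than accumulating across the whole file.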
Sude 2017-09-01 08:08:33 +03:00
parent ac602f8188
commit 709235613f


@@ -289,7 +289,6 @@ int Downloader::login()
     if (!boost::filesystem::remove(Globals::globalConfig.curlConf.sCookiePath))
         std::cerr << "Failed to delete " << Globals::globalConfig.curlConf.sCookiePath << std::endl;
-    //if (!gogWebsite->Login(email, password))
     if (!gogWebsite->Login(email, password))
     {
         std::cerr << "HTTP: Login failed" << std::endl;
@@ -1555,6 +1554,9 @@ int Downloader::repairFile(const std::string& url, const std::string& filepath,
     // Check all chunks
     int iChunksRepaired = 0;
+    int iChunkRetryCount = 0;
+    int iChunkRetryLimit = 3;
+    bool bChunkRetryLimitReached = false;
     for (int i=0; i<chunks; i++)
     {
         off_t chunk_begin = chunk_from.at(i);
@@ -1583,7 +1585,18 @@ int Downloader::repairFile(const std::string& url, const std::string& filepath,
             std::string hash = Util::getChunkHash(chunk, chunk_size, RHASH_MD5);
             if (hash != chunk_hash.at(i))
             {
+                if (bChunkRetryLimitReached)
+                {
+                    std::cout << "Failed - chunk retry limit reached\r" << std::flush;
+                    free(chunk);
+                    res = 0;
+                    break;
+                }
+
+                if (iChunkRetryCount < 1)
                     std::cout << "Failed - downloading chunk" << std::endl;
+                else
+                    std::cout << "Failed - retrying chunk download" << std::endl;
                 // use fseeko to support large files on 32 bit platforms
                 fseeko(outfile, chunk_begin, SEEK_SET);
                 curl_easy_setopt(curlhandle, CURLOPT_URL, url.c_str());
@@ -1595,10 +1608,17 @@ int Downloader::repairFile(const std::string& url, const std::string& filepath,
                 if (Globals::globalConfig.bReport)
                     iChunksRepaired++;
                 i--; //verify downloaded chunk
+
+                iChunkRetryCount++;
+                if (iChunkRetryCount >= iChunkRetryLimit)
+                {
+                    bChunkRetryLimitReached = true;
+                }
             }
             else
             {
                 std::cout << "OK\r" << std::flush;
+                iChunkRetryCount = 0; // reset retry count
             }
             free(chunk);
             res = 1;
@@ -1608,10 +1628,17 @@ int Downloader::repairFile(const std::string& url, const std::string& filepath,
     if (Globals::globalConfig.bReport)
     {
-        std::string report_line = "Repaired [" + std::to_string(iChunksRepaired) + "/" + std::to_string(chunks) + "] " + filepath;
+        std::string report_line;
+        if (bChunkRetryLimitReached)
+            report_line = "Repair failed: " + filepath;
+        else
+            report_line = "Repaired [" + std::to_string(iChunksRepaired) + "/" + std::to_string(chunks) + "] " + filepath;
         this->report_ofs << report_line << std::endl;
     }
 
+    if (bChunkRetryLimitReached)
+        return res;
+
     // Set timestamp for downloaded file to same value as file on server
     long filetime = -1;
     CURLcode result = curl_easy_getinfo(curlhandle, CURLINFO_FILETIME, &filetime);