From d7518dba50e2285c41c7002a1d86f876401fd9ea Mon Sep 17 00:00:00 2001
From: David Kalnischkies
Date: Mon, 24 Jul 2017 14:30:41 +0200
Subject: fail earlier if server answers with too much data

We tend to operate on rather large static files, which means we usually
get Content-Length information from the server. If we combine this
information with the filesize we are expecting (factoring in pipelining)
we can avoid reading a bunch of data we are ending up rejecting anyhow
by just closing the connection saving bandwidth and time both for the
server as well as the client.
---
 methods/curl.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/methods/curl.cc b/methods/curl.cc
index dfdd3b06b..a19318098 100644
--- a/methods/curl.cc
+++ b/methods/curl.cc
@@ -139,8 +139,8 @@ HttpsMethod::write_data(void *buffer, size_t size, size_t nmemb, void *userp)
       if (TotalWritten > me->https->Queue->MaximumSize)
       {
          me->https->SetFailReason("MaximumSizeExceeded");
-         _error->Error("Writing more data than expected (%llu > %llu)",
-               TotalWritten, me->https->Queue->MaximumSize);
+         _error->Error(_("File is larger than expected (%llu > %llu). Mirror sync in progress?"),
+               TotalWritten, me->https->Queue->MaximumSize);
          return 0;
       }
    }
-- 
cgit v1.2.3
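
For readers unfamiliar with how the early abort described in the commit message can be expressed against libcurl, below is a minimal, self-contained sketch. It is not APT's actual implementation: the MaximumSize constant, the header_cb callback, and the URL are hypothetical stand-ins for the real method's Queue->MaximumSize bookkeeping. The idea is simply to parse the announced Content-Length in a header callback and abort the transfer before any body data is downloaded.

```cpp
// Sketch only (not APT's code): abort a libcurl transfer as soon as the
// server announces a Content-Length larger than we are willing to accept.
#include <curl/curl.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <strings.h>

// Hypothetical limit standing in for the real method's Queue->MaximumSize.
static const unsigned long long MaximumSize = 10ULL * 1024 * 1024; // 10 MiB

static size_t header_cb(char *buffer, size_t size, size_t nitems, void *)
{
   size_t const len = size * nitems;
   // Header lines arrive one per call and are not null-terminated.
   if (len >= 15 && strncasecmp(buffer, "Content-Length:", 15) == 0)
   {
      char value[64] = {0};
      size_t n = len - 15;
      if (n >= sizeof(value))
         n = sizeof(value) - 1;
      memcpy(value, buffer + 15, n);
      unsigned long long const announced = strtoull(value, nullptr, 10);
      if (announced > MaximumSize)
         return 0; // returning a short count makes curl abort the transfer
   }
   return len;
}

int main()
{
   curl_global_init(CURL_GLOBAL_DEFAULT);
   CURL *curl = curl_easy_init();
   if (curl == nullptr)
      return 1;
   curl_easy_setopt(curl, CURLOPT_URL, "https://example.org/large-file");
   curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, header_cb);
   CURLcode const res = curl_easy_perform(curl);
   if (res != CURLE_OK)
      fprintf(stderr, "transfer aborted: %s\n", curl_easy_strerror(res));
   curl_easy_cleanup(curl);
   curl_global_cleanup();
   return res == CURLE_OK ? 0 : 1;
}
```

Aborting in the header callback is what saves the bandwidth the commit message mentions: the patched write_data check above only fires after the oversized body has started arriving, whereas a Content-Length check can close the connection before any body bytes are read.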