summaryrefslogtreecommitdiff
path: root/methods/server.cc
diff options
context:
space:
mode:
authorJulian Andres Klode <julian.klode@canonical.com>2018-08-31 16:07:07 +0200
committerJulian Andres Klode <julian.klode@canonical.com>2018-09-28 13:53:11 +0200
commit439ae5eceae0b44315d278e963332369f0decfa0 (patch)
treeb5f701ed24b22e493ac61b585f71fc78866e476e /methods/server.cc
parent1bdcf519e7d35cbf249b28a4395364cb6c754f02 (diff)
http: Stop pipeline after close only if it was not filled before
It is perfectly valid behavior for a server to respond with Connection: close eventually, even when pipelining. Turning off pipelining due to that is wrong. For example, some Ubuntu mirrors close the connection after 101 requests. If I have more packages to install, only the first 101 would benefit from pipelining. This commit introduces a new check to only turn off pipelining for future connections if the pipeline for this connection did not have 3 successful fetches before; that should work quite well to detect broken server/proxy combinations like in bug 832113. (cherry picked from commit df696650b7a8c58bbd92e0e1619e956f21010a96) LP: #1794957 (cherry picked from commit 3de7454c796f245371c33076ae01529d6d36d715)
Diffstat (limited to 'methods/server.cc')
-rw-r--r--methods/server.cc20
1 file changed, 17 insertions, 3 deletions
diff --git a/methods/server.cc b/methods/server.cc
index cac77e24c..6a7cae2bc 100644
--- a/methods/server.cc
+++ b/methods/server.cc
@@ -40,6 +40,10 @@ string ServerMethod::FailFile;
int ServerMethod::FailFd = -1;
time_t ServerMethod::FailTime = 0;
+// Number of successful requests in a pipeline needed to continue
+// pipelining after a connection reset.
+constexpr int PIPELINE_MIN_SUCCESSFUL_ANSWERS_TO_CONTINUE = 3;
+
// ServerState::RunHeaders - Get the headers before the data /*{{{*/
// ---------------------------------------------------------------------
/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header
@@ -230,8 +234,11 @@ bool ServerState::HeaderLine(string Line)
/* Some servers send error pages (as they are dynamically generated)
for simplicity via a connection close instead of e.g. chunked,
so assuming an always closing server only if we get a file + close */
- if (Result >= 200 && Result < 300)
+ if (Result >= 200 && Result < 300 && PipelineAnswersReceived < PIPELINE_MIN_SUCCESSFUL_ANSWERS_TO_CONTINUE)
+ {
PipelineAllowed = false;
+ PipelineAnswersReceived = 0;
+ }
}
else if (stringcasecmp(Val,"keep-alive") == 0)
Persistent = true;
@@ -512,7 +519,10 @@ int ServerMethod::Loop()
// Reset the pipeline
if (Server->IsOpen() == false)
+ {
QueueBack = Queue;
+ Server->PipelineAnswersReceived = 0;
+ }
// Connnect to the host
if (Server->Open() == false)
@@ -632,6 +642,10 @@ int ServerMethod::Loop()
BeforeI = I;
}
}
+ if (Server->Pipeline == true)
+ {
+ Server->PipelineAnswersReceived++;
+ }
Res.TakeHashes(*resultHashes);
URIDone(Res);
}
@@ -759,8 +773,8 @@ unsigned long long ServerMethod::FindMaximumObjectSizeInQueue() const /*{{{*/
}
/*}}}*/
ServerMethod::ServerMethod(char const * const Binary, char const * const Ver,unsigned long const Flags) :/*{{{*/
- aptMethod(Binary, Ver, Flags), Server(nullptr), File(NULL), PipelineDepth(10),
- AllowRedirect(false), Debug(false)
+ aptMethod(Binary, Ver, Flags), Server(nullptr), File(NULL),
+ AllowRedirect(false), Debug(false), PipelineDepth(10)
{
}
/*}}}*/