author | David Kalnischkies <david@kalnischkies.de> | 2016-04-07 17:48:17 +0200
---|---|---
committer | David Kalnischkies <david@kalnischkies.de> | 2016-04-25 15:35:52 +0200
commit | 742f67eaede80d2f9b3631d8697ebd63b8f95427 (patch) |
tree | 1331d0dc34b14005ce85c542cbf0631899cd6cfa /methods/server.cc |
parent | 6d9b79dd961242702f0079e3dab2dd1c62bbc61e (diff) |
don't ask server if we have entire file in partial/
We end up in this situation when parts of the transaction are refused
(e.g. due to a hashsum mismatch) and the update is rerun (e.g. in the
hope that this time we hit a mirror which is synced).

Previously we would ask the server with an If-Range request and in the
best case receive a 416 in response (a less featureful server might end
up sending us the entire file again, or we get the wrong file this time,
giving us a hashsum mismatch…), which is a waste of time if we already
know by checking the hashsums that we have the complete and correct file.
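The new behaviour boils down to a purely local check: if the file already sitting in partial/ has the expected size and hashsums, no server request is needed at all. Below is a minimal, self-contained sketch of that decision, not APT code; the file name, size, and the FNV-1a checksum standing in for a real digest are illustrative assumptions only.

```cpp
// Sketch of the check the patch adds (not APT code): before asking the
// server again, verify whether the file already in partial/ has the
// expected size and content hash. FNV-1a stands in for a real digest.
#include <cstdint>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <system_error>

static std::uint64_t Fnv1aOfFile(std::filesystem::path const &p)
{
   std::ifstream in(p, std::ios::binary);
   std::uint64_t h = 1469598103934665603ull;
   for (char c; in.get(c); )
      h = (h ^ static_cast<unsigned char>(c)) * 1099511628211ull;
   return h;
}

// true  -> we still have to talk to the server (resume or full re-download)
// false -> partial/ already holds the complete, correct file
static bool NeedServerRequest(std::filesystem::path const &partialFile,
                              std::uintmax_t expectedSize, std::uint64_t expectedHash)
{
   std::error_code ec;
   if (std::filesystem::file_size(partialFile, ec) != expectedSize || ec)
      return true;
   return Fnv1aOfFile(partialFile) != expectedHash;
}

int main()
{
   // File name, size and hash are made-up values for illustration only.
   if (NeedServerRequest("partial/Packages.xz", 123456, 0xdeadbeefULL))
      std::cout << "ask the server (e.g. with an If-Range request)\n";
   else
      std::cout << "file already complete, skip the server entirely\n";
}
```

In the actual patch the same comparison is done with APT's FileFd/Hashes classes against the item's ExpectedHashes, as shown in the diff below.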
Diffstat (limited to 'methods/server.cc')
-rw-r--r-- | methods/server.cc | 78 |
1 file changed, 54 insertions, 24 deletions
diff --git a/methods/server.cc b/methods/server.cc
index 322b8d94c..b323ef4f3 100644
--- a/methods/server.cc
+++ b/methods/server.cc
@@ -419,36 +419,66 @@ void ServerMethod::SigTerm(int)
    depth. */
 bool ServerMethod::Fetch(FetchItem *)
 {
-   if (Server == 0)
+   if (Server == nullptr || QueueBack == nullptr)
       return true;
 
-   // Queue the requests
-   int Depth = -1;
-   for (FetchItem *I = Queue; I != 0 && Depth < (signed)PipelineDepth;
-        I = I->Next, Depth++)
-   {
-      if (Depth >= 0)
-      {
-         // If pipelining is disabled, we only queue 1 request
-         if (Server->Pipeline == false)
-            break;
-         // if we have no hashes, do at most one such request
-         // as we can't fixup pipeling misbehaviors otherwise
-         else if (I->ExpectedHashes.usable() == false)
-            break;
-      }
-
+   // If pipelining is disabled, we only queue 1 request
+   auto const AllowedDepth = Server->Pipeline ? PipelineDepth : 0;
+   // how deep is our pipeline currently?
+   decltype(PipelineDepth) CurrentDepth = 0;
+   for (FetchItem const *I = Queue; I != QueueBack; I = I->Next)
+      ++CurrentDepth;
+
+   do {
       // Make sure we stick with the same server
-      if (Server->Comp(I->Uri) == false)
+      if (Server->Comp(QueueBack->Uri) == false)
+         break;
+
+      bool const UsableHashes = QueueBack->ExpectedHashes.usable();
+      // if we have no hashes, do at most one such request
+      // as we can't fixup pipeling misbehaviors otherwise
+      if (CurrentDepth != 0 && UsableHashes == false)
         break;
-      if (QueueBack == I)
+
+      if (UsableHashes && FileExists(QueueBack->DestFile))
       {
-         QueueBack = I->Next;
-         SendReq(I);
-         continue;
+         FileFd partial(QueueBack->DestFile, FileFd::ReadOnly);
+         Hashes wehave(QueueBack->ExpectedHashes);
+         if (QueueBack->ExpectedHashes.FileSize() == partial.FileSize())
+         {
+            if (wehave.AddFD(partial) &&
+                wehave.GetHashStringList() == QueueBack->ExpectedHashes)
+            {
+               FetchResult Res;
+               Res.Filename = QueueBack->DestFile;
+               Res.ResumePoint = QueueBack->ExpectedHashes.FileSize();
+               URIStart(Res);
+               // move item to the start of the queue as URIDone will
+               // always dequeued the first item in the queue
+               if (Queue != QueueBack)
+               {
+                  FetchItem *Prev = Queue;
+                  for (; Prev->Next != QueueBack; Prev = Prev->Next)
+                     /* look for the previous queue item */;
+                  Prev->Next = QueueBack->Next;
+                  QueueBack->Next = Queue;
+                  Queue = QueueBack;
+                  QueueBack = Prev->Next;
+               }
+               Res.TakeHashes(wehave);
+               URIDone(Res);
+               continue;
+            }
+            else
+               RemoveFile("Fetch-Partial", QueueBack->DestFile);
+         }
      }
-   }
-
+      auto const Tmp = QueueBack;
+      QueueBack = QueueBack->Next;
+      SendReq(Tmp);
+      ++CurrentDepth;
+   } while (CurrentDepth <= AllowedDepth && QueueBack != nullptr);
+
    return true;
 }
 									/*}}}*/
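The one subtle step in the new loop is the pointer shuffle behind the "move item to the start of the queue" comment: URIDone always dequeues the first element, so an item that turns out to be complete has to be spliced to the head of the singly linked queue first. Here is a standalone sketch of that splice, using a generic Node type instead of APT's FetchItem; the names and the main() driver are illustrative assumptions.

```cpp
// Standalone illustration of the reordering done in the patch: the item that
// turns out to be complete is spliced to the front of a singly linked list,
// because the "done" handler always removes the first element.
#include <cassert>

struct Node { int id; Node *Next; };

// Move 'item' (known to be reachable from 'head') to the front of the list.
// Returns the new head; '*cursor' ends up pointing at the element that
// followed 'item', mirroring how QueueBack is advanced in the patch.
static Node *MoveToFront(Node *head, Node *item, Node **cursor)
{
   if (head != item)
   {
      Node *Prev = head;
      while (Prev->Next != item)   // find the predecessor of 'item'
         Prev = Prev->Next;
      Prev->Next = item->Next;     // unlink 'item' from its old position
      item->Next = head;           // relink it in front of the old head
      head = item;
      *cursor = Prev->Next;        // continue behind the spliced-out spot
   }
   else
      *cursor = item->Next;        // already at the front: just step past it
   return head;
}

int main()
{
   Node c{3, nullptr}, b{2, &c}, a{1, &b};   // list: a -> b -> c
   Node *cursor = &b;                        // pretend 'b' is the completed item
   Node *head = MoveToFront(&a, &b, &cursor);
   assert(head == &b && head->Next == &a);   // list is now: b -> a -> c
   assert(a.Next == &c && cursor == &c);
   (void)head;
}
```

Finding the predecessor takes a linear walk, but the walk only covers the items already pipelined ahead of QueueBack, so it stays cheap in practice.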