author     David Kalnischkies <david@kalnischkies.de>    2015-06-15 13:16:43 +0200
committer  David Kalnischkies <david@kalnischkies.de>    2015-06-15 23:34:05 +0200
commit     08ea7806458de0995414eaae852e0a5985875642 (patch)
tree       97ac4a9db80bff757d94666df4db429fbf39cbbe
parent     9f697f69cf1adaced476598cfe08ab03c76c5d18 (diff)
deal better with acquiring the same URI multiple times
This is an unlikely event for indexes and co, but it can happen quite easily, e.g. for changelogs where you want to get the changelogs for multiple binary package(version)s which happen to all be built from a single source.

The interesting part is that the Acquire system already detected this and set the item requesting the URI again to StatDone - except that this is hardly sufficient: an Item must be Complete=true as well to be considered truly done, and that is only the tip of the ::Done handling iceberg. So instead of this StatDone hack we allow QItems to be owned by multiple items and notify all owners about everything now, so that from the point of view of each item the file was downloaded just for it.
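The core of the change is that a queue item (QItem) now tracks a list of owning Items instead of a single one, and the worker notifies every owner on start, completion and failure. A minimal sketch of that ownership model, using simplified hypothetical types rather than APT's real classes:

    #include <string>
    #include <vector>

    // Illustrative stand-ins for pkgAcquire::Item and pkgAcquire::Queue::QItem.
    struct Item {
       enum State { StatIdle, StatFetching, StatDone, StatError } Status = StatIdle;
       bool Complete = false;
       std::string DestFile;
       void Done()   { Status = StatDone; Complete = true; }
       void Failed() { Status = StatError; }
    };

    struct QItem {
       std::string URI;
       std::vector<Item*> Owners;   // every item interested in this URI
       Item *Owner = nullptr;       // first owner, kept for easy access

       // Enqueueing the same URI again just adds another owner.
       void AddOwner(Item *I) {
          Owners.push_back(I);
          if (Owner == nullptr)
             Owner = I;
       }

       // One physical download finishes: every owner is told it is done,
       // so each behaves as if the file had been fetched just for it.
       void NotifyDone() {
          for (Item *O : Owners)
             O->Done();
       }
    };

    int main() {
       Item changelog1, changelog2;          // two items wanting the same changelog
       QItem q;
       q.URI = "http://example.org/foo";     // placeholder URI
       q.AddOwner(&changelog1);
       q.AddOwner(&changelog2);
       q.NotifyDone();                       // both end up StatDone and Complete
       return (changelog1.Complete && changelog2.Complete) ? 0 : 1;
    }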
-rw-r--r--  apt-pkg/acquire-item.cc                                 |  25
-rw-r--r--  apt-pkg/acquire-item.h                                  |   1
-rw-r--r--  apt-pkg/acquire-worker.cc                               | 280
-rw-r--r--  apt-pkg/acquire-worker.h                                |   3
-rw-r--r--  apt-pkg/acquire.cc                                      | 126
-rw-r--r--  apt-pkg/acquire.h                                       |  44
-rwxr-xr-x  test/integration/test-acquire-same-file-multiple-times |  80
-rwxr-xr-x  test/integration/test-apt-get-download                  |   2
8 files changed, 409 insertions(+), 152 deletions(-)
diff --git a/apt-pkg/acquire-item.cc b/apt-pkg/acquire-item.cc
index 4bf4e62f8..dc4f61b56 100644
--- a/apt-pkg/acquire-item.cc
+++ b/apt-pkg/acquire-item.cc
@@ -438,6 +438,11 @@ APT_PURE pkgAcquire * pkgAcquire::Item::GetOwner() const /*{{{*/
return Owner;
}
/*}}}*/
+pkgAcquire::ItemDesc &pkgAcquire::Item::GetItemDesc() /*{{{*/
+{
+ return Desc;
+}
+ /*}}}*/
APT_CONST bool pkgAcquire::Item::IsTrusted() const /*{{{*/
{
return false;
@@ -840,7 +845,7 @@ bool pkgAcqMetaBase::CheckDownloadDone(pkgAcqTransactionItem * const I, const st
return false;
}
- if (FileName != I->DestFile)
+ if (FileName != I->DestFile && RealFileExists(I->DestFile) == false)
{
I->Local = true;
I->Desc.URI = "copy:" + FileName;
@@ -2482,7 +2487,7 @@ void pkgAcqIndex::StageDownloadDone(string const &Message, HashStringList const
// Methods like e.g. "file:" will give us a (compressed) FileName that is
// not the "DestFile" we set, in this case we uncompress from the local file
- if (FileName != DestFile)
+ if (FileName != DestFile && RealFileExists(DestFile) == false)
Local = true;
else
EraseFileName = FileName;
@@ -2760,7 +2765,7 @@ void pkgAcqArchive::Done(string const &Message, HashStringList const &Hashes,
}
// Reference filename
- if (FileName != DestFile)
+ if (DestFile != FileName && RealFileExists(DestFile) == false)
{
StoreFilename = DestFile = FileName;
Local = true;
@@ -2903,18 +2908,6 @@ void pkgAcqChangelog::Init(std::string const &DestDir, std::string const &DestFi
strprintf(Desc.Description, "%s %s %s Changelog", URI::SiteOnly(Desc.URI).c_str(), SrcName.c_str(), SrcVersion.c_str());
Desc.Owner = this;
QueueURI(Desc);
-
- if (Status == StatDone) // this happens if we queue the same changelog two times
- {
- Complete = true;
- for (pkgAcquire::UriIterator I = Owner->UriBegin(); I != Owner->UriEnd(); ++I)
- if (I->URI == Desc.URI)
- if (DestFile != I->Owner->DestFile)
- if (symlink(I->Owner->DestFile.c_str(), DestFile.c_str()) != 0)
- {
- ; // ignore error, there isn't anthing we could do to handle the edgecase of an edgecase
- }
- }
}
/*}}}*/
std::string pkgAcqChangelog::URI(pkgCache::VerIterator const &Ver) /*{{{*/
@@ -3107,7 +3100,7 @@ void pkgAcqFile::Done(string const &Message,HashStringList const &CalcHashes,
return;
// We have to copy it into place
- if (FileName != DestFile)
+ if (RealFileExists(DestFile.c_str()) == false)
{
Local = true;
if (_config->FindB("Acquire::Source-Symlinks",true) == false ||
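The four hunks above harden the same guard: an item only takes the "copy or uncompress the method's local file into place" path if its DestFile does not already exist as a real file. With shared downloads, the worker may have already hard-linked the finished file to every owner's DestFile (see PrepareFiles in acquire-worker.cc further down), in which case there is nothing left to copy. A sketch of that decision, where RequiresLocalCopy and RealFileExistsSketch are invented names standing in for the checks in the patch:

    #include <string>
    #include <sys/stat.h>

    // Stand-in for APT's RealFileExists(): a regular file exists at Path.
    static bool RealFileExistsSketch(std::string const &Path) {
       struct stat Buf;
       return lstat(Path.c_str(), &Buf) == 0 && S_ISREG(Buf.st_mode);
    }

    // True if the item still has to copy/uncompress FileName itself;
    // false if another owner's download already materialised DestFile for it.
    static bool RequiresLocalCopy(std::string const &FileName, std::string const &DestFile) {
       return FileName != DestFile && RealFileExistsSketch(DestFile) == false;
    }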
diff --git a/apt-pkg/acquire-item.h b/apt-pkg/acquire-item.h
index 606fd4173..9dbacc1ea 100644
--- a/apt-pkg/acquire-item.h
+++ b/apt-pkg/acquire-item.h
@@ -249,6 +249,7 @@ class pkgAcquire::Item : public WeakPointable /*{{{*/
/** \return the acquire process with which this item is associated. */
pkgAcquire *GetOwner() const;
+ pkgAcquire::ItemDesc &GetItemDesc();
/** \return \b true if this object is being fetched from a trusted source. */
virtual bool IsTrusted() const;
diff --git a/apt-pkg/acquire-worker.cc b/apt-pkg/acquire-worker.cc
index 099a1f87d..d6318a21b 100644
--- a/apt-pkg/acquire-worker.cc
+++ b/apt-pkg/acquire-worker.cc
@@ -258,18 +258,27 @@ bool pkgAcquire::Worker::RunMessages()
ItemDone();
- pkgAcquire::Item *Owner = Itm->Owner;
- pkgAcquire::ItemDesc Desc = *Itm;
-
// Change the status so that it can be dequeued
- Owner->Status = pkgAcquire::Item::StatIdle;
+ for (pkgAcquire::Queue::QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O)
+ {
+ pkgAcquire::Item *Owner = *O;
+ Owner->Status = pkgAcquire::Item::StatIdle;
+ }
// Mark the item as done (taking care of all queues)
// and then put it in the main queue again
+ std::vector<Item*> const ItmOwners = Itm->Owners;
OwnerQ->ItemDone(Itm);
- OwnerQ->Owner->Enqueue(Desc);
+ Itm = NULL;
+ for (pkgAcquire::Queue::QItem::owner_iterator O = ItmOwners.begin(); O != ItmOwners.end(); ++O)
+ {
+ pkgAcquire::Item *Owner = *O;
+ pkgAcquire::ItemDesc desc = Owner->GetItemDesc();
+ desc.URI = NewURI;
+ OwnerQ->Owner->Enqueue(desc);
- if (Log != 0)
- Log->Done(Desc);
+ if (Log != 0)
+ Log->Done(Owner->GetItemDesc());
+ }
break;
}
@@ -286,14 +295,18 @@ bool pkgAcquire::Worker::RunMessages()
CurrentSize = 0;
TotalSize = strtoull(LookupTag(Message,"Size","0").c_str(), NULL, 10);
ResumePoint = strtoull(LookupTag(Message,"Resume-Point","0").c_str(), NULL, 10);
- Itm->Owner->Start(Message, TotalSize);
-
- // Display update before completion
- if (Log != 0 && Log->MorePulses == true)
- Log->Pulse(Itm->Owner->GetOwner());
+ for (pkgAcquire::Queue::QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O)
+ {
+ (*O)->Start(Message, TotalSize);
- if (Log != 0)
- Log->Fetch(*Itm);
+ // Display update before completion
+ if (Log != 0)
+ {
+ if (Log->MorePulses == true)
+ Log->Pulse((*O)->GetOwner());
+ Log->Fetch((*O)->GetItemDesc());
+ }
+ }
break;
}
@@ -307,105 +320,110 @@ bool pkgAcquire::Worker::RunMessages()
break;
}
- pkgAcquire::Item *Owner = Itm->Owner;
- pkgAcquire::ItemDesc Desc = *Itm;
-
- if (RealFileExists(Owner->DestFile))
- ChangeOwnerAndPermissionOfFile("201::URIDone", Owner->DestFile.c_str(), "root", "root", 0644);
+ PrepareFiles("201::URIDone", Itm);
// Display update before completion
if (Log != 0 && Log->MorePulses == true)
- Log->Pulse(Owner->GetOwner());
+ for (pkgAcquire::Queue::QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O)
+ Log->Pulse((*O)->GetOwner());
- OwnerQ->ItemDone(Itm);
-
- HashStringList const ExpectedHashes = Owner->GetExpectedHashes();
- // see if we got hashes to verify
+ std::string const filename = LookupTag(Message, "Filename", Itm->Owner->DestFile.c_str());
HashStringList ReceivedHashes;
- for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
{
- std::string const tagname = std::string(*type) + "-Hash";
- std::string const hashsum = LookupTag(Message, tagname.c_str());
- if (hashsum.empty() == false)
- ReceivedHashes.push_back(HashString(*type, hashsum));
- }
- // not all methods always sent Hashes our way
- if (ExpectedHashes.usable() == true && ReceivedHashes.usable() == false)
- {
- std::string const filename = LookupTag(Message, "Filename", Owner->DestFile.c_str());
- if (filename.empty() == false && RealFileExists(filename))
+ // see if we got hashes to verify
+ for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
{
- Hashes calc(ExpectedHashes);
- FileFd file(filename, FileFd::ReadOnly, FileFd::None);
- calc.AddFD(file);
- ReceivedHashes = calc.GetHashStringList();
+ std::string const tagname = std::string(*type) + "-Hash";
+ std::string const hashsum = LookupTag(Message, tagname.c_str());
+ if (hashsum.empty() == false)
+ ReceivedHashes.push_back(HashString(*type, hashsum));
+ }
+ // not all methods always sent Hashes our way
+ if (ReceivedHashes.usable() == false)
+ {
+ HashStringList const ExpectedHashes = Itm->GetExpectedHashes();
+ if (ExpectedHashes.usable() == true && RealFileExists(filename))
+ {
+ Hashes calc(ExpectedHashes);
+ FileFd file(filename, FileFd::ReadOnly, FileFd::None);
+ calc.AddFD(file);
+ ReceivedHashes = calc.GetHashStringList();
+ }
}
}
- if(_config->FindB("Debug::pkgAcquire::Auth", false) == true)
- {
- std::clog << "201 URI Done: " << Owner->DescURI() << endl
- << "ReceivedHash:" << endl;
- for (HashStringList::const_iterator hs = ReceivedHashes.begin(); hs != ReceivedHashes.end(); ++hs)
- std::clog << "\t- " << hs->toStr() << std::endl;
- std::clog << "ExpectedHash:" << endl;
- for (HashStringList::const_iterator hs = ExpectedHashes.begin(); hs != ExpectedHashes.end(); ++hs)
- std::clog << "\t- " << hs->toStr() << std::endl;
- std::clog << endl;
- }
+      // only local files can refer to other filenames and counting them as fetched would be unfair
+ if (Log != NULL && filename != Itm->Owner->DestFile)
+ Log->Fetched(ReceivedHashes.FileSize(),atoi(LookupTag(Message,"Resume-Point","0").c_str()));
+
+ std::vector<Item*> const ItmOwners = Itm->Owners;
+ OwnerQ->ItemDone(Itm);
+ Itm = NULL;
- // decide if what we got is what we expected
- bool consideredOkay = false;
bool const isIMSHit = StringToBool(LookupTag(Message,"IMS-Hit"),false) ||
StringToBool(LookupTag(Message,"Alt-IMS-Hit"),false);
- if (ExpectedHashes.usable())
+
+ for (pkgAcquire::Queue::QItem::owner_iterator O = ItmOwners.begin(); O != ItmOwners.end(); ++O)
{
- if (ReceivedHashes.usable() == false)
+ pkgAcquire::Item * const Owner = *O;
+ HashStringList const ExpectedHashes = Owner->GetExpectedHashes();
+ if(_config->FindB("Debug::pkgAcquire::Auth", false) == true)
{
- /* IMS-Hits can't be checked here as we will have uncompressed file,
- but the hashes for the compressed file. What we have was good through
- so all we have to ensure later is that we are not stalled. */
- consideredOkay = isIMSHit;
+ std::clog << "201 URI Done: " << Owner->DescURI() << endl
+ << "ReceivedHash:" << endl;
+ for (HashStringList::const_iterator hs = ReceivedHashes.begin(); hs != ReceivedHashes.end(); ++hs)
+ std::clog << "\t- " << hs->toStr() << std::endl;
+ std::clog << "ExpectedHash:" << endl;
+ for (HashStringList::const_iterator hs = ExpectedHashes.begin(); hs != ExpectedHashes.end(); ++hs)
+ std::clog << "\t- " << hs->toStr() << std::endl;
+ std::clog << endl;
}
- else if (ReceivedHashes == ExpectedHashes)
- consideredOkay = true;
- else
- consideredOkay = false;
- }
- else if (Owner->HashesRequired() == true)
- consideredOkay = false;
- else
- consideredOkay = true;
+ // decide if what we got is what we expected
+ bool consideredOkay = false;
+ if (ExpectedHashes.usable())
+ {
+ if (ReceivedHashes.usable() == false)
+ {
+ /* IMS-Hits can't be checked here as we will have uncompressed file,
+ but the hashes for the compressed file. What we have was good through
+ so all we have to ensure later is that we are not stalled. */
+ consideredOkay = isIMSHit;
+ }
+ else if (ReceivedHashes == ExpectedHashes)
+ consideredOkay = true;
+ else
+ consideredOkay = false;
- if (consideredOkay == true)
- {
- Owner->Done(Message, ReceivedHashes, Config);
- ItemDone();
+ }
+ else if (Owner->HashesRequired() == true)
+ consideredOkay = false;
+ else
+ consideredOkay = true;
- // Log that we are done
- if (Log != 0)
+ if (consideredOkay == true)
{
- if (isIMSHit)
+ Owner->Done(Message, ReceivedHashes, Config);
+
+ // Log that we are done
+ if (Log != 0)
{
- /* Hide 'hits' for local only sources - we also manage to
- hide gets */
- if (Config->LocalOnly == false)
- Log->IMSHit(Desc);
+ if (isIMSHit)
+ Log->IMSHit(Owner->GetItemDesc());
+ else
+ Log->Done(Owner->GetItemDesc());
}
- else
- Log->Done(Desc);
}
- }
- else
- {
- Owner->Status = pkgAcquire::Item::StatAuthError;
- Owner->Failed(Message,Config);
- ItemDone();
+ else
+ {
+ Owner->Status = pkgAcquire::Item::StatAuthError;
+ Owner->Failed(Message,Config);
- if (Log != 0)
- Log->Fail(Desc);
+ if (Log != 0)
+ Log->Fail(Owner->GetItemDesc());
+ }
}
+ ItemDone();
break;
}
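When a method reports "201 URI Done" without any *-Hash fields, the worker now computes the hashes itself from the file on disk before fanning the result out to all owners, each of which is then checked against its own expected hashes. A condensed sketch of that fallback, following the Hashes/FileFd usage visible in the hunk above (treat the exact signatures as read from this patch rather than as a documented API):

    #include <apt-pkg/fileutl.h>
    #include <apt-pkg/hashes.h>

    #include <string>

    // Distilled from the hunk above: if the acquire method sent no *-Hash tags,
    // hash the downloaded file against the hash types the owners expect.
    static HashStringList HashesForDownload(std::string const &Filename,
                                            HashStringList const &ExpectedHashes,
                                            HashStringList ReceivedHashes)
    {
       if (ReceivedHashes.usable() == false &&
           ExpectedHashes.usable() == true && RealFileExists(Filename))
       {
          Hashes calc(ExpectedHashes);
          FileFd file(Filename, FileFd::ReadOnly, FileFd::None);
          calc.AddFD(file);
          ReceivedHashes = calc.GetHashStringList();
       }
       return ReceivedHashes;
    }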
@@ -419,30 +437,32 @@ bool pkgAcquire::Worker::RunMessages()
break;
}
+ PrepareFiles("400::URIFailure", Itm);
+
// Display update before completion
if (Log != 0 && Log->MorePulses == true)
- Log->Pulse(Itm->Owner->GetOwner());
-
- pkgAcquire::Item *Owner = Itm->Owner;
- pkgAcquire::ItemDesc Desc = *Itm;
-
- if (RealFileExists(Owner->DestFile))
- ChangeOwnerAndPermissionOfFile("400::URIFailure", Owner->DestFile.c_str(), "root", "root", 0644);
+ for (pkgAcquire::Queue::QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O)
+ Log->Pulse((*O)->GetOwner());
+ std::vector<Item*> const ItmOwners = Itm->Owners;
OwnerQ->ItemDone(Itm);
+ Itm = NULL;
- // set some status
- if(LookupTag(Message,"FailReason") == "Timeout" ||
- LookupTag(Message,"FailReason") == "TmpResolveFailure" ||
- LookupTag(Message,"FailReason") == "ResolveFailure" ||
- LookupTag(Message,"FailReason") == "ConnectionRefused")
- Owner->Status = pkgAcquire::Item::StatTransientNetworkError;
+ for (pkgAcquire::Queue::QItem::owner_iterator O = ItmOwners.begin(); O != ItmOwners.end(); ++O)
+ {
+ // set some status
+ if(LookupTag(Message,"FailReason") == "Timeout" ||
+ LookupTag(Message,"FailReason") == "TmpResolveFailure" ||
+ LookupTag(Message,"FailReason") == "ResolveFailure" ||
+ LookupTag(Message,"FailReason") == "ConnectionRefused")
+ (*O)->Status = pkgAcquire::Item::StatTransientNetworkError;
- Owner->Failed(Message,Config);
- ItemDone();
+ (*O)->Failed(Message,Config);
- if (Log != 0)
- Log->Fail(Desc);
+ if (Log != 0)
+ Log->Fail((*O)->GetItemDesc());
+ }
+ ItemDone();
break;
}
@@ -578,16 +598,24 @@ bool pkgAcquire::Worker::QueueItem(pkgAcquire::Queue::QItem *Item)
Message.reserve(300);
Message += "URI: " + Item->URI;
Message += "\nFilename: " + Item->Owner->DestFile;
- HashStringList const hsl = Item->Owner->GetExpectedHashes();
+
+ HashStringList const hsl = Item->GetExpectedHashes();
for (HashStringList::const_iterator hs = hsl.begin(); hs != hsl.end(); ++hs)
Message += "\nExpected-" + hs->HashType() + ": " + hs->HashValue();
- if(Item->Owner->FileSize > 0)
+
+ if (hsl.FileSize() == 0)
{
- string MaximumSize;
- strprintf(MaximumSize, "%llu", Item->Owner->FileSize);
- Message += "\nMaximum-Size: " + MaximumSize;
+ unsigned long long FileSize = Item->GetMaximumSize();
+ if(FileSize > 0)
+ {
+ string MaximumSize;
+ strprintf(MaximumSize, "%llu", FileSize);
+ Message += "\nMaximum-Size: " + MaximumSize;
+ }
}
- Message += Item->Owner->Custom600Headers();
+
+ Item->SyncDestinationFiles();
+ Message += Item->Custom600Headers();
Message += "\n\n";
if (RealFileExists(Item->Owner->DestFile))
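With this change the request a worker writes to its method is assembled from the QItem aggregates rather than from a single owner: the merged expected hashes, the smallest maximum size of all owners, and the first owner's custom headers after the partial files have been synced. Assuming the usual "600 URI Acquire" framing of the method protocol, a request for a shared URI could look roughly like the following (all values are made up):

    600 URI Acquire
    URI: http://localhost:8080/foo
    Filename: /tmp/partial/foo1
    Expected-SHA256: <digest all owners agree on>
    Maximum-Size: 9979

Per the hunk above, Expected-* lines are emitted only when the owners' hash lists can be merged without conflict, Maximum-Size only when no owner supplied a file size via its hashes, and any Custom600Headers of the first owner are appended before the terminating blank line.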
@@ -686,3 +714,33 @@ void pkgAcquire::Worker::ItemDone()
Status = string();
}
/*}}}*/
+void pkgAcquire::Worker::PrepareFiles(char const * const caller, pkgAcquire::Queue::QItem const * const Itm)/*{{{*/
+{
+ if (RealFileExists(Itm->Owner->DestFile))
+ {
+ ChangeOwnerAndPermissionOfFile(caller, Itm->Owner->DestFile.c_str(), "root", "root", 0644);
+ std::string const filename = Itm->Owner->DestFile;
+ for (pkgAcquire::Queue::QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O)
+ {
+ pkgAcquire::Item const * const Owner = *O;
+ if (Owner->DestFile == filename)
+ continue;
+ unlink(Owner->DestFile.c_str());
+ if (link(filename.c_str(), Owner->DestFile.c_str()) != 0)
+ {
+	 // different mounts can't happen for us as we download to lists/ by default,
+	 // but if the system is reused by others the locations can potentially be on
+	 // different disks, so use a symlink as a poor man's replacement.
+	 // FIXME: Real copying as last fallback, but that is costly, so offload to a method preferably
+ if (symlink(filename.c_str(), Owner->DestFile.c_str()) != 0)
+ _error->Error("Can't create (sym)link of file %s to %s", filename.c_str(), Owner->DestFile.c_str());
+ }
+ }
+ }
+ else
+ {
+ for (pkgAcquire::Queue::QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O)
+ unlink((*O)->DestFile.c_str());
+ }
+}
+ /*}}}*/
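PrepareFiles propagates the finished (or removes the failed) download to every owner's DestFile, preferring a hard link and degrading to a symlink when the link cannot be created, for example across filesystems. A standalone sketch of that pattern, independent of APT's types:

    #include <cstdio>
    #include <string>
    #include <unistd.h>

    // Sketch of the link-then-symlink fallback used by PrepareFiles above.
    // Returns true if Target points at the same content as Source afterwards.
    static bool LinkOrSymlink(std::string const &Source, std::string const &Target)
    {
       unlink(Target.c_str());                   // drop any stale partial file
       if (link(Source.c_str(), Target.c_str()) == 0)
          return true;                           // hard link on the same filesystem
       // link() fails with EXDEV across filesystems (among other reasons),
       // so fall back to a symlink as a poor man's replacement.
       if (symlink(Source.c_str(), Target.c_str()) == 0)
          return true;
       std::perror("link/symlink");
       return false;
    }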
diff --git a/apt-pkg/acquire-worker.h b/apt-pkg/acquire-worker.h
index db8889c8e..3a3ef706d 100644
--- a/apt-pkg/acquire-worker.h
+++ b/apt-pkg/acquire-worker.h
@@ -326,6 +326,9 @@ class pkgAcquire::Worker : public WeakPointable
* \b false, also rudely interrupts the worker with a SIGINT.
*/
virtual ~Worker();
+
+private:
+ APT_HIDDEN void PrepareFiles(char const * const caller, pkgAcquire::Queue::QItem const * const Itm);
};
/** @} */
diff --git a/apt-pkg/acquire.cc b/apt-pkg/acquire.cc
index 0c815c005..14c8863dc 100644
--- a/apt-pkg/acquire.cc
+++ b/apt-pkg/acquire.cc
@@ -23,6 +23,7 @@
#include <apt-pkg/strutl.h>
#include <apt-pkg/fileutl.h>
+#include <algorithm>
#include <string>
#include <vector>
#include <iostream>
@@ -682,7 +683,8 @@ bool pkgAcquire::Queue::Enqueue(ItemDesc &Item)
for (; *I != 0; I = &(*I)->Next)
if (Item.URI == (*I)->URI)
{
- Item.Owner->Status = Item::StatDone;
+ (*I)->Owners.push_back(Item.Owner);
+ Item.Owner->Status = (*I)->Owner->Status;
return false;
}
@@ -705,13 +707,13 @@ bool pkgAcquire::Queue::Dequeue(Item *Owner)
{
if (Owner->Status == pkgAcquire::Item::StatFetching)
return _error->Error("Tried to dequeue a fetching object");
-
+
bool Res = false;
-
+
QItem **I = &Items;
for (; *I != 0;)
{
- if ((*I)->Owner == Owner)
+ if (Owner == (*I)->Owner)
{
QItem *Jnk= *I;
*I = (*I)->Next;
@@ -722,7 +724,7 @@ bool pkgAcquire::Queue::Dequeue(Item *Owner)
else
I = &(*I)->Next;
}
-
+
return Res;
}
/*}}}*/
@@ -799,9 +801,12 @@ pkgAcquire::Queue::QItem *pkgAcquire::Queue::FindItem(string URI,pkgAcquire::Wor
bool pkgAcquire::Queue::ItemDone(QItem *Itm)
{
PipeDepth--;
- if (Itm->Owner->Status == pkgAcquire::Item::StatFetching)
- Itm->Owner->Status = pkgAcquire::Item::StatDone;
-
+ for (QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O)
+ {
+ if ((*O)->Status == pkgAcquire::Item::StatFetching)
+ (*O)->Status = pkgAcquire::Item::StatDone;
+ }
+
if (Itm->Owner->QueueCounter <= 1)
Owner->Dequeue(Itm->Owner);
else
@@ -809,7 +814,7 @@ bool pkgAcquire::Queue::ItemDone(QItem *Itm)
Dequeue(Itm->Owner);
Owner->Bump();
}
-
+
return Cycle();
}
/*}}}*/
@@ -824,7 +829,7 @@ bool pkgAcquire::Queue::Cycle()
if (PipeDepth < 0)
return _error->Error("Pipedepth failure");
-
+
// Look for a queable item
QItem *I = Items;
while (PipeDepth < (signed)MaxPipeDepth)
@@ -832,18 +837,19 @@ bool pkgAcquire::Queue::Cycle()
for (; I != 0; I = I->Next)
if (I->Owner->Status == pkgAcquire::Item::StatIdle)
break;
-
+
// Nothing to do, queue is idle.
if (I == 0)
return true;
-
+
I->Worker = Workers;
- I->Owner->Status = pkgAcquire::Item::StatFetching;
+ for (QItem::owner_iterator O = I->Owners.begin(); O != I->Owners.end(); ++O)
+ (*O)->Status = pkgAcquire::Item::StatFetching;
PipeDepth++;
if (Workers->QueueItem(I) == false)
return false;
}
-
+
return true;
}
/*}}}*/
@@ -855,6 +861,94 @@ void pkgAcquire::Queue::Bump()
Cycle();
}
/*}}}*/
+HashStringList pkgAcquire::Queue::QItem::GetExpectedHashes() const /*{{{*/
+{
+ /* each Item can have multiple owners and each owner might have different
+ hashes, even if that is unlikely in practice and if so at least some
+      owners will later fail. There is one situation, though, which is not a
+ failure and still needs this handling: Two owners who expect the same
+ file, but one owner only knows the SHA1 while the other only knows SHA256. */
+ HashStringList superhsl;
+ for (pkgAcquire::Queue::QItem::owner_iterator O = Owners.begin(); O != Owners.end(); ++O)
+ {
+ HashStringList const hsl = (*O)->GetExpectedHashes();
+ if (hsl.usable() == false)
+ continue;
+ if (superhsl.usable() == false)
+ superhsl = hsl;
+ else
+ {
+ // we merge both lists - if we find disagreement send no hashes
+ HashStringList::const_iterator hs = hsl.begin();
+ for (; hs != hsl.end(); ++hs)
+ if (superhsl.push_back(*hs) == false)
+ break;
+ if (hs != hsl.end())
+ {
+ superhsl.clear();
+ break;
+ }
+ }
+ }
+ return superhsl;
+}
+ /*}}}*/
+APT_PURE unsigned long long pkgAcquire::Queue::QItem::GetMaximumSize() const /*{{{*/
+{
+ unsigned long long Maximum = std::numeric_limits<unsigned long long>::max();
+ for (pkgAcquire::Queue::QItem::owner_iterator O = Owners.begin(); O != Owners.end(); ++O)
+ {
+ if ((*O)->FileSize == 0)
+ continue;
+ Maximum = std::min(Maximum, (*O)->FileSize);
+ }
+ if (Maximum == std::numeric_limits<unsigned long long>::max())
+ return 0;
+ return Maximum;
+}
+ /*}}}*/
+void pkgAcquire::Queue::QItem::SyncDestinationFiles() const /*{{{*/
+{
+ /* ensure that the first owner has the best partial file of all and
+ the rest have (potentially dangling) symlinks to it so that
+ everything (like progress reporting) finds it easily */
+ std::string superfile = Owner->DestFile;
+ off_t supersize = 0;
+ for (pkgAcquire::Queue::QItem::owner_iterator O = Owners.begin(); O != Owners.end(); ++O)
+ {
+ if ((*O)->DestFile == superfile)
+ continue;
+ struct stat file;
+ if (lstat((*O)->DestFile.c_str(),&file) == 0)
+ {
+ if ((file.st_mode & S_IFREG) == 0)
+ unlink((*O)->DestFile.c_str());
+ else if (supersize < file.st_size)
+ {
+ supersize = file.st_size;
+ unlink(superfile.c_str());
+ rename((*O)->DestFile.c_str(), superfile.c_str());
+ }
+ else
+ unlink((*O)->DestFile.c_str());
+ if (symlink(superfile.c_str(), (*O)->DestFile.c_str()) != 0)
+ {
+ ; // not a problem per-se and no real alternative
+ }
+ }
+ }
+}
+ /*}}}*/
+std::string pkgAcquire::Queue::QItem::Custom600Headers() const /*{{{*/
+{
+ /* The others are relatively easy to merge, but this one?
+      Let's not merge and see how far we can run with it…
+ Likely, nobody will ever notice as all the items will
+ be of the same class and hence generate the same headers. */
+ return Owner->Custom600Headers();
+}
+ /*}}}*/
+
// AcquireStatus::pkgAcquireStatus - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
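The merge rule in QItem::GetExpectedHashes is easiest to see with concrete owners: hash lists that complement each other are combined, while any disagreement makes the QItem send no hashes at all and leaves verification to each owner when the result comes back. A small illustration with made-up digests, using only the HashStringList operations exercised in the hunk above (push_back returning false when a value cannot be merged in, clear, usable, toStr):

    #include <apt-pkg/hashes.h>
    #include <iostream>

    int main()
    {
       // Owner A only knows a SHA1, owner B only a SHA256: the merged list
       // carries both, so the method can verify with whichever it supports.
       HashStringList a, b;
       a.push_back(HashString("SHA1",   "0000000000000000000000000000000000000000"));  // made-up digest
       b.push_back(HashString("SHA256", "0000000000000000000000000000000000000000"
                                        "000000000000000000000000"));                  // made-up digest

       HashStringList merged = a;
       for (HashStringList::const_iterator hs = b.begin(); hs != b.end(); ++hs)
          if (merged.push_back(*hs) == false)   // false: conflicting value for this hash type
          {
             merged.clear();                    // disagreement: send no hashes at all
             break;
          }

       for (HashStringList::const_iterator hs = merged.begin(); hs != merged.end(); ++hs)
          std::cout << hs->toStr() << std::endl;
       return 0;
    }

Had both owners claimed different SHA256 values instead, push_back would have returned false and the whole list would be cleared, which is exactly the "if we find disagreement send no hashes" case in the code above.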
@@ -914,9 +1008,9 @@ bool pkgAcquireStatus::Pulse(pkgAcquire *Owner)
{
CurrentBytes += I->CurrentSize;
ResumeSize += I->ResumePoint;
-
+
// Files with unknown size always have 100% completion
- if (I->CurrentItem->Owner->FileSize == 0 &&
+ if (I->CurrentItem->Owner->FileSize == 0 &&
I->CurrentItem->Owner->Complete == false)
TotalBytes += I->CurrentSize;
}
diff --git a/apt-pkg/acquire.h b/apt-pkg/acquire.h
index fc90624e1..02031dafd 100644
--- a/apt-pkg/acquire.h
+++ b/apt-pkg/acquire.h
@@ -68,9 +68,10 @@
#include <apt-pkg/macros.h>
#include <apt-pkg/weakptr.h>
+#include <apt-pkg/hashes.h>
-#include <vector>
#include <string>
+#include <vector>
#include <stddef.h>
#include <sys/time.h>
@@ -390,13 +391,13 @@ class pkgAcquire
*/
struct pkgAcquire::ItemDesc : public WeakPointable
{
- /** \brief The URI from which to download this item. */
+ /** \brief URI from which to download this item. */
std::string URI;
- /** brief A description of this item. */
+ /** \brief description of this item. */
std::string Description;
- /** brief A shorter description of this item. */
+ /** \brief shorter description of this item. */
std::string ShortDesc;
- /** brief The underlying item which is to be downloaded. */
+ /** \brief underlying item which is to be downloaded. */
Item *Owner;
};
/*}}}*/
@@ -419,13 +420,26 @@ class pkgAcquire::Queue
protected:
/** \brief A single item placed in this queue. */
- struct QItem : pkgAcquire::ItemDesc
+ struct QItem : public WeakPointable
{
/** \brief The next item in the queue. */
QItem *Next;
/** \brief The worker associated with this item, if any. */
pkgAcquire::Worker *Worker;
+ /** \brief The URI from which to download this item. */
+ std::string URI;
+ /** \brief A description of this item. */
+ std::string Description;
+ /** \brief A shorter description of this item. */
+ std::string ShortDesc;
+ /** \brief The underlying items interested in the download */
+ std::vector<Item*> Owners;
+      // kept for both backward compatibility and easy access, as syncing is internal
+ Item * Owner;
+
+ typedef std::vector<Item*>::const_iterator owner_iterator;
+
/** \brief Assign the ItemDesc portion of this QItem from
* another ItemDesc
*/
@@ -434,10 +448,24 @@ class pkgAcquire::Queue
URI = I.URI;
Description = I.Description;
ShortDesc = I.ShortDesc;
+ Owners.clear();
+ Owners.push_back(I.Owner);
Owner = I.Owner;
};
+
+ /** @return the sum of all expected hashes by all owners */
+ HashStringList GetExpectedHashes() const;
+
+ /** @return smallest maximum size of all owners */
+ unsigned long long GetMaximumSize() const;
+
+ /** \brief get partial files in order */
+ void SyncDestinationFiles() const;
+
+ /** @return the custom headers to use for this item */
+ std::string Custom600Headers() const;
};
-
+
/** \brief The name of this queue. */
std::string Name;
@@ -590,7 +618,7 @@ class pkgAcquire::UriIterator
}
};
- inline pkgAcquire::ItemDesc const *operator ->() const {return CurItem;};
+ inline pkgAcquire::Queue::QItem const *operator ->() const {return CurItem;};
inline bool operator !=(UriIterator const &rhs) const {return rhs.CurQ != CurQ || rhs.CurItem != CurItem;};
inline bool operator ==(UriIterator const &rhs) const {return rhs.CurQ == CurQ && rhs.CurItem == CurItem;};
diff --git a/test/integration/test-acquire-same-file-multiple-times b/test/integration/test-acquire-same-file-multiple-times
new file mode 100755
index 000000000..d329a39cb
--- /dev/null
+++ b/test/integration/test-acquire-same-file-multiple-times
@@ -0,0 +1,80 @@
+#!/bin/sh
+set -e
+
+TESTDIR=$(readlink -f $(dirname $0))
+. $TESTDIR/framework
+setupenvironment
+configarchitecture 'amd64'
+
+TESTFILE="$TESTDIR/framework"
+cp $TESTFILE aptarchive/foo
+APTARCHIVE="$(readlink -f ./aptarchive)"
+
+filedown() {
+ msgtest 'Downloading the same URI twice over file' "$1"
+ testsuccess --nomsg apthelper download-file file:///$APTARCHIVE/foo ./downloaded/foo1 '' file:///$APTARCHIVE/foo ./downloaded/foo2 '' -o Debug::pkgAcquire::Worker=1
+ cp rootdir/tmp/testsuccess.output download.log
+ #cat download.log
+ testsuccess cmp $TESTFILE ./downloaded/foo1
+ testsuccess cmp ./downloaded/foo1 ./downloaded/foo2
+ #testequal '1' grep -c '200%20URI%20Start' ./download.log
+ testequal '1' grep -c '201%20URI%20Done' ./download.log
+ rm -f ./downloaded/foo1 ./downloaded/foo2
+}
+
+testrun() {
+ $1 'no partial'
+ cp $TESTFILE ./downloaded/foo1
+ $1 'complete partial 1'
+ cp $TESTFILE ./downloaded/foo2
+ $1 'complete partial 2'
+ cp $TESTFILE ./downloaded/foo1
+ cp $TESTFILE ./downloaded/foo2
+ $1 'complete partial 1+2'
+ dd if=$TESTFILE of=./downloaded/foo1 bs=500 count=1 2>/dev/null
+ $1 'partial partial 1'
+ dd if=$TESTFILE of=./downloaded/foo2 bs=500 count=1 2>/dev/null
+ $1 'partial partial 2'
+ dd if=$TESTFILE of=./downloaded/foo1 bs=500 count=1 2>/dev/null
+ dd if=$TESTFILE of=./downloaded/foo2 bs=500 count=1 2>/dev/null
+ $1 'partial partial 1+2'
+}
+testrun 'filedown'
+
+changetowebserver -o aptwebserver::redirect::replace::/foo2=/foo
+
+httpdown() {
+ msgtest 'Downloading the same URI to different files' 'twice over http'
+ testsuccess --nomsg apthelper download-file http://localhost:8080/foo ./downloaded/foo1 '' http://localhost:8080/foo ./downloaded/foo2 '' -o Debug::pkgAcquire::Worker=1
+ cp rootdir/tmp/testsuccess.output download.log
+ testsuccess cmp $TESTDIR/framework ./downloaded/foo1
+ testsuccess cmp ./downloaded/foo1 ./downloaded/foo2
+ testequal '1' grep -c '200%20URI%20Start' ./download.log
+ testequal '1' grep -c '201%20URI%20Done' ./download.log
+ rm -f ./downloaded/foo1 ./downloaded/foo2
+}
+testrun 'httpdown'
+
+httpredirectdown() {
+ msgtest 'Redirect leads' 'first URI to the second URI'
+ testsuccess --nomsg apthelper download-file http://localhost:8080/foo2 ./downloaded/foo1 '' http://localhost:8080/foo ./downloaded/foo2 '' -o Debug::pkgAcquire::Worker=1
+ cp rootdir/tmp/testsuccess.output download.log
+ testsuccess cmp $TESTDIR/framework ./downloaded/foo1
+ testsuccess cmp ./downloaded/foo1 ./downloaded/foo2
+ testequal '1' grep -c '200%20URI%20Start' ./download.log
+ testequal '1' grep -c '103%20Redirect' ./download.log
+ testequal '1' grep -c '201%20URI%20Done' ./download.log
+ rm -f ./downloaded/foo1 ./downloaded/foo2
+}
+testrun 'httpredirectdown'
+
+httpsamedown() {
+ msgtest 'Downloading two files via the same URI to' 'the same file'
+ testsuccess --nomsg apthelper download-file http://localhost:8080/foo ./downloaded/foo1 '' http://localhost:8080/foo ./downloaded/foo1 '' -o Debug::pkgAcquire::Worker=1
+ cp rootdir/tmp/testsuccess.output download.log
+ testsuccess cmp $TESTDIR/framework ./downloaded/foo1
+ testequal '1' grep -c '200%20URI%20Start' ./download.log
+ testequal '1' grep -c '201%20URI%20Done' ./download.log
+ rm -f ./downloaded/foo1
+}
+testrun 'httpsamedown'
diff --git a/test/integration/test-apt-get-download b/test/integration/test-apt-get-download
index 6503bbd1c..fa0b65672 100755
--- a/test/integration/test-apt-get-download
+++ b/test/integration/test-apt-get-download
@@ -38,7 +38,7 @@ testdownload() {
APT="${APT}/${3}"
fi
msgtest "Test download of package file $1 with" "$APT"
- testsuccess --nomsg aptget download ${APT}
+ testsuccess --nomsg aptget download ${APT} -o Debug::pkgAcquire::Worker=1 -o Debug::pkgAcquire::Auth=1
testsuccess test -f "$1"
rm -f "$1"
}