author    Michael Vogt <mvo@ubuntu.com>  2014-10-06 17:42:39 +0200
committer Michael Vogt <mvo@ubuntu.com>  2014-10-06 17:42:39 +0200
commit    a2d40703e4a5590a689ace4466f92e590434944d (patch)
tree      5e878fcc11eb94d96c65940ef3d30e922f217950 /apt-pkg/acquire-item.cc
parent    ffd2dd93a640b47663ebdccc4fda00b426b3db71 (diff)
parent    00a06b8eb82cf930511fc003bd16d7034e5a0cb5 (diff)
make http size check work
Diffstat (limited to 'apt-pkg/acquire-item.cc')
-rw-r--r-- apt-pkg/acquire-item.cc | 670
1 file changed, 469 insertions(+), 201 deletions(-)
diff --git a/apt-pkg/acquire-item.cc b/apt-pkg/acquire-item.cc
index 70d03b845..779f828d3 100644
--- a/apt-pkg/acquire-item.cc
+++ b/apt-pkg/acquire-item.cc
@@ -64,6 +64,10 @@ static void printHashSumComparision(std::string const &URI, HashStringList const
/*}}}*/
// Acquire::Item::Item - Constructor /*{{{*/
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
pkgAcquire::Item::Item(pkgAcquire *Owner, HashStringList const &ExpectedHashes) :
Owner(Owner), FileSize(0), PartialSize(0), Mode(0), ID(0), Complete(false),
Local(false), QueueCounter(0), ExpectedAdditionalItems(0),
@@ -72,6 +76,9 @@ pkgAcquire::Item::Item(pkgAcquire *Owner, HashStringList const &ExpectedHashes)
Owner->Add(this);
Status = StatIdle;
}
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic pop
+#endif
/*}}}*/
// Acquire::Item::~Item - Destructor /*{{{*/
// ---------------------------------------------------------------------
@@ -369,7 +376,7 @@ pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire *Owner,
Desc.URI = Target->URI + ".diff/Index";
DestFile = _config->FindDir("Dir::State::lists") + "partial/";
- DestFile += URItoFileName(Target->URI) + string(".DiffIndex");
+ DestFile += URItoFileName(Desc.URI);
if(Debug)
std::clog << "pkgAcqDiffIndex: " << Desc.URI << std::endl;
@@ -385,9 +392,7 @@ pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire *Owner,
Desc.URI.substr(0,strlen("file:/")) == "file:/")
{
// we don't have a pkg file or we don't want to queue
- if(Debug)
- std::clog << "No index file, local or canceld by user" << std::endl;
- Failed("", NULL);
+ Failed("No index file, local or canceld by user", NULL);
return;
}
@@ -405,7 +410,7 @@ pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire *Owner,
string pkgAcqDiffIndex::Custom600Headers() const
{
string Final = _config->FindDir("Dir::State::lists");
- Final += URItoFileName(RealURI) + string(".IndexDiff");
+ Final += URItoFileName(Desc.URI);
if(Debug)
std::clog << "Custom600Header-IMS: " << Final << std::endl;
@@ -419,156 +424,264 @@ string pkgAcqDiffIndex::Custom600Headers() const
/*}}}*/
bool pkgAcqDiffIndex::ParseDiffIndex(string IndexDiffFile) /*{{{*/
{
+ // failing here is fine: our caller will take care of trying to
+ // get the complete file if patching fails
if(Debug)
std::clog << "pkgAcqDiffIndex::ParseIndexDiff() " << IndexDiffFile
<< std::endl;
- pkgTagSection Tags;
- string ServerSha1;
- vector<DiffInfo> available_patches;
-
FileFd Fd(IndexDiffFile,FileFd::ReadOnly);
pkgTagFile TF(&Fd);
if (_error->PendingError() == true)
return false;
- if(TF.Step(Tags) == true)
+ pkgTagSection Tags;
+ if(unlikely(TF.Step(Tags) == false))
+ return false;
+
+ HashStringList ServerHashes;
+ unsigned long long ServerSize = 0;
+
+ for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
{
- bool found = false;
- DiffInfo d;
- string size;
+ std::string tagname = *type;
+ tagname.append("-Current");
+ std::string const tmp = Tags.FindS(tagname.c_str());
+ if (tmp.empty() == true)
+ continue;
- string const tmp = Tags.FindS("SHA1-Current");
+ string hash;
+ unsigned long long size;
std::stringstream ss(tmp);
- ss >> ServerSha1 >> size;
- unsigned long const ServerSize = atol(size.c_str());
+ ss >> hash >> size;
+ if (unlikely(hash.empty() == true))
+ continue;
+ if (unlikely(ServerSize != 0 && ServerSize != size))
+ continue;
+ ServerHashes.push_back(HashString(*type, hash));
+ ServerSize = size;
+ }
- FileFd fd(CurrentPackagesFile, FileFd::ReadOnly);
- SHA1Summation SHA1;
- SHA1.AddFD(fd);
- string const local_sha1 = SHA1.Result();
+ if (ServerHashes.usable() == false)
+ {
+ if (Debug == true)
+ std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Did not find a good hashsum in the index" << std::endl;
+ return false;
+ }
- if(local_sha1 == ServerSha1)
- {
- // we have the same sha1 as the server so we are done here
- if(Debug)
- std::clog << "Package file is up-to-date" << std::endl;
- // list cleanup needs to know that this file as well as the already
- // present index is ours, so we create an empty diff to save it for us
- new pkgAcqIndexDiffs(Owner, Target, ExpectedHashes, MetaIndexParser,
- ServerSha1, available_patches);
- return true;
- }
- else
+ if (ServerHashes != HashSums())
+ {
+ if (Debug == true)
+ std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Index has different hashes than parser, probably older, so fail pdiffing" << std::endl;
+ return false;
+ }
+
+ if (ServerHashes.VerifyFile(CurrentPackagesFile) == true)
+ {
+ // we have the same hashes as the server so we are done here
+ if(Debug)
+ std::clog << "pkgAcqDiffIndex: Package file is up-to-date" << std::endl;
+ // list cleanup needs to know that this file as well as the already
+ // present index is ours, so we create an empty diff to save it for us
+ new pkgAcqIndexDiffs(Owner, Target, ExpectedHashes, MetaIndexParser);
+ return true;
+ }
+
+ FileFd fd(CurrentPackagesFile, FileFd::ReadOnly);
+ Hashes LocalHashesCalc;
+ LocalHashesCalc.AddFD(fd);
+ HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
+
+ if(Debug)
+ std::clog << "Server-Current: " << ServerHashes.find(NULL)->toStr() << " and we start at "
+ << fd.Name() << " " << fd.FileSize() << " " << LocalHashes.find(NULL)->toStr() << std::endl;
+
+ // parse all of (provided) history
+ vector<DiffInfo> available_patches;
+ bool firstAcceptedHashes = true;
+ for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
+ {
+ if (LocalHashes.find(*type) == NULL)
+ continue;
+
+ std::string tagname = *type;
+ tagname.append("-History");
+ std::string const tmp = Tags.FindS(tagname.c_str());
+ if (tmp.empty() == true)
+ continue;
+
+ string hash, filename;
+ unsigned long long size;
+ std::stringstream ss(tmp);
+
+ while (ss >> hash >> size >> filename)
{
- if(Debug)
- std::clog << "SHA1-Current: " << ServerSha1 << " and we start at "<< fd.Name() << " " << fd.Size() << " " << local_sha1 << std::endl;
+ if (unlikely(hash.empty() == true || filename.empty() == true))
+ continue;
- // check the historie and see what patches we need
- string const history = Tags.FindS("SHA1-History");
- std::stringstream hist(history);
- while(hist >> d.sha1 >> size >> d.file)
+ // see if we have a record for this file already
+ std::vector<DiffInfo>::iterator cur = available_patches.begin();
+ for (; cur != available_patches.end(); ++cur)
{
- // read until the first match is found
- // from that point on, we probably need all diffs
- if(d.sha1 == local_sha1)
- found=true;
- else if (found == false)
+ if (cur->file != filename || unlikely(cur->result_size != size))
continue;
-
- if(Debug)
- std::clog << "Need to get diff: " << d.file << std::endl;
- available_patches.push_back(d);
+ cur->result_hashes.push_back(HashString(*type, hash));
+ break;
}
-
- if (available_patches.empty() == false)
+ if (cur != available_patches.end())
+ continue;
+ if (firstAcceptedHashes == true)
{
- // patching with too many files is rather slow compared to a fast download
- unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
- if (fileLimit != 0 && fileLimit < available_patches.size())
- {
- if (Debug)
- std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit
- << ") so fallback to complete download" << std::endl;
- return false;
- }
-
- // see if the patches are too big
- found = false; // it was true and it will be true again at the end
- d = *available_patches.begin();
- string const firstPatch = d.file;
- unsigned long patchesSize = 0;
- std::stringstream patches(Tags.FindS("SHA1-Patches"));
- while(patches >> d.sha1 >> size >> d.file)
- {
- if (firstPatch == d.file)
- found = true;
- else if (found == false)
- continue;
-
- patchesSize += atol(size.c_str());
- }
- unsigned long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100);
- if (sizeLimit > 0 && (sizeLimit/100) < patchesSize)
- {
- if (Debug)
- std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100
- << ") so fallback to complete download" << std::endl;
- return false;
- }
+ DiffInfo next;
+ next.file = filename;
+ next.result_hashes.push_back(HashString(*type, hash));
+ next.result_size = size;
+ next.patch_size = 0;
+ available_patches.push_back(next);
+ }
+ else
+ {
+ if (Debug == true)
+ std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
+ << " wasn't in the list for the first parsed hash! (history)" << std::endl;
+ break;
}
}
+ firstAcceptedHashes = false;
+ }
+
+ if (unlikely(available_patches.empty() == true))
+ {
+ if (Debug)
+ std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
+ << "Couldn't find any patches for the patch series." << std::endl;
+ return false;
+ }
+
+ for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
+ {
+ if (LocalHashes.find(*type) == NULL)
+ continue;
+
+ std::string tagname = *type;
+ tagname.append("-Patches");
+ std::string const tmp = Tags.FindS(tagname.c_str());
+ if (tmp.empty() == true)
+ continue;
- // we have something, queue the next diff
- if(found)
+ string hash, filename;
+ unsigned long long size;
+ std::stringstream ss(tmp);
+
+ while (ss >> hash >> size >> filename)
{
- // queue the diffs
- string::size_type const last_space = Description.rfind(" ");
- if(last_space != string::npos)
- Description.erase(last_space, Description.size()-last_space);
-
- /* decide if we should download patches one by one or in one go:
- The first is good if the server merges patches, but many don't so client
- based merging can be attempt in which case the second is better.
- "bad things" will happen if patches are merged on the server,
- but client side merging is attempt as well */
- bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true);
- if (pdiff_merge == true)
- {
- // reprepro adds this flag if it has merged patches on the server
- std::string const precedence = Tags.FindS("X-Patch-Precedence");
- pdiff_merge = (precedence != "merged");
- }
+ if (unlikely(hash.empty() == true || filename.empty() == true))
+ continue;
- if (pdiff_merge == false)
- {
- new pkgAcqIndexDiffs(Owner, Target, ExpectedHashes, MetaIndexParser,
- ServerSha1, available_patches);
- }
- else
+ // see if we have a record for this file already
+ std::vector<DiffInfo>::iterator cur = available_patches.begin();
+ for (; cur != available_patches.end(); ++cur)
{
- std::vector<pkgAcqIndexMergeDiffs*> *diffs = new std::vector<pkgAcqIndexMergeDiffs*>(available_patches.size());
- for(size_t i = 0; i < available_patches.size(); ++i)
- (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, Target,
- ExpectedHashes,
- MetaIndexParser,
- available_patches[i],
- diffs);
+ if (cur->file != filename)
+ continue;
+ if (unlikely(cur->patch_size != 0 && cur->patch_size != size))
+ continue;
+ cur->patch_hashes.push_back(HashString(*type, hash));
+ cur->patch_size = size;
+ break;
}
-
- Complete = false;
- Status = StatDone;
- Dequeue();
- return true;
+ if (cur != available_patches.end())
+ continue;
+ if (Debug == true)
+ std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
+ << " wasn't in the list for the first parsed hash! (patches)" << std::endl;
+ break;
}
}
-
- // Nothing found, report and return false
- // Failing here is ok, if we return false later, the full
- // IndexFile is queued
- if(Debug)
- std::clog << "Can't find a patch in the index file" << std::endl;
- return false;
+
+ bool foundStart = false;
+ for (std::vector<DiffInfo>::iterator cur = available_patches.begin();
+ cur != available_patches.end(); ++cur)
+ {
+ if (LocalHashes != cur->result_hashes)
+ continue;
+
+ available_patches.erase(available_patches.begin(), cur);
+ foundStart = true;
+ break;
+ }
+
+ if (foundStart == false || unlikely(available_patches.empty() == true))
+ {
+ if (Debug)
+ std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
+ << "Couldn't find the start of the patch series." << std::endl;
+ return false;
+ }
+
+ // patching with too many files is rather slow compared to a fast download
+ unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
+ if (fileLimit != 0 && fileLimit < available_patches.size())
+ {
+ if (Debug)
+ std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit
+ << ") so fallback to complete download" << std::endl;
+ return false;
+ }
+
+ // calculate the size of all patches we have to get
+ // note that all sizes are uncompressed, while we download compressed files
+ unsigned long long patchesSize = 0;
+ for (std::vector<DiffInfo>::const_iterator cur = available_patches.begin();
+ cur != available_patches.end(); ++cur)
+ patchesSize += cur->patch_size;
+ unsigned long long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100);
+ if (false && sizeLimit > 0 && (sizeLimit/100) < patchesSize)
+ {
+ if (Debug)
+ std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100
+ << ") so fallback to complete download" << std::endl;
+ return false;
+ }
+
+ // we have something, queue the diffs
+ string::size_type const last_space = Description.rfind(" ");
+ if(last_space != string::npos)
+ Description.erase(last_space, Description.size()-last_space);
+
+ /* decide if we should download patches one by one or in one go:
+ the first is good if the server merges patches, but many don't, so
+ client-based merging can be attempted, in which case the second is better.
+ "bad things" will happen if patches are merged on the server but
+ client-side merging is attempted as well */
+ bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true);
+ if (pdiff_merge == true)
+ {
+ // reprepro adds this flag if it has merged patches on the server
+ std::string const precedence = Tags.FindS("X-Patch-Precedence");
+ pdiff_merge = (precedence != "merged");
+ }
+
+ if (pdiff_merge == false)
+ {
+ new pkgAcqIndexDiffs(Owner, Target, ExpectedHashes, MetaIndexParser,
+ available_patches);
+ }
+ else
+ {
+ std::vector<pkgAcqIndexMergeDiffs*> *diffs = new std::vector<pkgAcqIndexMergeDiffs*>(available_patches.size());
+ for(size_t i = 0; i < available_patches.size(); ++i)
+ (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, Target,
+ ExpectedHashes,
+ MetaIndexParser,
+ available_patches[i],
+ diffs);
+ }
+
+ Complete = false;
+ Status = StatDone;
+ Dequeue();
+ return true;
}
/*}}}*/
void pkgAcqDiffIndex::Failed(string Message,pkgAcquire::MethodConfig * /*Cnf*/)/*{{{*/
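For reference, the rewritten ParseDiffIndex walks one <hash>-Current, <hash>-History and <hash>-Patches stanza per supported hash type instead of hardcoding SHA1. A pdiff Index file of roughly this shape (illustrative, truncated values) is what the loops above consume; History lines carry the hash and uncompressed size of the index at each step of the series, Patches lines the hash and uncompressed size of each patch:

    SHA1-Current: 0b24d3f1... 9157043
    SHA1-History:
     6e1b0d8a... 9136529 2014-10-01-0810.28
     8f1c2a44... 9143687 2014-10-02-0810.28
    SHA1-Patches:
     0a2d7f19... 21457 2014-10-01-0810.28
     9b4e3c02... 18329 2014-10-02-0810.28

The same stanzas repeat for SHA256 and the other supported hashes, and the parser cross-checks that every hash type lists the same files and sizes. The two guards at the end are then plain arithmetic: Acquire::PDiffs::FileLimit caps the number of patches, and Acquire::PDiffs::SizeLimit is a percentage of the size advertised in <hash>-Current, so with the default of 100 and a 9 MB index, a patch series totalling more than 9 MB triggers the fallback to a complete download.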
@@ -606,7 +719,7 @@ void pkgAcqDiffIndex::Done(string Message,unsigned long long Size,HashStringList
DestFile = FinalFile;
if(!ParseDiffIndex(DestFile))
- return Failed("", NULL);
+ return Failed("Parsing pdiff Index failed", NULL);
Complete = true;
Status = StatDone;
@@ -623,12 +736,10 @@ pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire *Owner,
struct IndexTarget const * const Target,
HashStringList const &ExpectedHashes,
indexRecords *MetaIndexParser,
- string ServerSha1,
vector<DiffInfo> diffs)
: pkgAcqBaseIndex(Owner, Target, ExpectedHashes, MetaIndexParser),
- available_patches(diffs), ServerSha1(ServerSha1)
+ available_patches(diffs)
{
-
DestFile = _config->FindDir("Dir::State::lists") + "partial/";
DestFile += URItoFileName(Target->URI);
@@ -703,15 +814,21 @@ bool pkgAcqIndexDiffs::QueueNextDiff() /*{{{*/
FinalFile += URItoFileName(RealURI);
FileFd fd(FinalFile, FileFd::ReadOnly);
- SHA1Summation SHA1;
- SHA1.AddFD(fd);
- string local_sha1 = string(SHA1.Result());
+ Hashes LocalHashesCalc;
+ LocalHashesCalc.AddFD(fd);
+ HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
+
if(Debug)
- std::clog << "QueueNextDiff: "
- << FinalFile << " (" << local_sha1 << ")"<<std::endl;
+ std::clog << "QueueNextDiff: " << FinalFile << " (" << LocalHashes.find(NULL)->toStr() << ")" << std::endl;
+
+ if (unlikely(LocalHashes.usable() == false || ExpectedHashes.usable() == false))
+ {
+ Failed("Local/Expected hashes are not usable", NULL);
+ return false;
+ }
// final file reached before all patches are applied
- if(local_sha1 == ServerSha1)
+ if(LocalHashes == ExpectedHashes)
{
Finish(true);
return true;
@@ -719,10 +836,10 @@ bool pkgAcqIndexDiffs::QueueNextDiff() /*{{{*/
// remove all patches until the next matching patch is found
// this requires the Index file to be ordered
- for(vector<DiffInfo>::iterator I=available_patches.begin();
+ for(vector<DiffInfo>::iterator I = available_patches.begin();
available_patches.empty() == false &&
I != available_patches.end() &&
- I->sha1 != local_sha1;
+ I->result_hashes != LocalHashes;
++I)
{
available_patches.erase(I);
@@ -731,7 +848,7 @@ bool pkgAcqIndexDiffs::QueueNextDiff() /*{{{*/
// error checking and falling back if no patch was found
if(available_patches.empty() == true)
{
- Failed("", NULL);
+ Failed("No patches left to reach target", NULL);
return false;
}
@@ -743,7 +860,7 @@ bool pkgAcqIndexDiffs::QueueNextDiff() /*{{{*/
if(Debug)
std::clog << "pkgAcqIndexDiffs::QueueNextDiff(): " << Desc.URI << std::endl;
-
+
QueueURI(Desc);
return true;
@@ -763,6 +880,17 @@ void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size, HashStringLi
// success in downloading a diff, enter ApplyDiff state
if(State == StateFetchDiff)
{
+ FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Gzip);
+ class Hashes LocalHashesCalc;
+ LocalHashesCalc.AddFD(fd);
+ HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
+
+ if (fd.Size() != available_patches[0].patch_size ||
+ available_patches[0].patch_hashes != LocalHashes)
+ {
+ Failed("Patch has Size/Hashsum mismatch", NULL);
+ return;
+ }
// rred expects the patch as $FinalFile.ed
Rename(DestFile,FinalFile+".ed");
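The size/hash check added in this hunk reads the downloaded patch through FileFd's gzip filter because the pdiff Index records uncompressed sizes and hashes while the file travels compressed. A minimal sketch of that idiom, assuming apt-pkg's FileFd/Hashes API as used above, with a hypothetical helper name:

    #include <apt-pkg/fileutl.h>
    #include <apt-pkg/hashes.h>

    // hypothetical helper mirroring the check in Done():
    // FileFd::Gzip makes reads (and Size()) see the uncompressed payload
    bool PatchMatches(std::string const &Path, unsigned long long ExpectedSize,
                      HashStringList const &Expected)
    {
       FileFd fd(Path, FileFd::ReadOnly, FileFd::Gzip);
       Hashes calc;
       calc.AddFD(fd);
       return fd.Size() == ExpectedSize && calc.GetHashStringList() == Expected;
    }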
@@ -774,7 +902,15 @@ void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size, HashStringLi
Local = true;
Desc.URI = "rred:" + FinalFile;
QueueURI(Desc);
+ ActiveSubprocess = "rred";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
Mode = "rred";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic pop
+#endif
return;
}
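The push/ignore/pop dance that recurs throughout this commit is the standard way to keep assigning a member that is marked deprecated (the old Mode pointer, superseded by ActiveSubprocess) without breaking warning-clean builds. A self-contained sketch of the idiom, not APT's actual declarations:

    #include <string>

    struct Worker {
        std::string ActiveSubprocess;                  // new, preferred member
        __attribute__((deprecated)) const char *Mode;  // legacy member kept for ABI
    };

    void SetSubprocess(Worker &W, std::string const &Name)
    {
    #if __GNUC__ >= 4
        #pragma GCC diagnostic push
        #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
    #endif
        W.ActiveSubprocess = Name;
        W.Mode = W.ActiveSubprocess.c_str();           // intentional use, warning silenced
    #if __GNUC__ >= 4
        #pragma GCC diagnostic pop
    #endif
    }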
@@ -799,7 +935,7 @@ void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size, HashStringLi
if(available_patches.empty() == false) {
new pkgAcqIndexDiffs(Owner, Target,
ExpectedHashes, MetaIndexParser,
- ServerSha1, available_patches);
+ available_patches);
return Finish();
} else
return Finish(true);
@@ -871,6 +1007,17 @@ void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,HashStri
if (State == StateFetchDiff)
{
+ FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Gzip);
+ class Hashes LocalHashesCalc;
+ LocalHashesCalc.AddFD(fd);
+ HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
+
+ if (fd.Size() != patch.patch_size || patch.patch_hashes != LocalHashes)
+ {
+ Failed("Patch has Size/Hashsum mismatch", NULL);
+ return;
+ }
+
// rred expects the patch as $FinalFile.ed.$patchname.gz
Rename(DestFile, FinalFile + ".ed." + patch.file + ".gz");
@@ -894,7 +1041,15 @@ void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,HashStri
Local = true;
Desc.URI = "rred:" + FinalFile;
QueueURI(Desc);
+ ActiveSubprocess = "rred";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
Mode = "rred";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic pop
+#endif
return;
}
// success in download/apply all diffs, clean up
@@ -952,8 +1107,6 @@ pkgAcqIndex::pkgAcqIndex(pkgAcquire *Owner,
}
CompressionExtension = comprExt;
- Verify = true;
-
Init(URI, URIDesc, ShortDesc);
}
pkgAcqIndex::pkgAcqIndex(pkgAcquire *Owner, IndexTarget const *Target,
@@ -979,13 +1132,6 @@ pkgAcqIndex::pkgAcqIndex(pkgAcquire *Owner, IndexTarget const *Target,
if (CompressionExtension.empty() == false)
CompressionExtension.erase(CompressionExtension.end()-1);
- // only verify non-optional targets, see acquire-item.h for a FIXME
- // to make this more flexible
- if (Target->IsOptional())
- Verify = false;
- else
- Verify = true;
-
Init(Target->URI, Target->Description, Target->ShortDesc);
}
/*}}}*/
@@ -1008,6 +1154,7 @@ void pkgAcqIndex::Init(string const &URI, string const &URIDesc, string const &S
else
{
Desc.URI = URI + '.' + comprExt;
+ DestFile = DestFile + '.' + comprExt;
if(Target)
MetaKey = string(Target->MetaKey) + '.' + comprExt;
}
@@ -1018,6 +1165,8 @@ void pkgAcqIndex::Init(string const &URI, string const &URIDesc, string const &S
indexRecords::checkSum *Record = MetaIndexParser->Lookup(MetaKey);
if(Record)
FileSize = Record->Size;
+
+ InitByHashIfNeeded(MetaKey);
}
Desc.Description = URIDesc;
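With the new suffix handling, both the request URI and the partial file carry the compression extension. For a hypothetical target this gives:

    URI       = http://archive.example.org/dists/sid/main/binary-amd64/Packages
    comprExt  = bz2
    Desc.URI  = http://archive.example.org/dists/sid/main/binary-amd64/Packages.bz2
    DestFile  = /var/lib/apt/lists/partial/archive.example.org_dists_sid_main_binary-amd64_Packages.bz2

so the compressed download and its later decompressed form no longer collide in the partial directory.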
@@ -1027,21 +1176,51 @@ void pkgAcqIndex::Init(string const &URI, string const &URIDesc, string const &S
QueueURI(Desc);
}
/*}}}*/
+// AcqIndex::AdjustForByHash - modify URI for by-hash support /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+void pkgAcqIndex::InitByHashIfNeeded(const std::string MetaKey)
+{
+ // TODO:
+ // - (maybe?) add support for by-hash into the sources.list as flag
+ // - make apt-ftparchive generate the hashes (and expire?)
+ std::string HostKnob = "APT::Acquire::" + ::URI(Desc.URI).Host + "::By-Hash";
+ if(_config->FindB("APT::Acquire::By-Hash", false) == true ||
+ _config->FindB(HostKnob, false) == true ||
+ MetaIndexParser->GetSupportsAcquireByHash())
+ {
+ indexRecords::checkSum *Record = MetaIndexParser->Lookup(MetaKey);
+ if(Record)
+ {
+ // FIXME: should we really use the best hash here? or a fixed one?
+ const HashString *TargetHash = Record->Hashes.find("");
+ std::string ByHash = "/by-hash/" + TargetHash->HashType() + "/" + TargetHash->HashValue();
+ size_t trailing_slash = Desc.URI.find_last_of("/");
+ Desc.URI = Desc.URI.replace(
+ trailing_slash,
+ Desc.URI.substr(trailing_slash+1).size()+1,
+ ByHash);
+ } else {
+ _error->Warning(
+ "Fetching ByHash requested but can not find record for %s",
+ MetaKey.c_str());
+ }
+ }
+}
+ /*}}}*/
// AcqIndex::Custom600Headers - Insert custom request headers /*{{{*/
// ---------------------------------------------------------------------
/* The only header we use is the last-modified header. */
string pkgAcqIndex::Custom600Headers() const
{
+ std::string const compExt = CompressionExtension.substr(0, CompressionExtension.find(' '));
string Final = _config->FindDir("Dir::State::lists");
Final += URItoFileName(RealURI);
if (_config->FindB("Acquire::GzipIndexes",false))
- Final += ".gz";
+ Final += '.' + compExt;
string msg = "\nIndex-File: true";
- // FIXME: this really should use "IndexTarget::IsOptional()" but that
- // seems to be difficult without breaking ABI
- if (ShortDesc().find("Translation") != 0)
- msg += "\nFail-Ignore: true";
+
struct stat Buf;
if (stat(Final.c_str(),&Buf) == 0)
msg += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
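InitByHashIfNeeded only swaps the last path component of the already-built URI. With a hypothetical mirror and hash value the rewrite looks like:

    before: http://mirror.example.org/debian/dists/sid/main/binary-amd64/Packages.xz
    after:  http://mirror.example.org/debian/dists/sid/main/binary-amd64/by-hash/SHA256/1d6c3a7e...

The content addressing makes the fetch immune to the index changing on the server between the Release file download and this request.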
@@ -1069,6 +1248,31 @@ void pkgAcqIndex::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*{{{*/
Item::Failed(Message,Cnf);
}
/*}}}*/
+// pkgAcqIndex::GetFinalFilename - Return the full final file path /*{{{*/
+std::string pkgAcqIndex::GetFinalFilename(std::string const &URI,
+ std::string const &compExt)
+{
+ std::string FinalFile = _config->FindDir("Dir::State::lists");
+ FinalFile += URItoFileName(URI);
+ if (_config->FindB("Acquire::GzipIndexes",false) == true)
+ FinalFile += '.' + compExt;
+ return FinalFile;
+}
+ /*}}}*/
+// AcqIndex::ReverifyAfterIMS - Reverify index after an ims-hit /*{{{*/
+void pkgAcqIndex::ReverifyAfterIMS(std::string const &FileName)
+{
+ std::string const compExt = CompressionExtension.substr(0, CompressionExtension.find(' '));
+ if (_config->FindB("Acquire::GzipIndexes",false) == true)
+ DestFile += '.' + compExt;
+
+ string FinalFile = GetFinalFilename(RealURI, compExt);
+ Rename(FinalFile, FileName);
+ Decompression = true;
+ Desc.URI = "copy:" + FileName;
+ QueueURI(Desc);
+}
+ /*}}}*/
// AcqIndex::Done - Finished a fetch /*{{{*/
// ---------------------------------------------------------------------
/* This goes through a number of states.. On the initial fetch the
@@ -1080,39 +1284,41 @@ void pkgAcqIndex::Done(string Message,unsigned long long Size,HashStringList con
pkgAcquire::MethodConfig *Cfg)
{
Item::Done(Message,Size,Hashes,Cfg);
+ std::string const compExt = CompressionExtension.substr(0, CompressionExtension.find(' '));
if (Decompression == true)
{
if (ExpectedHashes.usable() && ExpectedHashes != Hashes)
{
+ Desc.URI = RealURI;
RenameOnError(HashSumMismatch);
printHashSumComparision(RealURI, ExpectedHashes, Hashes);
return;
}
- /* Verify the index file for correctness (all indexes must
- * have a Package field) (LP: #346386) (Closes: #627642) */
- if (Verify == true)
+ // FIXME: this can go away once we only ever download stuff that
+ // has a valid hash and we never do GET based probing
+ //
+ /* Always verify the index file for correctness (all indexes must
+ * have a Package field) (LP: #346386) (Closes: #627642)
+ */
+ FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Extension);
+ // Only test for correctness if the file is not empty (empty is ok)
+ if (fd.Size() > 0)
{
- FileFd fd(DestFile, FileFd::ReadOnly);
- // Only test for correctness if the file is not empty (empty is ok)
- if (fd.FileSize() > 0)
- {
- pkgTagSection sec;
- pkgTagFile tag(&fd);
-
- // all our current indexes have a field 'Package' in each section
- if (_error->PendingError() == true || tag.Step(sec) == false || sec.Exists("Package") == false)
- {
- RenameOnError(InvalidFormat);
- return;
- }
+ pkgTagSection sec;
+ pkgTagFile tag(&fd);
+
+ // all our current indexes have a field 'Package' in each section
+ if (_error->PendingError() == true || tag.Step(sec) == false || sec.Exists("Package") == false)
+ {
+ RenameOnError(InvalidFormat);
+ return;
}
}
// Done, move it into position
- string FinalFile = _config->FindDir("Dir::State::lists");
- FinalFile += URItoFileName(RealURI);
+ string FinalFile = GetFinalFilename(RealURI, compExt);
Rename(DestFile,FinalFile);
chmod(FinalFile.c_str(),0644);
@@ -1120,7 +1326,9 @@ void pkgAcqIndex::Done(string Message,unsigned long long Size,HashStringList con
will work OK */
DestFile = _config->FindDir("Dir::State::lists") + "partial/";
DestFile += URItoFileName(RealURI);
-
+ if (_config->FindB("Acquire::GzipIndexes",false))
+ DestFile += '.' + compExt;
+
// Remove the compressed version.
if (Erase == true)
unlink(DestFile.c_str());
@@ -1135,15 +1343,20 @@ void pkgAcqIndex::Done(string Message,unsigned long long Size,HashStringList con
string FileName = LookupTag(Message,"Alt-Filename");
if (FileName.empty() == false)
{
- // The files timestamp matches
- if (StringToBool(LookupTag(Message,"Alt-IMS-Hit"),false) == true)
- return;
Decompression = true;
Local = true;
DestFile += ".decomp";
Desc.URI = "copy:" + FileName;
QueueURI(Desc);
+ ActiveSubprocess = "copy";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
Mode = "copy";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic pop
+#endif
return;
}
@@ -1154,33 +1367,44 @@ void pkgAcqIndex::Done(string Message,unsigned long long Size,HashStringList con
ErrorText = "Method gave a blank filename";
}
- std::string const compExt = CompressionExtension.substr(0, CompressionExtension.find(' '));
-
- // The files timestamp matches
- if (StringToBool(LookupTag(Message,"IMS-Hit"),false) == true) {
- if (_config->FindB("Acquire::GzipIndexes",false) && compExt == "gz")
- // Update DestFile for .gz suffix so that the clean operation keeps it
- DestFile += ".gz";
- return;
- }
-
if (FileName == DestFile)
Erase = true;
else
Local = true;
-
+
+ // do not reverify cdrom sources as apt-cdrom may rewrite the Packages
+ // file when it's doing the indexcopy
+ if (RealURI.substr(0,6) == "cdrom:" &&
+ StringToBool(LookupTag(Message,"IMS-Hit"),false) == true)
+ return;
+
+ // The file's timestamp matches: for non-local URLs reverify the local
+ // file; for local files, uncompress again to ensure the hashsum still
+ // matches the Release file
+ if (!Local && StringToBool(LookupTag(Message,"IMS-Hit"),false) == true)
+ {
+ // set destfile to the final destfile
+ if(_config->FindB("Acquire::GzipIndexes",false) == false)
+ {
+ DestFile = _config->FindDir("Dir::State::lists") + "partial/";
+ DestFile += URItoFileName(RealURI);
+ }
+
+ ReverifyAfterIMS(FileName);
+ return;
+ }
string decompProg;
- // If we enable compressed indexes and already have gzip, keep it
- if (_config->FindB("Acquire::GzipIndexes",false) && compExt == "gz" && !Local) {
- string FinalFile = _config->FindDir("Dir::State::lists");
- FinalFile += URItoFileName(RealURI) + ".gz";
- Rename(DestFile,FinalFile);
- chmod(FinalFile.c_str(),0644);
-
- // Update DestFile for .gz suffix so that the clean operation keeps it
- DestFile = _config->FindDir("Dir::State::lists") + "partial/";
- DestFile += URItoFileName(RealURI) + ".gz";
+ // If we enable compressed indexes, queue for hash verification
+ if (_config->FindB("Acquire::GzipIndexes",false))
+ {
+ DestFile = _config->FindDir("Dir::State::lists");
+ DestFile += URItoFileName(RealURI) + '.' + compExt;
+
+ Decompression = true;
+ Desc.URI = "copy:" + FileName;
+ QueueURI(Desc);
+
return;
}
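Taken together, an IMS hit from a remote mirror no longer returns early; the already-present index instead takes a re-verification round trip, roughly (hypothetical setup, Acquire::GzipIndexes off):

    1. the http method reports IMS-Hit, so the local list file is current
    2. DestFile is pointed back at the partial directory
    3. ReverifyAfterIMS() moves the final list file aside and queues it as "copy:<file>"
    4. the copy method re-delivers it with Decompression set, so Done() re-runs
       the hash check against the Release file before moving it back into place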
@@ -1199,8 +1423,15 @@ void pkgAcqIndex::Done(string Message,unsigned long long Size,HashStringList con
Desc.URI = decompProg + ":" + FileName;
QueueURI(Desc);
- // FIXME: this points to a c++ string that goes out of scope
- Mode = decompProg.c_str();
+ ActiveSubprocess = decompProg;
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
+ Mode = ActiveSubprocess.c_str();
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic pop
+#endif
}
/*}}}*/
// AcqIndexTrans::pkgAcqIndexTrans - Constructor /*{{{*/
@@ -1225,8 +1456,11 @@ pkgAcqIndexTrans::pkgAcqIndexTrans(pkgAcquire *Owner, IndexTarget const * const
// ---------------------------------------------------------------------
string pkgAcqIndexTrans::Custom600Headers() const
{
+ std::string const compExt = CompressionExtension.substr(0, CompressionExtension.find(' '));
string Final = _config->FindDir("Dir::State::lists");
Final += URItoFileName(RealURI);
+ if (_config->FindB("Acquire::GzipIndexes",false))
+ Final += '.' + compExt;
struct stat Buf;
if (stat(Final.c_str(),&Buf) != 0)
@@ -1499,7 +1733,15 @@ void pkgAcqMetaIndex::Done(string Message,unsigned long long Size,HashStringList
AuthPass = true;
Desc.URI = "gpgv:" + SigFile;
QueueURI(Desc);
- Mode = "gpgv";
+ ActiveSubprocess = "gpgv";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
+ Mode = "gpgv";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic pop
+#endif
return;
}
}
@@ -1580,6 +1822,32 @@ void pkgAcqMetaIndex::AuthDone(string Message) /*{{{*/
std::cerr << "Signature verification succeeded: "
<< DestFile << std::endl;
+ // do not trust any previously unverified content that we may have
+ string LastGoodSigFile = _config->FindDir("Dir::State::lists").append("partial/").append(URItoFileName(RealURI));
+ if (DestFile != SigFile)
+ LastGoodSigFile.append(".gpg");
+ LastGoodSigFile.append(".reverify");
+ if(IMSHit == false && RealFileExists(LastGoodSigFile) == false)
+ {
+ for (vector <struct IndexTarget*>::const_iterator Target = IndexTargets->begin();
+ Target != IndexTargets->end();
+ ++Target)
+ {
+ // remove old indexes
+ std::string index = _config->FindDir("Dir::State::lists") +
+ URItoFileName((*Target)->URI);
+ unlink(index.c_str());
+ // and also old compressed indexes
+ std::vector<std::string> types = APT::Configuration::getCompressionTypes();
+ for (std::vector<std::string>::const_iterator t = types.begin(); t != types.end(); ++t)
+ {
+ std::string const compressed = index + '.' + (*t);
+ unlink(compressed.c_str());
+ }
+ }
+ }
+
+
// Download further indexes with verification
QueueIndexes(true);
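For a single index target the cleanup above therefore removes the plain file plus one candidate per configured compression type, for example (hypothetical mirror, typical types gz/bz2/xz):

    /var/lib/apt/lists/mirror.example.org_dists_sid_main_binary-amd64_Packages
    /var/lib/apt/lists/mirror.example.org_dists_sid_main_binary-amd64_Packages.gz
    /var/lib/apt/lists/mirror.example.org_dists_sid_main_binary-amd64_Packages.bz2
    /var/lib/apt/lists/mirror.example.org_dists_sid_main_binary-amd64_Packages.xz

ensuring nothing that predates the newly verified signature can be mistaken for verified content.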