author     Michael Vogt <mvo@debian.org>  2014-02-27 22:52:34 +0100
committer  Michael Vogt <mvo@debian.org>  2014-02-27 22:52:34 +0100
commit     fce69e7a0f38299c57ef96ae1c1dd9a5379bfd5a (patch)
tree       be7d18baa836e9df166ec63f6c9fe6f94bb84b40 /apt-pkg
parent     a5e790985752c6820e08e7a7e650e1607fa826e4 (diff)
parent     fc104da6a583736223b2f941e43a05ea26b63a7d (diff)
Merge branch 'debian/sid' into debian/experimental
Conflicts:
	apt-private/private-list.cc
	configure.ac
	debian/apt.install.in
	debian/changelog
Diffstat (limited to 'apt-pkg')
-rw-r--r--  apt-pkg/acquire-item.cc | 306
-rw-r--r--  apt-pkg/acquire-item.h | 119
-rw-r--r--  apt-pkg/acquire-worker.cc | 9
-rw-r--r--  apt-pkg/acquire.cc | 6
-rw-r--r--  apt-pkg/algorithms.cc | 322
-rw-r--r--  apt-pkg/algorithms.h | 20
-rw-r--r--  apt-pkg/aptconfiguration.cc | 16
-rw-r--r--  apt-pkg/aptconfiguration.h | 10
-rw-r--r--  apt-pkg/cacheiterators.h | 6
-rw-r--r--  apt-pkg/cacheset.cc | 31
-rw-r--r--  apt-pkg/cacheset.h | 6
-rw-r--r--  apt-pkg/cdrom.cc | 155
-rw-r--r--  apt-pkg/cdrom.h | 6
-rw-r--r--  apt-pkg/clean.cc | 2
-rw-r--r--  apt-pkg/contrib/cdromutl.cc | 4
-rw-r--r--  apt-pkg/contrib/cmndline.cc | 3
-rw-r--r--  apt-pkg/contrib/crc-16.cc | 2
-rw-r--r--  apt-pkg/contrib/error.h | 2
-rw-r--r--  apt-pkg/contrib/fileutl.cc | 192
-rw-r--r--  apt-pkg/contrib/fileutl.h | 5
-rw-r--r--  apt-pkg/contrib/gpgv.cc | 23
-rw-r--r--  apt-pkg/contrib/gpgv.h | 2
-rw-r--r--  apt-pkg/contrib/hashes.cc | 30
-rw-r--r--  apt-pkg/contrib/hashes.h | 9
-rw-r--r--  apt-pkg/contrib/hashsum.cc | 7
-rw-r--r--  apt-pkg/contrib/macros.h | 2
-rw-r--r--  apt-pkg/contrib/md5.h | 2
-rw-r--r--  apt-pkg/contrib/mmap.cc | 8
-rw-r--r--  apt-pkg/contrib/mmap.h | 4
-rw-r--r--  apt-pkg/contrib/progress.h | 2
-rw-r--r--  apt-pkg/contrib/sha2_internal.cc | 2
-rw-r--r--  apt-pkg/contrib/strutl.cc | 66
-rw-r--r--  apt-pkg/contrib/strutl.h | 33
-rw-r--r--  apt-pkg/deb/deblistparser.cc | 8
-rw-r--r--  apt-pkg/deb/debmetaindex.cc | 17
-rw-r--r--  apt-pkg/deb/debmetaindex.h | 7
-rw-r--r--  apt-pkg/deb/debsrcrecords.h | 6
-rw-r--r--  apt-pkg/deb/debsystem.cc | 2
-rw-r--r--  apt-pkg/deb/debversion.cc | 4
-rw-r--r--  apt-pkg/deb/dpkgpm.cc | 725
-rw-r--r--  apt-pkg/deb/dpkgpm.h | 32
-rw-r--r--  apt-pkg/depcache.cc | 29
-rw-r--r--  apt-pkg/depcache.h | 4
-rw-r--r--  apt-pkg/edsp.h | 6
-rw-r--r--  apt-pkg/indexfile.h | 8
-rw-r--r--  apt-pkg/indexrecords.cc | 5
-rw-r--r--  apt-pkg/indexrecords.h | 1
-rw-r--r--  apt-pkg/init.cc | 12
-rw-r--r--  apt-pkg/install-progress.cc | 375
-rw-r--r--  apt-pkg/install-progress.h | 156
-rw-r--r--  apt-pkg/makefile | 6
-rw-r--r--  apt-pkg/metaindex.h | 24
-rw-r--r--  apt-pkg/orderlist.cc | 8
-rw-r--r--  apt-pkg/packagemanager.cc | 87
-rw-r--r--  apt-pkg/packagemanager.h | 25
-rw-r--r--  apt-pkg/pkgcache.cc | 4
-rw-r--r--  apt-pkg/pkgcache.h | 2
-rw-r--r--  apt-pkg/pkgsystem.h | 4
-rw-r--r--  apt-pkg/policy.cc | 4
-rw-r--r--  apt-pkg/sourcelist.cc | 141
-rw-r--r--  apt-pkg/sourcelist.h | 10
-rw-r--r--  apt-pkg/srcrecords.cc | 5
-rw-r--r--  apt-pkg/tagfile.cc | 113
-rw-r--r--  apt-pkg/tagfile.h | 2
-rw-r--r--  apt-pkg/update.cc | 126
-rw-r--r--  apt-pkg/update.h | 21
-rw-r--r--  apt-pkg/upgrade.cc | 263
-rw-r--r--  apt-pkg/upgrade.h | 30
68 files changed, 2615 insertions, 1069 deletions
diff --git a/apt-pkg/acquire-item.cc b/apt-pkg/acquire-item.cc
index 12000a8c1..230d5aba9 100644
--- a/apt-pkg/acquire-item.cc
+++ b/apt-pkg/acquire-item.cc
@@ -129,7 +129,7 @@ void pkgAcquire::Item::Done(string Message,unsigned long long Size,string Hash,
/*}}}*/
// Acquire::Item::Rename - Rename a file /*{{{*/
// ---------------------------------------------------------------------
-/* This helper function is used by alot of item methods as thier final
+/* This helper function is used by a lot of item methods as their final
step */
void pkgAcquire::Item::Rename(string From,string To)
{
@@ -143,6 +143,32 @@ void pkgAcquire::Item::Rename(string From,string To)
}
}
/*}}}*/
+bool pkgAcquire::Item::RenameOnError(pkgAcquire::Item::RenameOnErrorState const error)/*{{{*/
+{
+ if(FileExists(DestFile))
+ Rename(DestFile, DestFile + ".FAILED");
+
+ switch (error)
+ {
+ case HashSumMismatch:
+ ErrorText = _("Hash Sum mismatch");
+ Status = StatAuthError;
+ ReportMirrorFailure("HashChecksumFailure");
+ break;
+ case SizeMismatch:
+ ErrorText = _("Size mismatch");
+ Status = StatAuthError;
+ ReportMirrorFailure("SizeFailure");
+ break;
+ case InvalidFormat:
+ ErrorText = _("Invalid file format");
+ Status = StatError;
+ // do not report as usually its not the mirrors fault, but Portal/Proxy
+ break;
+ }
+ return false;
+}
+ /*}}}*/
// Acquire::Item::ReportMirrorFailure /*{{{*/
// ---------------------------------------------------------------------
void pkgAcquire::Item::ReportMirrorFailure(string FailCode)
@@ -273,7 +299,7 @@ void pkgAcqSubIndex::Done(string Message,unsigned long long Size,string Md5Hash,
return;
}
- // sucess in downloading the index
+ // success in downloading the index
// rename the index
if(Debug)
std::clog << "Renaming: " << DestFile << " -> " << FinalFile << std::endl;
@@ -301,7 +327,7 @@ bool pkgAcqSubIndex::ParseIndex(string const &IndexFile) /*{{{*/
/*}}}*/
// AcqDiffIndex::AcqDiffIndex - Constructor /*{{{*/
// ---------------------------------------------------------------------
-/* Get the DiffIndex file first and see if there are patches availabe
+/* Get the DiffIndex file first and see if there are patches available
* If so, create a pkgAcqIndexDiffs fetcher that will get and apply the
* patches. If anything goes wrong in that process, it will fall back to
* the original packages file
@@ -343,10 +369,10 @@ pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire *Owner,
return;
}
- if(Debug)
- std::clog << "pkgAcqIndexDiffs::pkgAcqIndexDiffs(): "
- << CurrentPackagesFile << std::endl;
-
+ if(Debug)
+ std::clog << "pkgAcqDiffIndex::pkgAcqDiffIndex(): "
+ << CurrentPackagesFile << std::endl;
+
QueueURI(Desc);
}
@@ -372,8 +398,8 @@ string pkgAcqDiffIndex::Custom600Headers()
bool pkgAcqDiffIndex::ParseDiffIndex(string IndexDiffFile) /*{{{*/
{
if(Debug)
- std::clog << "pkgAcqIndexDiffs::ParseIndexDiff() " << IndexDiffFile
- << std::endl;
+ std::clog << "pkgAcqDiffIndex::ParseIndexDiff() " << IndexDiffFile
+ << std::endl;
pkgTagSection Tags;
string ServerSha1;
@@ -400,16 +426,18 @@ bool pkgAcqDiffIndex::ParseDiffIndex(string IndexDiffFile) /*{{{*/
SHA1.AddFD(fd);
string const local_sha1 = SHA1.Result();
- if(local_sha1 == ServerSha1)
+ if(local_sha1 == ServerSha1)
{
- // we have the same sha1 as the server
+ // we have the same sha1 as the server so we are done here
if(Debug)
std::clog << "Package file is up-to-date" << std::endl;
- // set found to true, this will queue a pkgAcqIndexDiffs with
- // a empty availabe_patches
- found = true;
- }
- else
+ // list cleanup needs to know that this file as well as the already
+ // present index is ours, so we create an empty diff to save it for us
+ new pkgAcqIndexDiffs(Owner, RealURI, Description, Desc.ShortDesc,
+ ExpectedHash, ServerSha1, available_patches);
+ return true;
+ }
+ else
{
if(Debug)
std::clog << "SHA1-Current: " << ServerSha1 << " and we start at "<< fd.Name() << " " << fd.Size() << " " << local_sha1 << std::endl;
@@ -470,14 +498,37 @@ bool pkgAcqDiffIndex::ParseDiffIndex(string IndexDiffFile) /*{{{*/
}
// we have something, queue the next diff
- if(found)
+ if(found)
{
// queue the diffs
string::size_type const last_space = Description.rfind(" ");
if(last_space != string::npos)
Description.erase(last_space, Description.size()-last_space);
- new pkgAcqIndexDiffs(Owner, RealURI, Description, Desc.ShortDesc,
- ExpectedHash, ServerSha1, available_patches);
+
+ /* decide if we should download patches one by one or in one go:
+ The first is good if the server merges patches, but many don't so client
+ based merging can be attempt in which case the second is better.
+ "bad things" will happen if patches are merged on the server,
+ but client side merging is attempt as well */
+ bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true);
+ if (pdiff_merge == true)
+ {
+ // reprepro adds this flag if it has merged patches on the server
+ std::string const precedence = Tags.FindS("X-Patch-Precedence");
+ pdiff_merge = (precedence != "merged");
+ }
+
+ if (pdiff_merge == false)
+ new pkgAcqIndexDiffs(Owner, RealURI, Description, Desc.ShortDesc,
+ ExpectedHash, ServerSha1, available_patches);
+ else
+ {
+ std::vector<pkgAcqIndexMergeDiffs*> *diffs = new std::vector<pkgAcqIndexMergeDiffs*>(available_patches.size());
+ for(size_t i = 0; i < available_patches.size(); ++i)
+ (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, RealURI, Description, Desc.ShortDesc, ExpectedHash,
+ available_patches[i], diffs);
+ }
+
Complete = false;
Status = StatDone;
Dequeue();
@@ -497,7 +548,7 @@ void pkgAcqDiffIndex::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*{{{
{
if(Debug)
std::clog << "pkgAcqDiffIndex failed: " << Desc.URI << std::endl
- << "Falling back to normal index file aquire" << std::endl;
+ << "Falling back to normal index file acquire" << std::endl;
new pkgAcqIndex(Owner, RealURI, Description, Desc.ShortDesc,
ExpectedHash);
@@ -518,7 +569,7 @@ void pkgAcqDiffIndex::Done(string Message,unsigned long long Size,string Md5Hash
string FinalFile;
FinalFile = _config->FindDir("Dir::State::lists")+URItoFileName(RealURI);
- // sucess in downloading the index
+ // success in downloading the index
// rename the index
FinalFile += string(".IndexDiff");
if(Debug)
@@ -577,7 +628,7 @@ void pkgAcqIndexDiffs::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*{{
{
if(Debug)
std::clog << "pkgAcqIndexDiffs failed: " << Desc.URI << std::endl
- << "Falling back to normal index file aquire" << std::endl;
+ << "Falling back to normal index file acquire" << std::endl;
new pkgAcqIndex(Owner, RealURI, Description,Desc.ShortDesc,
ExpectedHash);
Finish();
@@ -595,9 +646,7 @@ void pkgAcqIndexDiffs::Finish(bool allDone)
if(!ExpectedHash.empty() && !ExpectedHash.VerifyFile(DestFile))
{
- Status = StatAuthError;
- ErrorText = _("MD5Sum mismatch");
- Rename(DestFile,DestFile + ".FAILED");
+ RenameOnError(HashSumMismatch);
Dequeue();
return;
}
@@ -684,7 +733,7 @@ void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size,string Md5Has
string FinalFile;
FinalFile = _config->FindDir("Dir::State::lists")+URItoFileName(RealURI);
- // sucess in downloading a diff, enter ApplyDiff state
+ // success in downloading a diff, enter ApplyDiff state
if(State == StateFetchDiff)
{
@@ -708,6 +757,7 @@ void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size,string Md5Has
{
// remove the just applied patch
available_patches.erase(available_patches.begin());
+ unlink((FinalFile + ".ed").c_str());
// move into place
if(Debug)
@@ -728,6 +778,131 @@ void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size,string Md5Has
}
}
/*}}}*/
+// AcqIndexMergeDiffs::AcqIndexMergeDiffs - Constructor /*{{{*/
+pkgAcqIndexMergeDiffs::pkgAcqIndexMergeDiffs(pkgAcquire *Owner,
+ string const &URI, string const &URIDesc,
+ string const &ShortDesc, HashString const &ExpectedHash,
+ DiffInfo const &patch,
+ std::vector<pkgAcqIndexMergeDiffs*> const * const allPatches)
+ : Item(Owner), RealURI(URI), ExpectedHash(ExpectedHash),
+ patch(patch),allPatches(allPatches), State(StateFetchDiff)
+{
+
+ DestFile = _config->FindDir("Dir::State::lists") + "partial/";
+ DestFile += URItoFileName(URI);
+
+ Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
+
+ Description = URIDesc;
+ Desc.Owner = this;
+ Desc.ShortDesc = ShortDesc;
+
+ Desc.URI = string(RealURI) + ".diff/" + patch.file + ".gz";
+ Desc.Description = Description + " " + patch.file + string(".pdiff");
+ DestFile = _config->FindDir("Dir::State::lists") + "partial/";
+ DestFile += URItoFileName(RealURI + ".diff/" + patch.file);
+
+ if(Debug)
+ std::clog << "pkgAcqIndexMergeDiffs: " << Desc.URI << std::endl;
+
+ QueueURI(Desc);
+}
+ /*}}}*/
+void pkgAcqIndexMergeDiffs::Failed(string Message,pkgAcquire::MethodConfig *Cnf)/*{{{*/
+{
+ if(Debug)
+ std::clog << "pkgAcqIndexMergeDiffs failed: " << Desc.URI << " with " << Message << std::endl;
+ Complete = false;
+ Status = StatDone;
+ Dequeue();
+
+ // check if we are the first to fail, otherwise we are done here
+ State = StateDoneDiff;
+ for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
+ I != allPatches->end(); ++I)
+ if ((*I)->State == StateErrorDiff)
+ return;
+
+ // first failure means we should fallback
+ State = StateErrorDiff;
+ std::clog << "Falling back to normal index file acquire" << std::endl;
+ new pkgAcqIndex(Owner, RealURI, Description,Desc.ShortDesc,
+ ExpectedHash);
+}
+ /*}}}*/
+void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,string Md5Hash, /*{{{*/
+ pkgAcquire::MethodConfig *Cnf)
+{
+ if(Debug)
+ std::clog << "pkgAcqIndexMergeDiffs::Done(): " << Desc.URI << std::endl;
+
+ Item::Done(Message,Size,Md5Hash,Cnf);
+
+ string const FinalFile = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI);
+
+ if (State == StateFetchDiff)
+ {
+ // rred expects the patch as $FinalFile.ed.$patchname.gz
+ Rename(DestFile, FinalFile + ".ed." + patch.file + ".gz");
+
+ // check if this is the last completed diff
+ State = StateDoneDiff;
+ for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
+ I != allPatches->end(); ++I)
+ if ((*I)->State != StateDoneDiff)
+ {
+ if(Debug)
+ std::clog << "Not the last done diff in the batch: " << Desc.URI << std::endl;
+ return;
+ }
+
+ // this is the last completed diff, so we are ready to apply now
+ State = StateApplyDiff;
+
+ if(Debug)
+ std::clog << "Sending to rred method: " << FinalFile << std::endl;
+
+ Local = true;
+ Desc.URI = "rred:" + FinalFile;
+ QueueURI(Desc);
+ Mode = "rred";
+ return;
+ }
+ // success in download/apply all diffs, clean up
+ else if (State == StateApplyDiff)
+ {
+ // see if we really got the expected file
+ if(!ExpectedHash.empty() && !ExpectedHash.VerifyFile(DestFile))
+ {
+ RenameOnError(HashSumMismatch);
+ return;
+ }
+
+ // move the result into place
+ if(Debug)
+ std::clog << "Moving patched file in place: " << std::endl
+ << DestFile << " -> " << FinalFile << std::endl;
+ Rename(DestFile, FinalFile);
+ chmod(FinalFile.c_str(), 0644);
+
+ // otherwise lists cleanup will eat the file
+ DestFile = FinalFile;
+
+ // ensure the ed's are gone regardless of list-cleanup
+ for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
+ I != allPatches->end(); ++I)
+ {
+ std::string patch = FinalFile + ".ed." + (*I)->patch.file + ".gz";
+ unlink(patch.c_str());
+ }
+
+ // all set and done
+ Complete = true;
+ if(Debug)
+ std::clog << "allDone: " << DestFile << "\n" << std::endl;
+ }
+}
+ /*}}}*/
// AcqIndex::AcqIndex - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* The package file is added to the queue and a second class is
@@ -866,10 +1041,7 @@ void pkgAcqIndex::Done(string Message,unsigned long long Size,string Hash,
if (!ExpectedHash.empty() && ExpectedHash.toStr() != Hash)
{
- Status = StatAuthError;
- ErrorText = _("Hash Sum mismatch");
- Rename(DestFile,DestFile + ".FAILED");
- ReportMirrorFailure("HashChecksumFailure");
+ RenameOnError(HashSumMismatch);
return;
}
@@ -878,22 +1050,18 @@ void pkgAcqIndex::Done(string Message,unsigned long long Size,string Hash,
if (Verify == true)
{
FileFd fd(DestFile, FileFd::ReadOnly);
- pkgTagSection sec;
- pkgTagFile tag(&fd);
-
- // Only test for correctness if the file is not empty (empty is ok)
- if (fd.Size() > 0) {
- if (_error->PendingError() || !tag.Step(sec)) {
- Status = StatError;
- _error->DumpErrors();
- Rename(DestFile,DestFile + ".FAILED");
- return;
- } else if (!sec.Exists("Package")) {
- Status = StatError;
- ErrorText = ("Encountered a section with no Package: header");
- Rename(DestFile,DestFile + ".FAILED");
- return;
- }
+ // Only test for correctness if the file is not empty (empty is ok)
+ if (fd.FileSize() > 0)
+ {
+ pkgTagSection sec;
+ pkgTagFile tag(&fd);
+
+ // all our current indexes have a field 'Package' in each section
+ if (_error->PendingError() == true || tag.Step(sec) == false || sec.Exists("Package") == false)
+ {
+ RenameOnError(InvalidFormat);
+ return;
+ }
}
}
@@ -984,6 +1152,8 @@ void pkgAcqIndex::Done(string Message,unsigned long long Size,string Hash,
DestFile += ".decomp";
Desc.URI = decompProg + ":" + FileName;
QueueURI(Desc);
+
+ // FIXME: this points to a c++ string that goes out of scope
Mode = decompProg.c_str();
}
/*}}}*/
@@ -1070,7 +1240,7 @@ pkgAcqMetaSig::pkgAcqMetaSig(pkgAcquire *Owner, /*{{{*/
if (RealFileExists(Final) == true)
{
// File was already in place. It needs to be re-downloaded/verified
- // because Release might have changed, we do give it a differnt
+ // because Release might have changed, we do give it a different
// name than DestFile because otherwise the http method will
// send If-Range requests and there are too many broken servers
// out there that do not understand them
@@ -1728,34 +1898,40 @@ pkgAcqArchive::pkgAcqArchive(pkgAcquire *Owner,pkgSourceList *Sources,
}
// check if we have one trusted source for the package. if so, switch
- // to "TrustedOnly" mode
+ // to "TrustedOnly" mode - but only if not in AllowUnauthenticated mode
+ bool const allowUnauth = _config->FindB("APT::Get::AllowUnauthenticated", false);
+ bool const debugAuth = _config->FindB("Debug::pkgAcquire::Auth", false);
+ bool seenUntrusted = false;
for (pkgCache::VerFileIterator i = Version.FileList(); i.end() == false; ++i)
{
pkgIndexFile *Index;
if (Sources->FindIndex(i.File(),Index) == false)
continue;
- if (_config->FindB("Debug::pkgAcquire::Auth", false))
- {
+
+ if (debugAuth == true)
std::cerr << "Checking index: " << Index->Describe()
- << "(Trusted=" << Index->IsTrusted() << ")\n";
- }
- if (Index->IsTrusted()) {
+ << "(Trusted=" << Index->IsTrusted() << ")" << std::endl;
+
+ if (Index->IsTrusted() == true)
+ {
Trusted = true;
- break;
+ if (allowUnauth == false)
+ break;
}
+ else
+ seenUntrusted = true;
}
// "allow-unauthenticated" restores apts old fetching behaviour
// that means that e.g. unauthenticated file:// uris are higher
// priority than authenticated http:// uris
- if (_config->FindB("APT::Get::AllowUnauthenticated",false) == true)
+ if (allowUnauth == true && seenUntrusted == true)
Trusted = false;
// Select a source
if (QueueNext() == false && _error->PendingError() == false)
- _error->Error(_("I wasn't able to locate a file for the %s package. "
- "This might mean you need to manually fix this package."),
- Version.ParentPkg().Name());
+ _error->Error(_("Can't find a source to download version '%s' of '%s'"),
+ Version.VerStr(), Version.ParentPkg().FullName(false).c_str());
}
/*}}}*/
// AcqArchive::QueueNext - Queue the next file source /*{{{*/
@@ -1856,7 +2032,7 @@ bool pkgAcqArchive::QueueNext()
return true;
}
- /* Hmm, we have a file and its size does not match, this shouldnt
+ /* Hmm, we have a file and its size does not match, this shouldn't
happen.. */
unlink(FinalFile.c_str());
}
@@ -1909,18 +2085,14 @@ void pkgAcqArchive::Done(string Message,unsigned long long Size,string CalcHash,
// Check the size
if (Size != Version->Size)
{
- Status = StatError;
- ErrorText = _("Size mismatch");
+ RenameOnError(SizeMismatch);
return;
}
// Check the hash
if(ExpectedHash.toStr() != CalcHash)
{
- Status = StatError;
- ErrorText = _("Hash Sum mismatch");
- if(FileExists(DestFile))
- Rename(DestFile,DestFile + ".FAILED");
+ RenameOnError(HashSumMismatch);
return;
}
@@ -2060,9 +2232,7 @@ void pkgAcqFile::Done(string Message,unsigned long long Size,string CalcHash,
// Check the hash
if(!ExpectedHash.empty() && ExpectedHash.toStr() != CalcHash)
{
- Status = StatError;
- ErrorText = _("Hash Sum mismatch");
- Rename(DestFile,DestFile + ".FAILED");
+ RenameOnError(HashSumMismatch);
return;
}
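
The repeated error-handling pattern that this file's hunks remove (set Status, set ErrorText, rename the download to *.FAILED, optionally report a mirror failure) is now centralised in the protected RenameOnError() helper. A minimal sketch of how a derived item uses it; the class name below is hypothetical, while RenameOnError(), ExpectedHash and DestFile are the members used in this commit:

   // Hypothetical item subclass; the RenameOnError() calls are the point.
   void pkgAcqExampleItem::Done(std::string Message, unsigned long long Size,
                                std::string CalcHash, pkgAcquire::MethodConfig *Cnf)
   {
      Item::Done(Message, Size, CalcHash, Cnf);
      if (ExpectedHash.empty() == false && ExpectedHash.toStr() != CalcHash)
      {
         // renames DestFile to DestFile.FAILED, sets StatAuthError and
         // reports "HashChecksumFailure" to the mirror infrastructure
         RenameOnError(HashSumMismatch);
         return;
      }
      Complete = true;   // success path continues as before
   }
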
diff --git a/apt-pkg/acquire-item.h b/apt-pkg/acquire-item.h
index 10c855e63..5a1c7979c 100644
--- a/apt-pkg/acquire-item.h
+++ b/apt-pkg/acquire-item.h
@@ -83,7 +83,7 @@ class pkgAcquire::Item : public WeakPointable
* overwritten.
*/
void Rename(std::string From,std::string To);
-
+
public:
/** \brief The current status of this item. */
@@ -281,6 +281,21 @@ class pkgAcquire::Item : public WeakPointable
* pkgAcquire::Remove.
*/
virtual ~Item();
+
+ protected:
+
+ enum RenameOnErrorState {
+ HashSumMismatch,
+ SizeMismatch,
+ InvalidFormat
+ };
+
+ /** \brief Rename failed file and set error
+ *
+ * \param state respresenting the error we encountered
+ * \param errorMsg a message describing the error
+ */
+ bool RenameOnError(RenameOnErrorState const state);
};
/*}}}*/
/** \brief Information about an index patch (aka diff). */ /*{{{*/
@@ -414,7 +429,105 @@ class pkgAcqDiffIndex : public pkgAcquire::Item
std::string ShortDesc, HashString ExpectedHash);
};
/*}}}*/
-/** \brief An item that is responsible for fetching all the patches {{{
+/** \brief An item that is responsible for fetching client-merge patches {{{
+ * that need to be applied to a given package index file.
+ *
+ * Instead of downloading and applying each patch one by one like its
+ * sister #pkgAcqIndexDiffs this class will download all patches at once
+ * and call rred with all the patches downloaded once. Rred will then
+ * merge and apply them in one go, which should be a lot faster – but is
+ * incompatible with server-based merges of patches like reprepro can do.
+ *
+ * \sa pkgAcqDiffIndex, pkgAcqIndex
+ */
+class pkgAcqIndexMergeDiffs : public pkgAcquire::Item
+{
+ protected:
+
+ /** \brief If \b true, debugging output will be written to
+ * std::clog.
+ */
+ bool Debug;
+
+ /** \brief description of the item that is currently being
+ * downloaded.
+ */
+ pkgAcquire::ItemDesc Desc;
+
+ /** \brief URI of the package index file that is being
+ * reconstructed.
+ */
+ std::string RealURI;
+
+ /** \brief HashSum of the package index file that is being
+ * reconstructed.
+ */
+ HashString ExpectedHash;
+
+ /** \brief description of the file being downloaded. */
+ std::string Description;
+
+ /** \brief information about the current patch */
+ struct DiffInfo const patch;
+
+ /** \brief list of all download items for the patches */
+ std::vector<pkgAcqIndexMergeDiffs*> const * const allPatches;
+
+ /** The current status of this patch. */
+ enum DiffState
+ {
+ /** \brief The diff is currently being fetched. */
+ StateFetchDiff,
+
+ /** \brief The diff is currently being applied. */
+ StateApplyDiff,
+
+ /** \brief the work with this diff is done */
+ StateDoneDiff,
+
+ /** \brief something bad happened and fallback was triggered */
+ StateErrorDiff
+ } State;
+
+ public:
+ /** \brief Called when the patch file failed to be downloaded.
+ *
+ * This method will fall back to downloading the whole index file
+ * outright; its arguments are ignored.
+ */
+ virtual void Failed(std::string Message,pkgAcquire::MethodConfig *Cnf);
+
+ virtual void Done(std::string Message,unsigned long long Size,std::string Md5Hash,
+ pkgAcquire::MethodConfig *Cnf);
+ virtual std::string DescURI() {return RealURI + "Index";};
+
+ /** \brief Create an index merge-diff item.
+ *
+ * \param Owner The pkgAcquire object that owns this item.
+ *
+ * \param URI The URI of the package index file being
+ * reconstructed.
+ *
+ * \param URIDesc A long description of this item.
+ *
+ * \param ShortDesc A brief description of this item.
+ *
+ * \param ExpectedHash The expected md5sum of the completely
+ * reconstructed package index file; the index file will be tested
+ * against this value when it is entirely reconstructed.
+ *
+ * \param patch contains infos about the patch this item is supposed
+ * to download which were read from the index
+ *
+ * \param allPatches contains all related items so that each item can
+ * check if it was the last one to complete the download step
+ */
+ pkgAcqIndexMergeDiffs(pkgAcquire *Owner,std::string const &URI,std::string const &URIDesc,
+ std::string const &ShortDesc, HashString const &ExpectedHash,
+ DiffInfo const &patch, std::vector<pkgAcqIndexMergeDiffs*> const * const allPatches);
+};
+ /*}}}*/
+/** \brief An item that is responsible for fetching server-merge patches {{{
* that need to be applied to a given package index file.
*
* After downloading and applying a single patch, this item will
@@ -982,7 +1095,7 @@ class pkgAcqArchive : public pkgAcquire::Item
*
* \param Version The package version to download.
*
- * \param StoreFilename A location in which the actual filename of
+ * \param[out] StoreFilename A location in which the actual filename of
* the package should be stored. It will be set to a guessed
* basename in the constructor, and filled in with a fully
* qualified filename once the download finishes.
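
The new pkgAcqIndexMergeDiffs class works as a batch: pkgAcqDiffIndex creates one item per available patch and hands every item a pointer to the same vector, so whichever download finishes last can pass the whole set to the rred method in one go. A sketch of that setup, mirroring the pkgAcqDiffIndex hunk earlier in this commit (the surrounding variables are the ones used there):

   // Merging is the default; reprepro-style server-merged series opt out via
   // the X-Patch-Precedence field, users via Acquire::PDiffs::Merge "false";
   bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true);
   if (pdiff_merge == true)
      pdiff_merge = (Tags.FindS("X-Patch-Precedence") != "merged");

   if (pdiff_merge == false)
      new pkgAcqIndexDiffs(Owner, RealURI, Description, Desc.ShortDesc,
                           ExpectedHash, ServerSha1, available_patches);
   else
   {
      std::vector<pkgAcqIndexMergeDiffs*> *diffs =
         new std::vector<pkgAcqIndexMergeDiffs*>(available_patches.size());
      for (size_t i = 0; i < available_patches.size(); ++i)
         (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, RealURI, Description,
                          Desc.ShortDesc, ExpectedHash, available_patches[i], diffs);
   }
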
diff --git a/apt-pkg/acquire-worker.cc b/apt-pkg/acquire-worker.cc
index 44a84216a..de62080da 100644
--- a/apt-pkg/acquire-worker.cc
+++ b/apt-pkg/acquire-worker.cc
@@ -109,7 +109,12 @@ bool pkgAcquire::Worker::Start()
// Get the method path
string Method = _config->FindDir("Dir::Bin::Methods") + Access;
if (FileExists(Method) == false)
- return _error->Error(_("The method driver %s could not be found."),Method.c_str());
+ {
+ _error->Error(_("The method driver %s could not be found."),Method.c_str());
+ if (Access == "https")
+ _error->Notice(_("Is the package %s installed?"), "apt-transport-https");
+ return false;
+ }
if (Debug == true)
clog << "Starting method '" << Method << '\'' << endl;
@@ -563,7 +568,7 @@ bool pkgAcquire::Worker::InFdReady()
/*}}}*/
// Worker::MethodFailure - Called when the method fails /*{{{*/
// ---------------------------------------------------------------------
-/* This is called when the method is belived to have failed, probably because
+/* This is called when the method is believed to have failed, probably because
read returned -1. */
bool pkgAcquire::Worker::MethodFailure()
{
diff --git a/apt-pkg/acquire.cc b/apt-pkg/acquire.cc
index a8a5abd34..120e809e1 100644
--- a/apt-pkg/acquire.cc
+++ b/apt-pkg/acquire.cc
@@ -5,9 +5,9 @@
Acquire - File Acquiration
- The core element for the schedual system is the concept of a named
+ The core element for the schedule system is the concept of a named
queue. Each queue is unique and each queue has a name derived from the
- URI. The degree of paralization can be controled by how the queue
+ URI. The degree of paralization can be controlled by how the queue
name is derived from the URI.
##################################################################### */
@@ -175,7 +175,7 @@ void pkgAcquire::Add(Worker *Work)
// ---------------------------------------------------------------------
/* A worker has died. This can not be done while the select loop is running
as it would require that RunFds could handling a changing list state and
- it cant.. */
+ it can't.. */
void pkgAcquire::Remove(Worker *Work)
{
if (Running == true)
diff --git a/apt-pkg/algorithms.cc b/apt-pkg/algorithms.cc
index 69d4acd83..0363ab3e2 100644
--- a/apt-pkg/algorithms.cc
+++ b/apt-pkg/algorithms.cc
@@ -336,217 +336,6 @@ bool pkgFixBroken(pkgDepCache &Cache)
return Fix.Resolve(true);
}
/*}}}*/
-// DistUpgrade - Distribution upgrade /*{{{*/
-// ---------------------------------------------------------------------
-/* This autoinstalls every package and then force installs every
- pre-existing package. This creates the initial set of conditions which
- most likely contain problems because too many things were installed.
-
- The problem resolver is used to resolve the problems.
- */
-bool pkgDistUpgrade(pkgDepCache &Cache)
-{
- std::string const solver = _config->Find("APT::Solver", "internal");
- if (solver != "internal") {
- OpTextProgress Prog(*_config);
- return EDSP::ResolveExternal(solver.c_str(), Cache, false, true, false, &Prog);
- }
-
- pkgDepCache::ActionGroup group(Cache);
-
- /* Upgrade all installed packages first without autoinst to help the resolver
- in versioned or-groups to upgrade the old solver instead of installing
- a new one (if the old solver is not the first one [anymore]) */
- for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
- if (I->CurrentVer != 0)
- Cache.MarkInstall(I, false, 0, false);
-
- /* Auto upgrade all installed packages, this provides the basis
- for the installation */
- for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
- if (I->CurrentVer != 0)
- Cache.MarkInstall(I, true, 0, false);
-
- /* Now, install each essential package which is not installed
- (and not provided by another package in the same name group) */
- std::string essential = _config->Find("pkgCacheGen::Essential", "all");
- if (essential == "all")
- {
- for (pkgCache::GrpIterator G = Cache.GrpBegin(); G.end() == false; ++G)
- {
- bool isEssential = false;
- bool instEssential = false;
- for (pkgCache::PkgIterator P = G.PackageList(); P.end() == false; P = G.NextPkg(P))
- {
- if ((P->Flags & pkgCache::Flag::Essential) != pkgCache::Flag::Essential)
- continue;
- isEssential = true;
- if (Cache[P].Install() == true)
- {
- instEssential = true;
- break;
- }
- }
- if (isEssential == false || instEssential == true)
- continue;
- pkgCache::PkgIterator P = G.FindPreferredPkg();
- Cache.MarkInstall(P, true, 0, false);
- }
- }
- else if (essential != "none")
- for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
- if ((I->Flags & pkgCache::Flag::Essential) == pkgCache::Flag::Essential)
- Cache.MarkInstall(I, true, 0, false);
-
- /* We do it again over all previously installed packages to force
- conflict resolution on them all. */
- for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
- if (I->CurrentVer != 0)
- Cache.MarkInstall(I, false, 0, false);
-
- pkgProblemResolver Fix(&Cache);
-
- // Hold back held packages.
- if (_config->FindB("APT::Ignore-Hold",false) == false)
- {
- for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
- {
- if (I->SelectedState == pkgCache::State::Hold)
- {
- Fix.Protect(I);
- Cache.MarkKeep(I, false, false);
- }
- }
- }
-
- return Fix.Resolve();
-}
- /*}}}*/
-// AllUpgrade - Upgrade as many packages as possible /*{{{*/
-// ---------------------------------------------------------------------
-/* Right now the system must be consistent before this can be called.
- It also will not change packages marked for install, it only tries
- to install packages not marked for install */
-bool pkgAllUpgrade(pkgDepCache &Cache)
-{
- std::string const solver = _config->Find("APT::Solver", "internal");
- if (solver != "internal") {
- OpTextProgress Prog(*_config);
- return EDSP::ResolveExternal(solver.c_str(), Cache, true, false, false, &Prog);
- }
-
- pkgDepCache::ActionGroup group(Cache);
-
- pkgProblemResolver Fix(&Cache);
-
- if (Cache.BrokenCount() != 0)
- return false;
-
- // Upgrade all installed packages
- for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
- {
- if (Cache[I].Install() == true)
- Fix.Protect(I);
-
- if (_config->FindB("APT::Ignore-Hold",false) == false)
- if (I->SelectedState == pkgCache::State::Hold)
- continue;
-
- if (I->CurrentVer != 0 && Cache[I].InstallVer != 0)
- Cache.MarkInstall(I, false, 0, false);
- }
-
- return Fix.ResolveByKeep();
-}
- /*}}}*/
-// AllUpgradeNoDelete - Upgrade without removing packages /*{{{*/
-// ---------------------------------------------------------------------
-/* Right now the system must be consistent before this can be called.
- * Upgrade as much as possible without deleting anything (useful for
- * stable systems)
- */
-bool pkgAllUpgradeNoDelete(pkgDepCache &Cache)
-{
- pkgDepCache::ActionGroup group(Cache);
-
- pkgProblemResolver Fix(&Cache);
-
- if (Cache.BrokenCount() != 0)
- return false;
-
- // provide the initial set of stuff we want to upgrade by marking
- // all upgradable packages for upgrade
- for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
- {
- if (I->CurrentVer != 0 && Cache[I].InstallVer != 0)
- {
- if (_config->FindB("APT::Ignore-Hold",false) == false)
- if (I->SelectedState == pkgCache::State::Hold)
- continue;
-
- Cache.MarkInstall(I, false, 0, false);
- }
- }
-
- // then let auto-install loose
- for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
- if (Cache[I].Install())
- Cache.MarkInstall(I, true, 0, false);
-
- // ... but it may remove stuff, we we need to clean up afterwards again
- for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
- if (Cache[I].Delete() == true)
- Cache.MarkKeep(I, false, false);
-
- // resolve remaining issues via keep
- return Fix.ResolveByKeep();
-}
- /*}}}*/
-// MinimizeUpgrade - Minimizes the set of packages to be upgraded /*{{{*/
-// ---------------------------------------------------------------------
-/* This simply goes over the entire set of packages and tries to keep
- each package marked for upgrade. If a conflict is generated then
- the package is restored. */
-bool pkgMinimizeUpgrade(pkgDepCache &Cache)
-{
- pkgDepCache::ActionGroup group(Cache);
-
- if (Cache.BrokenCount() != 0)
- return false;
-
- // We loop for 10 tries to get the minimal set size.
- bool Change = false;
- unsigned int Count = 0;
- do
- {
- Change = false;
- for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
- {
- // Not interesting
- if (Cache[I].Upgrade() == false || Cache[I].NewInstall() == true)
- continue;
-
- // Keep it and see if that is OK
- Cache.MarkKeep(I, false, false);
- if (Cache.BrokenCount() != 0)
- Cache.MarkInstall(I, false, 0, false);
- else
- {
- // If keep didnt actually do anything then there was no change..
- if (Cache[I].Upgrade() == false)
- Change = true;
- }
- }
- ++Count;
- }
- while (Change == true && Count < 10);
-
- if (Cache.BrokenCount() != 0)
- return _error->Error("Internal Error in pkgMinimizeUpgrade");
-
- return true;
-}
- /*}}}*/
// ProblemResolver::pkgProblemResolver - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
@@ -635,7 +424,7 @@ void pkgProblemResolver::MakeScores()
/* This is arbitrary, it should be high enough to elevate an
essantial package above most other packages but low enough
to allow an obsolete essential packages to be removed by
- a conflicts on a powerfull normal package (ie libc6) */
+ a conflicts on a powerful normal package (ie libc6) */
if ((I->Flags & pkgCache::Flag::Essential) == pkgCache::Flag::Essential
|| (I->Flags & pkgCache::Flag::Important) == pkgCache::Flag::Important)
Score += PrioEssentials;
@@ -652,7 +441,7 @@ void pkgProblemResolver::MakeScores()
Score += PrioInstalledAndNotObsolete;
}
- // Now that we have the base scores we go and propogate dependencies
+ // Now that we have the base scores we go and propagate dependencies
for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
{
if (Cache[I].InstallVer == 0)
@@ -696,7 +485,7 @@ void pkgProblemResolver::MakeScores()
}
}
- /* Now we propogate along provides. This makes the packages that
+ /* Now we propagate along provides. This makes the packages that
provide important packages extremely important */
for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
{
@@ -851,7 +640,7 @@ bool pkgProblemResolver::Resolve(bool BrokenFix)
adjusting the package will inflict.
It goes from highest score to lowest and corrects all of the breaks by
- keeping or removing the dependant packages. If that fails then it removes
+ keeping or removing the dependent packages. If that fails then it removes
the package itself and goes on. The routine should be able to intelligently
go from any broken state to a fixed state.
@@ -1041,7 +830,7 @@ bool pkgProblemResolver::ResolveInternal(bool const BrokenFix)
/* Look across the version list. If there are no possible
targets then we keep the package and bail. This is necessary
- if a package has a dep on another package that cant be found */
+ if a package has a dep on another package that can't be found */
SPtrArray<pkgCache::Version *> VList = Start.AllTargets();
if (*VList == 0 && (Flags[I->ID] & Protected) != Protected &&
Start.IsNegative() == false &&
@@ -1394,7 +1183,7 @@ bool pkgProblemResolver::ResolveByKeepInternal()
continue;
/* Keep the package. If this works then great, otherwise we have
- to be significantly more agressive and manipulate its dependencies */
+ to be significantly more aggressive and manipulate its dependencies */
if ((Flags[I->ID] & Protected) == 0)
{
if (Debug == true)
@@ -1548,102 +1337,3 @@ void pkgPrioSortList(pkgCache &Cache,pkgCache::Version **List)
qsort(List,Count,sizeof(*List),PrioComp);
}
/*}}}*/
-// ListUpdate - construct Fetcher and update the cache files /*{{{*/
-// ---------------------------------------------------------------------
-/* This is a simple wrapper to update the cache. it will fetch stuff
- * from the network (or any other sources defined in sources.list)
- */
-bool ListUpdate(pkgAcquireStatus &Stat,
- pkgSourceList &List,
- int PulseInterval)
-{
- pkgAcquire Fetcher;
- if (Fetcher.Setup(&Stat, _config->FindDir("Dir::State::Lists")) == false)
- return false;
-
- // Populate it with the source selection
- if (List.GetIndexes(&Fetcher) == false)
- return false;
-
- return AcquireUpdate(Fetcher, PulseInterval, true);
-}
- /*}}}*/
-// AcquireUpdate - take Fetcher and update the cache files /*{{{*/
-// ---------------------------------------------------------------------
-/* This is a simple wrapper to update the cache with a provided acquire
- * If you only need control over Status and the used SourcesList use
- * ListUpdate method instead.
- */
-bool AcquireUpdate(pkgAcquire &Fetcher, int const PulseInterval,
- bool const RunUpdateScripts, bool const ListCleanup)
-{
- // Run scripts
- if (RunUpdateScripts == true)
- RunScripts("APT::Update::Pre-Invoke");
-
- pkgAcquire::RunResult res;
- if(PulseInterval > 0)
- res = Fetcher.Run(PulseInterval);
- else
- res = Fetcher.Run();
-
- if (res == pkgAcquire::Failed)
- return false;
-
- bool Failed = false;
- bool TransientNetworkFailure = false;
- for (pkgAcquire::ItemIterator I = Fetcher.ItemsBegin();
- I != Fetcher.ItemsEnd(); ++I)
- {
- if ((*I)->Status == pkgAcquire::Item::StatDone)
- continue;
-
- (*I)->Finished();
-
- ::URI uri((*I)->DescURI());
- uri.User.clear();
- uri.Password.clear();
- string descUri = string(uri);
- _error->Warning(_("Failed to fetch %s %s\n"), descUri.c_str(),
- (*I)->ErrorText.c_str());
-
- if ((*I)->Status == pkgAcquire::Item::StatTransientNetworkError)
- {
- TransientNetworkFailure = true;
- continue;
- }
-
- Failed = true;
- }
-
- // Clean out any old list files
- // Keep "APT::Get::List-Cleanup" name for compatibility, but
- // this is really a global option for the APT library now
- if (!TransientNetworkFailure && !Failed && ListCleanup == true &&
- (_config->FindB("APT::Get::List-Cleanup",true) == true &&
- _config->FindB("APT::List-Cleanup",true) == true))
- {
- if (Fetcher.Clean(_config->FindDir("Dir::State::lists")) == false ||
- Fetcher.Clean(_config->FindDir("Dir::State::lists") + "partial/") == false)
- // something went wrong with the clean
- return false;
- }
-
- if (TransientNetworkFailure == true)
- _error->Warning(_("Some index files failed to download. They have been ignored, or old ones used instead."));
- else if (Failed == true)
- return _error->Error(_("Some index files failed to download. They have been ignored, or old ones used instead."));
-
-
- // Run the success scripts if all was fine
- if (RunUpdateScripts == true)
- {
- if(!TransientNetworkFailure && !Failed)
- RunScripts("APT::Update::Post-Invoke-Success");
-
- // Run the other scripts
- RunScripts("APT::Update::Post-Invoke");
- }
- return true;
-}
- /*}}}*/
diff --git a/apt-pkg/algorithms.h b/apt-pkg/algorithms.h
index a499db8ba..489d81159 100644
--- a/apt-pkg/algorithms.h
+++ b/apt-pkg/algorithms.h
@@ -10,7 +10,7 @@
see all of the effects of an upgrade run.
pkgDistUpgrade computes an upgrade that causes as many packages as
- possible to move to the newest verison.
+ possible to move to the newest version.
pkgApplyStatus sets the target state based on the content of the status
field in the status file. It is important to get proper crash recovery.
@@ -43,7 +43,12 @@
using std::ostream;
#endif
-class pkgAcquireStatus;
+#ifndef APT_9_CLEANER_HEADERS
+// include pkg{DistUpgrade,AllUpgrade,MiniizeUpgrade} here for compatibility
+#include <apt-pkg/upgrade.h>
+#include <apt-pkg/update.h>
+#endif
+
class pkgSimulate : public pkgPackageManager /*{{{*/
{
@@ -85,6 +90,7 @@ private:
/*}}}*/
class pkgProblemResolver /*{{{*/
{
+ private:
/** \brief dpointer placeholder (for later in case we need it) */
void *d;
@@ -140,20 +146,10 @@ class pkgProblemResolver /*{{{*/
~pkgProblemResolver();
};
/*}}}*/
-bool pkgDistUpgrade(pkgDepCache &Cache);
bool pkgApplyStatus(pkgDepCache &Cache);
bool pkgFixBroken(pkgDepCache &Cache);
-bool pkgAllUpgrade(pkgDepCache &Cache);
-
-bool pkgAllUpgradeNoDelete(pkgDepCache &Cache);
-
-bool pkgMinimizeUpgrade(pkgDepCache &Cache);
-
void pkgPrioSortList(pkgCache &Cache,pkgCache::Version **List);
-bool ListUpdate(pkgAcquireStatus &progress, pkgSourceList &List, int PulseInterval=0);
-bool AcquireUpdate(pkgAcquire &Fetcher, int const PulseInterval = 0,
- bool const RunUpdateScripts = true, bool const ListCleanup = true);
#endif
diff --git a/apt-pkg/aptconfiguration.cc b/apt-pkg/aptconfiguration.cc
index e32e553a4..0b0b546c5 100644
--- a/apt-pkg/aptconfiguration.cc
+++ b/apt-pkg/aptconfiguration.cc
@@ -27,9 +27,9 @@
#include <vector>
/*}}}*/
namespace APT {
-// getCompressionTypes - Return Vector of usbale compressiontypes /*{{{*/
+// getCompressionTypes - Return Vector of usable compressiontypes /*{{{*/
// ---------------------------------------------------------------------
-/* return a vector of compression types in the prefered order. */
+/* return a vector of compression types in the preferred order. */
std::vector<std::string>
const Configuration::getCompressionTypes(bool const &Cached) {
static std::vector<std::string> types;
@@ -109,7 +109,7 @@ const Configuration::getCompressionTypes(bool const &Cached) {
/*}}}*/
// GetLanguages - Return Vector of Language Codes /*{{{*/
// ---------------------------------------------------------------------
-/* return a vector of language codes in the prefered order.
+/* return a vector of language codes in the preferred order.
the special word "environment" will be replaced with the long and the short
code of the local settings and it will be insured that this will not add
duplicates. So in an german local the setting "environment, de_DE, en, de"
@@ -141,7 +141,7 @@ std::vector<std::string> const Configuration::getLanguages(bool const &All,
// so they will be all included in the Cache.
std::vector<string> builtin;
DIR *D = opendir(_config->FindDir("Dir::State::lists").c_str());
- if (D != 0) {
+ if (D != NULL) {
builtin.push_back("none");
for (struct dirent *Ent = readdir(D); Ent != 0; Ent = readdir(D)) {
string const name = SubstVar(Ent->d_name, "%5f", "_");
@@ -166,8 +166,8 @@ std::vector<std::string> const Configuration::getLanguages(bool const &All,
continue;
builtin.push_back(c);
}
+ closedir(D);
}
- closedir(D);
// FIXME: Remove support for the old APT::Acquire::Translation
// it was undocumented and so it should be not very widthly used
@@ -330,7 +330,7 @@ bool const Configuration::checkLanguage(std::string Lang, bool const All) {
return (std::find(langs.begin(), langs.end(), Lang) != langs.end());
}
/*}}}*/
-// getArchitectures - Return Vector of prefered Architectures /*{{{*/
+// getArchitectures - Return Vector of preferred Architectures /*{{{*/
std::vector<std::string> const Configuration::getArchitectures(bool const &Cached) {
using std::string;
@@ -392,7 +392,7 @@ std::vector<std::string> const Configuration::getArchitectures(bool const &Cache
dup2(nullfd, STDIN_FILENO);
dup2(external[1], STDOUT_FILENO);
dup2(nullfd, STDERR_FILENO);
- if (chrootDir != "/" && chroot(chrootDir.c_str()) != 0)
+ if (chrootDir != "/" && chroot(chrootDir.c_str()) != 0 && chdir("/") != 0)
_error->WarningE("getArchitecture", "Couldn't chroot into %s for dpkg --print-foreign-architectures", chrootDir.c_str());
execvp(Args[0], (char**) &Args[0]);
_error->WarningE("getArchitecture", "Can't detect foreign architectures supported by dpkg!");
@@ -453,7 +453,7 @@ void Configuration::setDefaultConfigurationForCompressors() {
_config->CndSet("Dir::Bin::bzip2", "/bin/bzip2");
_config->CndSet("Dir::Bin::xz", "/usr/bin/xz");
if (FileExists(_config->FindFile("Dir::Bin::xz")) == true) {
- _config->Clear("Dir::Bin::lzma");
+ _config->Set("Dir::Bin::lzma", _config->FindFile("Dir::Bin::xz"));
_config->Set("APT::Compressor::lzma::Binary", "xz");
if (_config->Exists("APT::Compressor::lzma::CompressArg") == false) {
_config->Set("APT::Compressor::lzma::CompressArg::", "--format=lzma");
diff --git a/apt-pkg/aptconfiguration.h b/apt-pkg/aptconfiguration.h
index d22b675c0..bf7deae85 100644
--- a/apt-pkg/aptconfiguration.h
+++ b/apt-pkg/aptconfiguration.h
@@ -37,14 +37,14 @@ public: /*{{{*/
* \param Cached saves the result so we need to calculated it only once
* this parameter should ony be used for testing purposes.
*
- * \return a vector of the compression types in the prefered usage order
+ * \return a vector of the compression types in the preferred usage order
*/
std::vector<std::string> static const getCompressionTypes(bool const &Cached = true);
/** \brief Returns a vector of Language Codes
*
* Languages can be defined with their two or five chars long code.
- * This methods handles the various ways to set the prefered codes,
+ * This methods handles the various ways to set the preferred codes,
* honors the environment and ensures that the codes are not listed twice.
*
* The special word "environment" will be replaced with the long and the short
@@ -52,7 +52,7 @@ public: /*{{{*/
* duplicates. So in an german local the setting "environment, de_DE, en, de"
* will result in "de_DE, de, en".
*
- * Another special word is "none" which separates the prefered from all codes
+ * Another special word is "none" which separates the preferred from all codes
* in this setting. So setting and method can be used to get codes the user want
* to see or to get all language codes APT (should) have Translations available.
*
@@ -62,7 +62,7 @@ public: /*{{{*/
* \param Locale don't get the locale from the system but use this one instead
* this parameter should ony be used for testing purposes.
*
- * \return a vector of (all) Language Codes in the prefered usage order
+ * \return a vector of (all) Language Codes in the preferred usage order
*/
std::vector<std::string> static const getLanguages(bool const &All = false,
bool const &Cached = true, char const ** const Locale = 0);
@@ -80,7 +80,7 @@ public: /*{{{*/
* \param Cached saves the result so we need to calculated it only once
* this parameter should ony be used for testing purposes.
*
- * \return a vector of Architectures in prefered order
+ * \return a vector of Architectures in preferred order
*/
std::vector<std::string> static const getArchitectures(bool const &Cached = true);
diff --git a/apt-pkg/cacheiterators.h b/apt-pkg/cacheiterators.h
index 886d84838..ea6a4afba 100644
--- a/apt-pkg/cacheiterators.h
+++ b/apt-pkg/cacheiterators.h
@@ -46,7 +46,7 @@ template<typename Str, typename Itr> class pkgCache::Iterator :
* The implementation of this method should be pretty short
* as it will only return the Pointer into the mmap stored
* in the owner but the name of this pointer is different for
- * each stucture and we want to abstract here at least for the
+ * each structure and we want to abstract here at least for the
* basic methods from the actual structure.
* \return Pointer to the first structure of this type
*/
@@ -198,7 +198,7 @@ class pkgCache::VerIterator : public Iterator<Version, VerIterator> {
/** \brief compares two version and returns if they are similar
This method should be used to identify if two pseudo versions are
- refering to the same "real" version */
+ referring to the same "real" version */
inline bool SimilarVer(const VerIterator &B) const {
return (B.end() == false && S->Hash == B->Hash && strcmp(VerStr(), B.VerStr()) == 0);
};
@@ -419,7 +419,7 @@ class pkgCache::DescFileIterator : public Iterator<DescFile, DescFileIterator> {
inline DescFileIterator(pkgCache &Owner,DescFile *Trg) : Iterator<DescFile, DescFileIterator>(Owner, Trg) {};
};
/*}}}*/
-// Inlined Begin functions cant be in the class because of order problems /*{{{*/
+// Inlined Begin functions can't be in the class because of order problems /*{{{*/
inline pkgCache::PkgIterator pkgCache::GrpIterator::PackageList() const
{return PkgIterator(*Owner,Owner->PkgP + S->FirstPackage);};
inline pkgCache::VerIterator pkgCache::PkgIterator::VersionList() const
diff --git a/apt-pkg/cacheset.cc b/apt-pkg/cacheset.cc
index 0147f7e86..6b6fdb5ad 100644
--- a/apt-pkg/cacheset.cc
+++ b/apt-pkg/cacheset.cc
@@ -73,6 +73,8 @@ bool PackageContainerInterface::FromTask(PackageContainerInterface * const pci,
const char *start, *end;
parser.GetRec(start,end);
unsigned int const length = end - start;
+ if (unlikely(length == 0))
+ continue;
char buf[length];
strncpy(buf, start, length);
buf[length-1] = '\0';
@@ -202,12 +204,20 @@ PackageContainerInterface::FromFnmatch(PackageContainerInterface * const pci,
}
pci->insert(Pkg);
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+ helper.showFnmatchSelection(Pkg, pattern);
+#else
helper.showRegExSelection(Pkg, pattern);
+#endif
found = true;
}
if (found == false) {
- helper.canNotFindRegEx(pci, Cache, pattern);
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+ helper.canNotFindFnmatch(pci, Cache, pattern);
+#else
+ helper.canNotFindRegEx(pci, Cache, pattern);
+#endif
pci->setConstructor(UNKNOWN);
return false;
}
@@ -302,7 +312,9 @@ bool PackageContainerInterface::FromString(PackageContainerInterface * const pci
if (FromGroup(pci, Cache, str, helper) == false &&
FromTask(pci, Cache, str, helper) == false &&
- FromFnmatch(pci, Cache, str, helper) == false &&
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+ FromFnmatch(pci, Cache, str, helper) == false)
+#endif
FromRegEx(pci, Cache, str, helper) == false)
{
helper.canNotFindPackage(pci, Cache, str);
@@ -586,7 +598,13 @@ void CacheSetHelper::canNotFindRegEx(PackageContainerInterface * const pci, pkgC
if (ShowError == true)
_error->Insert(ErrorType, _("Couldn't find any package by regex '%s'"), pattern.c_str());
}
- /*}}}*/
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+// canNotFindFnmatch - handle the case no package is found by a fnmatch /*{{{*/
+void CacheSetHelper::canNotFindFnmatch(PackageContainerInterface * const pci, pkgCacheFile &Cache, std::string pattern) {
+ if (ShowError == true)
+ _error->Insert(ErrorType, _("Couldn't find any package by glob '%s'"), pattern.c_str());
+}
+#endif /*}}}*/
// canNotFindPackage - handle the case no package is found from a string/*{{{*/
void CacheSetHelper::canNotFindPackage(PackageContainerInterface * const pci, pkgCacheFile &Cache, std::string const &str) {
}
@@ -646,6 +664,13 @@ void CacheSetHelper::showRegExSelection(pkgCache::PkgIterator const &pkg,
std::string const &pattern) {
}
/*}}}*/
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+// showFnmatchSelection /*{{{*/
+void CacheSetHelper::showFnmatchSelection(pkgCache::PkgIterator const &pkg,
+ std::string const &pattern) {
+}
+ /*}}}*/
+#endif
// showSelectedVersion /*{{{*/
void CacheSetHelper::showSelectedVersion(pkgCache::PkgIterator const &Pkg,
pkgCache::VerIterator const Ver,
diff --git a/apt-pkg/cacheset.h b/apt-pkg/cacheset.h
index 29103aad9..b69d74b8e 100644
--- a/apt-pkg/cacheset.h
+++ b/apt-pkg/cacheset.h
@@ -48,11 +48,17 @@ public: /*{{{*/
virtual void showTaskSelection(pkgCache::PkgIterator const &pkg, std::string const &pattern);
virtual void showRegExSelection(pkgCache::PkgIterator const &pkg, std::string const &pattern);
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+ virtual void showFnmatchSelection(pkgCache::PkgIterator const &pkg, std::string const &pattern);
+#endif
virtual void showSelectedVersion(pkgCache::PkgIterator const &Pkg, pkgCache::VerIterator const Ver,
std::string const &ver, bool const verIsRel);
virtual void canNotFindTask(PackageContainerInterface * const pci, pkgCacheFile &Cache, std::string pattern);
virtual void canNotFindRegEx(PackageContainerInterface * const pci, pkgCacheFile &Cache, std::string pattern);
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+ virtual void canNotFindFnmatch(PackageContainerInterface * const pci, pkgCacheFile &Cache, std::string pattern);
+#endif
virtual void canNotFindPackage(PackageContainerInterface * const pci, pkgCacheFile &Cache, std::string const &str);
virtual void canNotFindAllVer(VersionContainerInterface * const vci, pkgCacheFile &Cache, pkgCache::PkgIterator const &Pkg);
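
The fnmatch-based selection support is only exposed when built against a new enough libapt-pkg, hence the (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13) guards. A front-end that wants to report glob matches would override the new virtuals under the same guard; a sketch with a made-up class name and message, using the signatures from the hunk above (CacheSetHelper is assumed to live in the APT namespace, as declared in cacheset.h):

   #include <apt-pkg/cacheset.h>
   #include <iostream>

   class ExampleCacheSetHelper : public APT::CacheSetHelper
   {
   public:
   #if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
      virtual void showFnmatchSelection(pkgCache::PkgIterator const &Pkg,
                                        std::string const &pattern)
      {
         std::cout << "Selecting " << Pkg.FullName(true)
                   << " for glob '" << pattern << "'" << std::endl;
      }
   #endif
   };
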
diff --git a/apt-pkg/cdrom.cc b/apt-pkg/cdrom.cc
index a5668a50a..3ae1e8b1d 100644
--- a/apt-pkg/cdrom.cc
+++ b/apt-pkg/cdrom.cc
@@ -561,45 +561,65 @@ bool pkgCdrom::WriteSourceList(string Name,vector<string> &List,bool Source)
return true;
}
/*}}}*/
-bool pkgCdrom::Ident(string &ident, pkgCdromStatus *log) /*{{{*/
+bool pkgCdrom::MountAndIdentCDROM(Configuration &Database, std::string &CDROM, std::string &ident, pkgCdromStatus * const log)/*{{{*/
{
- stringstream msg;
-
// Startup
- string CDROM = _config->FindDir("Acquire::cdrom::mount");
+ CDROM = _config->FindDir("Acquire::cdrom::mount");
if (CDROM[0] == '.')
CDROM= SafeGetCWD() + '/' + CDROM;
if (log != NULL)
{
- msg.str("");
- ioprintf(msg, _("Using CD-ROM mount point %s\nMounting CD-ROM\n"),
- CDROM.c_str());
- log->Update(msg.str());
+ string msg;
+ log->SetTotal(STEP_LAST);
+ strprintf(msg, _("Using CD-ROM mount point %s\n"), CDROM.c_str());
+ log->Update(msg, STEP_PREPARE);
+ }
+
+ // Unmount the CD and get the user to put in the one they want
+ if (_config->FindB("APT::CDROM::NoMount", false) == false)
+ {
+ if(log != NULL)
+ log->Update(_("Unmounting CD-ROM\n"), STEP_UNMOUNT);
+ UnmountCdrom(CDROM);
+
+ if(log != NULL)
+ {
+ log->Update(_("Waiting for disc...\n"), STEP_WAIT);
+ if(!log->ChangeCdrom()) {
+ // user aborted
+ return false;
+ }
+ }
+
+ // Mount the new CDROM
+ if(log != NULL)
+ log->Update(_("Mounting CD-ROM...\n"), STEP_MOUNT);
+
+ if (MountCdrom(CDROM) == false)
+ return _error->Error("Failed to mount the cdrom.");
}
- if (MountCdrom(CDROM) == false)
- return _error->Error("Failed to mount the cdrom.");
// Hash the CD to get an ID
if (log != NULL)
- log->Update(_("Identifying.. "));
-
+ log->Update(_("Identifying.. "), STEP_IDENT);
if (IdentCdrom(CDROM,ident) == false)
{
ident = "";
+ if (log != NULL)
+ log->Update("\n");
return false;
}
if (log != NULL)
{
- msg.str("");
- ioprintf(msg, "[%s]\n",ident.c_str());
- log->Update(msg.str());
+ string msg;
+ strprintf(msg, "[%s]\n", ident.c_str());
+ log->Update(msg);
}
// Read the database
- Configuration Database;
string DFile = _config->FindFile("Dir::State::cdroms");
if (FileExists(DFile) == true)
{
@@ -607,12 +627,22 @@ bool pkgCdrom::Ident(string &ident, pkgCdromStatus *log) /*{{{*/
return _error->Error("Unable to read the cdrom database %s",
DFile.c_str());
}
+ return true;
+}
+ /*}}}*/
+bool pkgCdrom::Ident(string &ident, pkgCdromStatus *log) /*{{{*/
+{
+ Configuration Database;
+ std::string CDROM;
+ if (MountAndIdentCDROM(Database, CDROM, ident, log) == false)
+ return false;
+
if (log != NULL)
{
- msg.str("");
- ioprintf(msg, _("Stored label: %s\n"),
- Database.Find("CD::"+ident).c_str());
- log->Update(msg.str());
+ string msg;
+ strprintf(msg, _("Stored label: %s\n"),
+ Database.Find("CD::"+ident).c_str());
+ log->Update(msg);
}
// Unmount and finish
@@ -628,70 +658,13 @@ bool pkgCdrom::Ident(string &ident, pkgCdromStatus *log) /*{{{*/
/*}}}*/
bool pkgCdrom::Add(pkgCdromStatus *log) /*{{{*/
{
- stringstream msg;
-
- // Startup
- string CDROM = _config->FindDir("Acquire::cdrom::mount");
- if (CDROM[0] == '.')
- CDROM= SafeGetCWD() + '/' + CDROM;
-
- if(log != NULL)
- {
- log->SetTotal(STEP_LAST);
- msg.str("");
- ioprintf(msg, _("Using CD-ROM mount point %s\n"), CDROM.c_str());
- log->Update(msg.str(), STEP_PREPARE);
- }
-
- // Read the database
Configuration Database;
- string DFile = _config->FindFile("Dir::State::cdroms");
- if (FileExists(DFile) == true)
- {
- if (ReadConfigFile(Database,DFile) == false)
- return _error->Error("Unable to read the cdrom database %s",
- DFile.c_str());
- }
-
- // Unmount the CD and get the user to put in the one they want
- if (_config->FindB("APT::CDROM::NoMount",false) == false)
- {
- if(log != NULL)
- log->Update(_("Unmounting CD-ROM\n"), STEP_UNMOUNT);
- UnmountCdrom(CDROM);
-
- if(log != NULL)
- {
- log->Update(_("Waiting for disc...\n"), STEP_WAIT);
- if(!log->ChangeCdrom()) {
- // user aborted
- return false;
- }
- }
-
- // Mount the new CDROM
- if(log != NULL)
- log->Update(_("Mounting CD-ROM...\n"), STEP_MOUNT);
-
- if (MountCdrom(CDROM) == false)
- return _error->Error("Failed to mount the cdrom.");
- }
-
- // Hash the CD to get an ID
- if(log != NULL)
- log->Update(_("Identifying.. "), STEP_IDENT);
- string ID;
- if (IdentCdrom(CDROM,ID) == false)
- {
- if (log != NULL)
- log->Update("\n");
+ std::string ID, CDROM;
+ if (MountAndIdentCDROM(Database, CDROM, ID, log) == false)
return false;
- }
+
if(log != NULL)
- {
- log->Update("["+ID+"]\n");
log->Update(_("Scanning disc for index files..\n"),STEP_SCAN);
- }
// Get the CD structure
vector<string> List;
@@ -740,12 +713,12 @@ bool pkgCdrom::Add(pkgCdromStatus *log) /*{{{*/
if (_config->FindB("APT::CDROM::DropTranslation", true) == true)
DropTranslation(TransList);
if(log != NULL) {
- msg.str("");
- ioprintf(msg, _("Found %zu package indexes, %zu source indexes, "
+ string msg;
+ strprintf(msg, _("Found %zu package indexes, %zu source indexes, "
"%zu translation indexes and %zu signatures\n"),
List.size(), SourceList.size(), TransList.size(),
SigList.size());
- log->Update(msg.str(), STEP_SCAN);
+ log->Update(msg, STEP_SCAN);
}
if (List.empty() == true && SourceList.empty() == true)
@@ -778,9 +751,9 @@ bool pkgCdrom::Add(pkgCdromStatus *log) /*{{{*/
if(log != NULL)
{
- msg.str("");
- ioprintf(msg, _("Found label '%s'\n"), Name.c_str());
- log->Update(msg.str());
+ string msg;
+ strprintf(msg, _("Found label '%s'\n"), Name.c_str());
+ log->Update(msg);
}
Database.Set("CD::" + ID + "::Label",Name);
}
@@ -824,9 +797,9 @@ bool pkgCdrom::Add(pkgCdromStatus *log) /*{{{*/
Database.Set("CD::" + ID,Name);
if(log != NULL)
{
- msg.str("");
- ioprintf(msg, _("This disc is called: \n'%s'\n"), Name.c_str());
- log->Update(msg.str());
+ string msg;
+ strprintf(msg, _("This disc is called: \n'%s'\n"), Name.c_str());
+ log->Update(msg);
log->Update(_("Copying package lists..."), STEP_COPY);
}
@@ -884,7 +857,7 @@ bool pkgCdrom::Add(pkgCdromStatus *log) /*{{{*/
if(log != NULL)
{
- msg.str("");
+ stringstream msg;
msg << "deb cdrom:[" << Name << "]/" << string(*I,0,Space) <<
" " << string(*I,Space+1) << endl;
log->Update(msg.str());
@@ -902,7 +875,7 @@ bool pkgCdrom::Add(pkgCdromStatus *log) /*{{{*/
}
if(log != NULL) {
- msg.str("");
+ stringstream msg;
msg << "deb-src cdrom:[" << Name << "]/" << string(*I,0,Space) <<
" " << string(*I,Space+1) << endl;
log->Update(msg.str());
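
Most hunks in cdrom.cc above replace the ostringstream/ioprintf pair with strprintf() into a plain std::string that is then handed to log->Update(). A minimal sketch of that pattern, using only strprintf from apt-pkg/contrib/strutl.h; std::cout stands in for the pkgCdromStatus log object and the disc id is made up:

#include <apt-pkg/strutl.h>
#include <iostream>
#include <string>

int main()
{
   std::string ident = "2014-02-27.1";        // made-up disc id, for illustration only
   std::string msg;
   strprintf(msg, "[%s]\n", ident.c_str());   // formats straight into msg, no stringstream needed
   std::cout << msg;                          // a real caller would do log->Update(msg)
   return 0;
}
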
diff --git a/apt-pkg/cdrom.h b/apt-pkg/cdrom.h
index 7d19eb813..c58593550 100644
--- a/apt-pkg/cdrom.h
+++ b/apt-pkg/cdrom.h
@@ -69,6 +69,10 @@ class pkgCdrom /*{{{*/
public:
bool Ident(std::string &ident, pkgCdromStatus *log);
bool Add(pkgCdromStatus *log);
+
+ private:
+ bool MountAndIdentCDROM(Configuration &Database, std::string &CDROM,
+ std::string &ident, pkgCdromStatus * const log);
};
/*}}}*/
@@ -84,7 +88,7 @@ struct CdromDevice /*{{{*/
class pkgUdevCdromDevices /*{{{*/
{
protected:
- // libudev dlopen stucture
+ // libudev dlopen structure
void *libudev_handle;
struct udev* (*udev_new)(void);
int (*udev_enumerate_add_match_property)(struct udev_enumerate *udev_enumerate, const char *property, const char *value);
diff --git a/apt-pkg/clean.cc b/apt-pkg/clean.cc
index eae419e34..2dea8ffdd 100644
--- a/apt-pkg/clean.cc
+++ b/apt-pkg/clean.cc
@@ -105,7 +105,7 @@ bool pkgArchiveCleaner::Go(std::string Dir,pkgCache &Cache)
break;
}
- // See if this verison matches the file
+ // See if this version matches the file
if (IsFetchable == true && Ver == V.VerStr())
break;
}
diff --git a/apt-pkg/contrib/cdromutl.cc b/apt-pkg/contrib/cdromutl.cc
index afa01a562..20210ec0a 100644
--- a/apt-pkg/contrib/cdromutl.cc
+++ b/apt-pkg/contrib/cdromutl.cc
@@ -47,8 +47,8 @@ bool IsMounted(string &Path)
if (Path[Path.length() - 1] != '/')
Path += '/';
- /* First we check if the path is actualy mounted, we do this by
- stating the path and the previous directory (carefull of links!)
+ /* First we check if the path is actually mounted, we do this by
+ stating the path and the previous directory (careful of links!)
and comparing their device fields. */
struct stat Buf,Buf2;
if (stat(Path.c_str(),&Buf) != 0 ||
diff --git a/apt-pkg/contrib/cmndline.cc b/apt-pkg/contrib/cmndline.cc
index 8cef80368..ed5800007 100644
--- a/apt-pkg/contrib/cmndline.cc
+++ b/apt-pkg/contrib/cmndline.cc
@@ -293,7 +293,7 @@ bool CommandLine::HandleOpt(int &I,int argc,const char *argv[],
// Look for an argument.
while (1)
{
- // Look at preceeding text
+ // Look at preceding text
char Buffer[300];
if (Argument == 0)
{
@@ -397,6 +397,7 @@ bool CommandLine::DispatchArg(Dispatch *Map,bool NoMatch)
void CommandLine::SaveInConfig(unsigned int const &argc, char const * const * const argv)
{
char cmdline[100 + argc * 50];
+ memset(cmdline, 0, sizeof(cmdline));
unsigned int length = 0;
bool lastWasOption = false;
bool closeQuote = false;
diff --git a/apt-pkg/contrib/crc-16.cc b/apt-pkg/contrib/crc-16.cc
index 4058821f9..f5df2d8b1 100644
--- a/apt-pkg/contrib/crc-16.cc
+++ b/apt-pkg/contrib/crc-16.cc
@@ -10,7 +10,7 @@
Al Longyear <longyear@netcom.com>
Modified by Jason Gunthorpe <jgg@debian.org> to fit the local coding
- style, this code is belived to be in the Public Domain.
+ style, this code is believed to be in the Public Domain.
##################################################################### */
/*}}}*/
diff --git a/apt-pkg/contrib/error.h b/apt-pkg/contrib/error.h
index 7d09b2d4a..bcee70b1a 100644
--- a/apt-pkg/contrib/error.h
+++ b/apt-pkg/contrib/error.h
@@ -229,7 +229,7 @@ public: /*{{{*/
/** \brief is the list empty?
*
* The default checks if the list is empty or contains only notices,
- * if you want to check if also no notices happend set the parameter
+ * if you want to check if also no notices happened set the parameter
* flag to \b false.
*
* \param WithoutNotice does notices count, default is \b true, so no
diff --git a/apt-pkg/contrib/fileutl.cc b/apt-pkg/contrib/fileutl.cc
index ac2879017..9da5ac84e 100644
--- a/apt-pkg/contrib/fileutl.cc
+++ b/apt-pkg/contrib/fileutl.cc
@@ -222,7 +222,7 @@ int GetLock(string File,bool Errors)
int FD = open(File.c_str(),O_RDWR | O_CREAT | O_NOFOLLOW,0640);
if (FD < 0)
{
- // Read only .. cant have locking problems there.
+ // Read only .. can't have locking problems there.
if (errno == EROFS)
{
_error->Warning(_("Not using locking for read only lock file %s"),File.c_str());
@@ -238,7 +238,7 @@ int GetLock(string File,bool Errors)
}
SetCloseExec(FD,true);
- // Aquire a write lock
+ // Acquire a write lock
struct flock fl;
fl.l_type = F_WRLCK;
fl.l_whence = SEEK_SET;
@@ -319,7 +319,7 @@ bool CreateDirectory(string const &Parent, string const &Path)
return false;
// we are not going to create directories "into the blue"
- if (Path.find(Parent, 0) != 0)
+ if (Path.compare(0, Parent.length(), Parent) != 0)
return false;
vector<string> const dirs = VectorizeString(Path.substr(Parent.size()), '/');
@@ -465,7 +465,7 @@ std::vector<string> GetListOfFilesInDir(string const &Dir, std::vector<string> c
const char *C = Ent->d_name;
for (; *C != 0; ++C)
if (isalpha(*C) == 0 && isdigit(*C) == 0
- && *C != '_' && *C != '-') {
+ && *C != '_' && *C != '-' && *C != ':') {
// no required extension -> dot is a bad character
if (*C == '.' && Ext.empty() == false)
continue;
@@ -656,9 +656,9 @@ string flNoLink(string File)
while (1)
{
// Read the link
- int Res;
+ ssize_t Res;
if ((Res = readlink(NFile.c_str(),Buffer,sizeof(Buffer))) <= 0 ||
- (unsigned)Res >= sizeof(Buffer))
+ (size_t)Res >= sizeof(Buffer))
return File;
// Append or replace the previous path
@@ -760,6 +760,27 @@ bool WaitFd(int Fd,bool write,unsigned long timeout)
return true;
}
/*}}}*/
+// MergeKeepFdsFromConfiguration - Merge APT::Keep-Fds configuration /*{{{*/
+// ---------------------------------------------------------------------
+/* This is used to merge the APT::Keep-Fds with the provided KeepFDs
+ * set.
+ */
+void MergeKeepFdsFromConfiguration(std::set<int> &KeepFDs)
+{
+ Configuration::Item const *Opts = _config->Tree("APT::Keep-Fds");
+ if (Opts != 0 && Opts->Child != 0)
+ {
+ Opts = Opts->Child;
+ for (; Opts != 0; Opts = Opts->Next)
+ {
+ if (Opts->Value.empty() == true)
+ continue;
+ int fd = atoi(Opts->Value.c_str());
+ KeepFDs.insert(fd);
+ }
+ }
+}
+ /*}}}*/
// ExecFork - Magical fork that sanitizes the context before execing /*{{{*/
// ---------------------------------------------------------------------
/* This is used if you want to cleanse the environment for the forked
@@ -767,6 +788,15 @@ bool WaitFd(int Fd,bool write,unsigned long timeout)
otherwise acts like normal fork. */
pid_t ExecFork()
{
+ set<int> KeepFDs;
+ // we need to merge the Keep-Fds as external tools like
+ // debconf-apt-progress use it
+ MergeKeepFdsFromConfiguration(KeepFDs);
+ return ExecFork(KeepFDs);
+}
+
+pid_t ExecFork(std::set<int> KeepFDs)
+{
// Fork off the process
pid_t Process = fork();
if (Process < 0)
@@ -786,22 +816,8 @@ pid_t ExecFork()
signal(SIGCONT,SIG_DFL);
signal(SIGTSTP,SIG_DFL);
- set<int> KeepFDs;
- Configuration::Item const *Opts = _config->Tree("APT::Keep-Fds");
- if (Opts != 0 && Opts->Child != 0)
- {
- Opts = Opts->Child;
- for (; Opts != 0; Opts = Opts->Next)
- {
- if (Opts->Value.empty() == true)
- continue;
- int fd = atoi(Opts->Value.c_str());
- KeepFDs.insert(fd);
- }
- }
-
// Close all of our FDs - just in case
- for (int K = 3; K != 40; K++)
+ for (int K = 3; K != sysconf(_SC_OPEN_MAX); K++)
{
if(KeepFDs.find(K) == KeepFDs.end())
fcntl(K,F_SETFD,FD_CLOEXEC);
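
The overload added above lets a caller collect the APT::Keep-Fds configuration into a set, add its own descriptors and pass the whole set to ExecFork(), instead of temporarily writing pipe fds into the configuration as RunScriptsWithPkgs used to. A minimal sketch against these interfaces (error handling trimmed, the shell command is purely illustrative):

#include <apt-pkg/fileutl.h>
#include <apt-pkg/strutl.h>
#include <set>
#include <unistd.h>

int main()
{
   int Pipes[2];
   if (pipe(Pipes) != 0)
      return 1;

   std::set<int> KeepFDs;
   MergeKeepFdsFromConfiguration(KeepFDs);   // honours APT::Keep-Fds
   KeepFDs.insert(Pipes[0]);                 // the read end must survive the exec in the child

   pid_t Child = ExecFork(KeepFDs);
   if (Child == 0)
   {
      // every fd >= 3 that is not in KeepFDs was marked FD_CLOEXEC by ExecFork,
      // so after the exec only Pipes[0] (and stdin/stdout/stderr) remain open
      std::string cmd;
      strprintf(cmd, "cat <&%d", Pipes[0]);
      execl("/bin/sh", "sh", "-c", cmd.c_str(), (char *)NULL);
      _exit(100);
   }
   close(Pipes[0]);
   write(Pipes[1], "hello\n", 6);
   close(Pipes[1]);
   return ExecWait(Child, "sh") ? 0 : 1;
}
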
@@ -966,9 +982,6 @@ bool FileFd::Open(string FileName,unsigned int const Mode,APT::Configuration::Co
if ((Mode & Atomic) == Atomic)
{
Flags |= Replace;
- char *name = strdup((FileName + ".XXXXXX").c_str());
- TemporaryFileName = string(mktemp(name));
- free(name);
}
else if ((Mode & (Exclusive | Create)) == (Exclusive | Create))
{
@@ -991,11 +1004,24 @@ bool FileFd::Open(string FileName,unsigned int const Mode,APT::Configuration::Co
if_FLAGGED_SET(Create, O_CREAT);
if_FLAGGED_SET(Empty, O_TRUNC);
if_FLAGGED_SET(Exclusive, O_EXCL);
- else if_FLAGGED_SET(Atomic, O_EXCL);
#undef if_FLAGGED_SET
- if (TemporaryFileName.empty() == false)
- iFd = open(TemporaryFileName.c_str(), fileflags, Perms);
+ if ((Mode & Atomic) == Atomic)
+ {
+ char *name = strdup((FileName + ".XXXXXX").c_str());
+
+ if((iFd = mkstemp(name)) == -1)
+ {
+ free(name);
+ return FileFdErrno("mkstemp", "Could not create temporary file for %s", FileName.c_str());
+ }
+
+ TemporaryFileName = string(name);
+ free(name);
+
+ if(Perms != 600 && fchmod(iFd, Perms) == -1)
+ return FileFdErrno("fchmod", "Could not change permissions for temporary file %s", TemporaryFileName.c_str());
+ }
else
iFd = open(FileName.c_str(), fileflags, Perms);
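
For Atomic opens the temporary file is now created and opened in one step by mkstemp(), rather than naming it with mktemp() and opening it later, which leaves a window in which someone else can drop a file under that name. A standalone sketch of the idiom, not apt code; the 0600/Perms handling mirrors the hunk above:

#include <cstdlib>
#include <cstring>
#include <string>
#include <sys/stat.h>
#include <unistd.h>

// create-and-open a temporary sibling of Target, as FileFd::Open(..., Atomic) now does
static int OpenAtomicSibling(std::string const &Target, std::string &TempName, mode_t Perms)
{
   char *name = strdup((Target + ".XXXXXX").c_str());
   int const fd = mkstemp(name);   // creates the file with mode 0600 and opens it, no race
   if (fd == -1)
   {
      free(name);
      return -1;
   }
   TempName = name;
   free(name);
   if (Perms != 0600 && fchmod(fd, Perms) == -1)   // widen the permissions only if requested
   {
      close(fd);
      unlink(TempName.c_str());
      return -1;
   }
   return fd;
}

int main()
{
   std::string tmp;
   int const fd = OpenAtomicSibling("example.conf", tmp, 0644);
   if (fd == -1)
      return 1;
   unlink(tmp.c_str());   // demo only: a real caller would write and then rename over the target
   close(fd);
   return 0;
}
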
@@ -1250,11 +1276,11 @@ FileFd::~FileFd()
/*}}}*/
// FileFd::Read - Read a bit of the file /*{{{*/
// ---------------------------------------------------------------------
-/* We are carefull to handle interruption by a signal while reading
+/* We are careful to handle interruption by a signal while reading
gracefully. */
bool FileFd::Read(void *To,unsigned long long Size,unsigned long long *Actual)
{
- int Res;
+ ssize_t Res;
errno = 0;
if (Actual != 0)
*Actual = 0;
@@ -1354,7 +1380,7 @@ char* FileFd::ReadLine(char *To, unsigned long long const Size)
/* */
bool FileFd::Write(const void *From,unsigned long long Size)
{
- int Res;
+ ssize_t Res;
errno = 0;
do
{
@@ -1408,7 +1434,7 @@ bool FileFd::Write(const void *From,unsigned long long Size)
}
bool FileFd::Write(int Fd, const void *From, unsigned long long Size)
{
- int Res;
+ ssize_t Res;
errno = 0;
do
{
@@ -1481,14 +1507,14 @@ bool FileFd::Seek(unsigned long long To)
d->seekpos = To;
return true;
}
- int res;
+ off_t res;
#ifdef HAVE_ZLIB
if (d != NULL && d->gz)
res = gzseek(d->gz,To,SEEK_SET);
else
#endif
res = lseek(iFd,To,SEEK_SET);
- if (res != (signed)To)
+ if (res != (off_t)To)
return FileFdError("Unable to seek to %llu", To);
if (d != NULL)
@@ -1519,7 +1545,7 @@ bool FileFd::Skip(unsigned long long Over)
return true;
}
- int res;
+ off_t res;
#ifdef HAVE_ZLIB
if (d != NULL && d->gz != NULL)
res = gzseek(d->gz,Over,SEEK_CUR);
@@ -1539,6 +1565,9 @@ bool FileFd::Skip(unsigned long long Over)
/* */
bool FileFd::Truncate(unsigned long long To)
{
+ // truncating /dev/null is always successful - as we get an error otherwise
+ if (To == 0 && FileName == "/dev/null")
+ return true;
#if defined HAVE_ZLIB || defined HAVE_BZ2
if (d != NULL && (d->gz != NULL || d->bz2 != NULL))
return FileFdError("Truncating compressed files is not implemented (%s)", FileName.c_str());
@@ -1579,29 +1608,55 @@ unsigned long long FileFd::Tell()
return Res;
}
/*}}}*/
-// FileFd::FileSize - Return the size of the file /*{{{*/
-// ---------------------------------------------------------------------
-/* */
-unsigned long long FileFd::FileSize()
+static bool StatFileFd(char const * const msg, int const iFd, std::string const &FileName, struct stat &Buf, FileFdPrivate * const d) /*{{{*/
{
- struct stat Buf;
- if ((d == NULL || d->pipe == false) && fstat(iFd,&Buf) != 0)
- return FileFdErrno("fstat","Unable to determine the file size");
+ bool ispipe = (d != NULL && d->pipe == true);
+ if (ispipe == false)
+ {
+ if (fstat(iFd,&Buf) != 0)
+ // higher-level code will generate more meaningful messages,
+ // even translated this would be meaningless for users
+ return _error->Errno("fstat", "Unable to determine %s for fd %i", msg, iFd);
+ ispipe = S_ISFIFO(Buf.st_mode);
+ }
// for compressor pipes st_size is undefined and at 'best' zero
- if ((d != NULL && d->pipe == true) || S_ISFIFO(Buf.st_mode))
+ if (ispipe == true)
{
// we set it here, too, as we get the info here for free
// in theory the Open-methods should take care of it already
if (d != NULL)
d->pipe = true;
if (stat(FileName.c_str(), &Buf) != 0)
- return FileFdErrno("stat","Unable to determine the file size");
+ return _error->Errno("fstat", "Unable to determine %s for file %s", msg, FileName.c_str());
+ }
+ return true;
+}
+ /*}}}*/
+// FileFd::FileSize - Return the size of the file /*{{{*/
+unsigned long long FileFd::FileSize()
+{
+ struct stat Buf;
+ if (StatFileFd("file size", iFd, FileName, Buf, d) == false)
+ {
+ Flags |= Fail;
+ return 0;
}
-
return Buf.st_size;
}
/*}}}*/
+// FileFd::ModificationTime - Return the time of last touch /*{{{*/
+time_t FileFd::ModificationTime()
+{
+ struct stat Buf;
+ if (StatFileFd("modification time", iFd, FileName, Buf, d) == false)
+ {
+ Flags |= Fail;
+ return 0;
+ }
+ return Buf.st_mtime;
+}
+ /*}}}*/
// FileFd::Size - Return the size of the content in the file /*{{{*/
// ---------------------------------------------------------------------
/* */
@@ -1673,35 +1728,6 @@ unsigned long long FileFd::Size()
return size;
}
/*}}}*/
-// FileFd::ModificationTime - Return the time of last touch /*{{{*/
-// ---------------------------------------------------------------------
-/* */
-time_t FileFd::ModificationTime()
-{
- struct stat Buf;
- if ((d == NULL || d->pipe == false) && fstat(iFd,&Buf) != 0)
- {
- FileFdErrno("fstat","Unable to determine the modification time of file %s", FileName.c_str());
- return 0;
- }
-
- // for compressor pipes st_size is undefined and at 'best' zero
- if ((d != NULL && d->pipe == true) || S_ISFIFO(Buf.st_mode))
- {
- // we set it here, too, as we get the info here for free
- // in theory the Open-methods should take care of it already
- if (d != NULL)
- d->pipe = true;
- if (stat(FileName.c_str(), &Buf) != 0)
- {
- FileFdErrno("fstat","Unable to determine the modification time of file %s", FileName.c_str());
- return 0;
- }
- }
-
- return Buf.st_mtime;
-}
- /*}}}*/
// FileFd::Close - Close the file if the close flag is set /*{{{*/
// ---------------------------------------------------------------------
/* */
@@ -1797,7 +1823,8 @@ std::vector<std::string> Glob(std::string const &pattern, int flags)
{
std::vector<std::string> result;
glob_t globbuf;
- int glob_res, i;
+ int glob_res;
+ unsigned int i;
glob_res = glob(pattern.c_str(), flags, NULL, &globbuf);
@@ -1817,3 +1844,20 @@ std::vector<std::string> Glob(std::string const &pattern, int flags)
return result;
}
/*}}}*/
+
+std::string GetTempDir()
+{
+ const char *tmpdir = getenv("TMPDIR");
+
+#ifdef P_tmpdir
+ if (!tmpdir)
+ tmpdir = P_tmpdir;
+#endif
+
+ // check that tmpdir is set and exists
+ struct stat st;
+ if (!tmpdir || strlen(tmpdir) == 0 || stat(tmpdir, &st) != 0)
+ tmpdir = "/tmp";
+
+ return string(tmpdir);
+}
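
GetTempDir() centralises the TMPDIR, P_tmpdir, /tmp fallback chain that GenerateTemporaryFileTemplate() in gpgv.cc carried locally until now (see the gpgv.cc hunk below). A minimal caller that builds a mkstemp template from it; the file name prefix is made up:

#include <apt-pkg/fileutl.h>
#include <apt-pkg/strutl.h>
#include <cstdlib>
#include <cstring>
#include <string>
#include <unistd.h>

int main()
{
   std::string tmpl;
   strprintf(tmpl, "%s/apt-example.XXXXXX", GetTempDir().c_str());

   char *name = strdup(tmpl.c_str());
   int const fd = mkstemp(name);
   if (fd != -1)
   {
      unlink(name);   // demo only: remove the file again right away
      close(fd);
   }
   free(name);
   return fd == -1 ? 1 : 0;
}
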
diff --git a/apt-pkg/contrib/fileutl.h b/apt-pkg/contrib/fileutl.h
index 9402c8f75..58a90e83b 100644
--- a/apt-pkg/contrib/fileutl.h
+++ b/apt-pkg/contrib/fileutl.h
@@ -26,6 +26,7 @@
#include <string>
#include <vector>
+#include <set>
#include <zlib.h>
@@ -164,6 +165,8 @@ bool DirectoryExists(std::string const &Path) __attrib_const;
bool CreateDirectory(std::string const &Parent, std::string const &Path);
time_t GetModificationTime(std::string const &Path);
+std::string GetTempDir();
+
/** \brief Ensure the existence of the given Path
*
* \param Parent directory of the Path directory - a trailing
@@ -182,6 +185,8 @@ void SetCloseExec(int Fd,bool Close);
void SetNonBlock(int Fd,bool Block);
bool WaitFd(int Fd,bool write = false,unsigned long timeout = 0);
pid_t ExecFork();
+pid_t ExecFork(std::set<int> keep_fds);
+void MergeKeepFdsFromConfiguration(std::set<int> &keep_fds);
bool ExecWait(pid_t Pid,const char *Name,bool Reap = false);
// check if the given file starts with a PGP cleartext signature
diff --git a/apt-pkg/contrib/gpgv.cc b/apt-pkg/contrib/gpgv.cc
index f47e7ea48..9de227062 100644
--- a/apt-pkg/contrib/gpgv.cc
+++ b/apt-pkg/contrib/gpgv.cc
@@ -10,6 +10,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
+#include <unistd.h>
#include<apt-pkg/configuration.h>
#include<apt-pkg/error.h>
@@ -21,16 +22,9 @@
/*}}}*/
static char * GenerateTemporaryFileTemplate(const char *basename) /*{{{*/
{
- const char *tmpdir = getenv("TMPDIR");
-#ifdef P_tmpdir
- if (!tmpdir)
- tmpdir = P_tmpdir;
-#endif
- if (!tmpdir)
- tmpdir = "/tmp";
-
std::string out;
- strprintf(out, "%s/%s.XXXXXX", tmpdir, basename);
+ std::string tmpdir = GetTempDir();
+ strprintf(out, "%s/%s.XXXXXX", tmpdir.c_str(), basename);
return strdup(out.c_str());
}
/*}}}*/
@@ -109,12 +103,12 @@ void ExecGPGV(std::string const &File, std::string const &FileGPG,
}
}
+ enum { DETACHED, CLEARSIGNED } releaseSignature = (FileGPG != File) ? DETACHED : CLEARSIGNED;
std::vector<std::string> dataHeader;
char * sig = NULL;
char * data = NULL;
- // file with detached signature
- if (FileGPG != File)
+ if (releaseSignature == DETACHED)
{
Args.push_back(FileGPG.c_str());
Args.push_back(File.c_str());
@@ -187,7 +181,7 @@ void ExecGPGV(std::string const &File, std::string const &FileGPG,
putenv((char *)"LC_MESSAGES=");
}
- if (FileGPG != File)
+ if (releaseSignature == DETACHED)
{
execvp(gpgvpath.c_str(), (char **) &Args[0]);
ioprintf(std::cerr, "Couldn't execute %s to check %s", Args[0], File.c_str());
@@ -266,8 +260,7 @@ bool SplitClearSignedFile(std::string const &InFile, FileFd * const ContentFile,
char *buf = NULL;
size_t buf_size = 0;
- ssize_t line_len = 0;
- while ((line_len = getline(&buf, &buf_size, in)) != -1)
+ while (getline(&buf, &buf_size, in) != -1)
{
_strrstrip(buf);
if (found_message_start == false)
@@ -361,7 +354,7 @@ bool OpenMaybeClearSignedFile(std::string const &ClearSignedFileName, FileFd &Me
return _error->Error("Couldn't open temporary file to work with %s", ClearSignedFileName.c_str());
_error->PushToStack();
- bool const splitDone = SplitClearSignedFile(ClearSignedFileName.c_str(), &MessageFile, NULL, NULL);
+ bool const splitDone = SplitClearSignedFile(ClearSignedFileName, &MessageFile, NULL, NULL);
bool const errorDone = _error->PendingError();
_error->MergeWithStack();
if (splitDone == false)
diff --git a/apt-pkg/contrib/gpgv.h b/apt-pkg/contrib/gpgv.h
index 45f069058..1d79a52ac 100644
--- a/apt-pkg/contrib/gpgv.h
+++ b/apt-pkg/contrib/gpgv.h
@@ -29,7 +29,7 @@
* for reading. Use #OpenMaybeClearSignedFile to access the message
* instead to ensure you are only reading signed data.
*
- * The method does not return, but has some noteable exit-codes:
+ * The method does not return, but has some notable exit-codes:
* 111 signals an internal error like the inability to execute gpgv,
* 112 indicates a clear-signed file which doesn't include a message,
* which can happen if APT is run while on a network requiring
diff --git a/apt-pkg/contrib/hashes.cc b/apt-pkg/contrib/hashes.cc
index e1a431823..890573d9c 100644
--- a/apt-pkg/contrib/hashes.cc
+++ b/apt-pkg/contrib/hashes.cc
@@ -55,6 +55,26 @@ HashString::HashString(std::string StringedHash) /*{{{*/
/*}}}*/
bool HashString::VerifyFile(std::string filename) const /*{{{*/
{
+ std::string fileHash = GetHashForFile(filename);
+
+ if(_config->FindB("Debug::Hashes",false) == true)
+ std::clog << "HashString::VerifyFile: got: " << fileHash << " expected: " << toStr() << std::endl;
+
+ return (fileHash == Hash);
+}
+ /*}}}*/
+bool HashString::FromFile(std::string filename) /*{{{*/
+{
+ // pick the strongest hash
+ if (Type == "")
+ Type = _SupportedHashes[0];
+
+ Hash = GetHashForFile(filename);
+ return true;
+}
+ /*}}}*/
+std::string HashString::GetHashForFile(std::string filename) const /*{{{*/
+{
std::string fileHash;
FileFd Fd(filename, FileFd::ReadOnly);
@@ -84,10 +104,7 @@ bool HashString::VerifyFile(std::string filename) const /*{{{*/
}
Fd.Close();
- if(_config->FindB("Debug::Hashes",false) == true)
- std::clog << "HashString::VerifyFile: got: " << fileHash << " expected: " << toStr() << std::endl;
-
- return (fileHash == Hash);
+ return fileHash;
}
/*}}}*/
const char** HashString::SupportedHashes()
@@ -112,13 +129,12 @@ bool Hashes::AddFD(int const Fd,unsigned long long Size, bool const addMD5,
bool const addSHA1, bool const addSHA256, bool const addSHA512)
{
unsigned char Buf[64*64];
- ssize_t Res = 0;
- int ToEOF = (Size == 0);
+ bool const ToEOF = (Size == 0);
while (Size != 0 || ToEOF)
{
unsigned long long n = sizeof(Buf);
if (!ToEOF) n = std::min(Size, n);
- Res = read(Fd,Buf,n);
+ ssize_t const Res = read(Fd,Buf,n);
if (Res < 0 || (!ToEOF && Res != (ssize_t) n)) // error, or short read
return false;
if (ToEOF && Res == 0) // EOF
diff --git a/apt-pkg/contrib/hashes.h b/apt-pkg/contrib/hashes.h
index 0c0b6c6a7..0a8bcd259 100644
--- a/apt-pkg/contrib/hashes.h
+++ b/apt-pkg/contrib/hashes.h
@@ -36,7 +36,10 @@ class HashString
protected:
std::string Type;
std::string Hash;
- static const char * _SupportedHashes[10];
+ static const char* _SupportedHashes[10];
+
+ // internal helper
+ std::string GetHashForFile(std::string filename) const;
public:
HashString(std::string Type, std::string Hash);
@@ -49,6 +52,10 @@ class HashString
// verify the given filename against the currently loaded hash
bool VerifyFile(std::string filename) const;
+ // generate a hash string from the given filename
+ bool FromFile(std::string filename);
+
+
// helper
std::string toStr() const; // convert to str as "type:hash"
bool empty() const;
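
FromFile() stores the hash of the given file in the object, picking the strongest supported algorithm when no type was set, and shares the file reading with VerifyFile() through the new GetHashForFile() helper. A minimal round trip, assuming a file called Packages exists in the working directory:

#include <apt-pkg/hashes.h>
#include <iostream>

int main()
{
   HashString hs;                        // no type set, so the strongest supported hash is used
   if (hs.FromFile("Packages") == false)
      return 1;
   std::cout << hs.toStr() << std::endl; // prints "type:hash", e.g. "SHA512:..."

   // VerifyFile() re-hashes the file and compares against the stored value
   return hs.VerifyFile("Packages") ? 0 : 1;
}
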
diff --git a/apt-pkg/contrib/hashsum.cc b/apt-pkg/contrib/hashsum.cc
index 289e43aa4..d02177724 100644
--- a/apt-pkg/contrib/hashsum.cc
+++ b/apt-pkg/contrib/hashsum.cc
@@ -9,13 +9,12 @@
/* */
bool SummationImplementation::AddFD(int const Fd, unsigned long long Size) {
unsigned char Buf[64 * 64];
- ssize_t Res = 0;
- int ToEOF = (Size == 0);
+ bool const ToEOF = (Size == 0);
while (Size != 0 || ToEOF)
{
unsigned long long n = sizeof(Buf);
if (!ToEOF) n = std::min(Size, n);
- Res = read(Fd, Buf, n);
+ ssize_t const Res = read(Fd, Buf, n);
if (Res < 0 || (!ToEOF && Res != (ssize_t) n)) // error, or short read
return false;
if (ToEOF && Res == 0) // EOF
@@ -27,7 +26,7 @@ bool SummationImplementation::AddFD(int const Fd, unsigned long long Size) {
}
bool SummationImplementation::AddFD(FileFd &Fd, unsigned long long Size) {
unsigned char Buf[64 * 64];
- bool ToEOF = (Size == 0);
+ bool const ToEOF = (Size == 0);
while (Size != 0 || ToEOF)
{
unsigned long long n = sizeof(Buf);
diff --git a/apt-pkg/contrib/macros.h b/apt-pkg/contrib/macros.h
index 62e7b65db..e53d01b8f 100644
--- a/apt-pkg/contrib/macros.h
+++ b/apt-pkg/contrib/macros.h
@@ -44,7 +44,7 @@
#define _boundv(a,b,c) b = _bound(a,b,c)
#define ABS(a) (((a) < (0)) ?-(a) : (a))
-/* Usefull count macro, use on an array of things and it will return the
+/* Useful count macro, use on an array of things and it will return the
number of items in the array */
#define _count(a) (sizeof(a)/sizeof(a[0]))
diff --git a/apt-pkg/contrib/md5.h b/apt-pkg/contrib/md5.h
index 25631b166..195455645 100644
--- a/apt-pkg/contrib/md5.h
+++ b/apt-pkg/contrib/md5.h
@@ -10,7 +10,7 @@
store a MD5Sum in 16 bytes of memory.
A MD5Sum is used to generate a (hopefully) unique 16 byte number for a
- block of data. This can be used to gaurd against corruption of a file.
+ block of data. This can be used to guard against corruption of a file.
MD5 should not be used for tamper protection, use SHA or something more
secure.
diff --git a/apt-pkg/contrib/mmap.cc b/apt-pkg/contrib/mmap.cc
index a176da636..51e8eb30f 100644
--- a/apt-pkg/contrib/mmap.cc
+++ b/apt-pkg/contrib/mmap.cc
@@ -352,6 +352,12 @@ unsigned long DynamicMMap::RawAllocate(unsigned long long Size,unsigned long Aln
size in the file. */
unsigned long DynamicMMap::Allocate(unsigned long ItemSize)
{
+ if (unlikely(ItemSize == 0))
+ {
+ _error->Fatal("Can't allocate an item of size zero");
+ return 0;
+ }
+
// Look for a matching pool entry
Pool *I;
Pool *Empty = 0;
@@ -412,7 +418,7 @@ unsigned long DynamicMMap::WriteString(const char *String,
unsigned long const Result = RawAllocate(Len+1,0);
- if (Result == 0 && _error->PendingError())
+ if (Base == NULL || (Result == 0 && _error->PendingError()))
return 0;
memcpy((char *)Base + Result,String,Len);
diff --git a/apt-pkg/contrib/mmap.h b/apt-pkg/contrib/mmap.h
index 6bd4a2d86..c1dfedf6d 100644
--- a/apt-pkg/contrib/mmap.h
+++ b/apt-pkg/contrib/mmap.h
@@ -6,7 +6,7 @@
MMap Class - Provides 'real' mmap or a faked mmap using read().
The purpose of this code is to provide a generic way for clients to
- access the mmap function. In enviroments that do not support mmap
+ access the mmap function. In environments that do not support mmap
from file fd's this function will use read and normal allocated
memory.
@@ -15,7 +15,7 @@
The DynamicMMap class is used to help the on-disk data structure
generators. It provides a large allocated workspace and members
- to allocate space from the workspace in an effecient fashion.
+ to allocate space from the workspace in an efficient fashion.
This source is placed in the Public Domain, do with it what you will
It was originally written by Jason Gunthorpe.
diff --git a/apt-pkg/contrib/progress.h b/apt-pkg/contrib/progress.h
index 3a6943aee..f7fbc9ccf 100644
--- a/apt-pkg/contrib/progress.h
+++ b/apt-pkg/contrib/progress.h
@@ -7,7 +7,7 @@
This class allows lengthy operations to communicate their progress
to the GUI. The progress model is simple and is not designed to handle
- the complex case of the multi-activity aquire class.
+ the complex case of the multi-activity acquire class.
The model is based on the concept of an overall operation consisting
of a series of small sub operations. Each sub operation has it's own
diff --git a/apt-pkg/contrib/sha2_internal.cc b/apt-pkg/contrib/sha2_internal.cc
index f84fb761c..bb2560252 100644
--- a/apt-pkg/contrib/sha2_internal.cc
+++ b/apt-pkg/contrib/sha2_internal.cc
@@ -65,7 +65,7 @@
* Please make sure that your system defines BYTE_ORDER. If your
* architecture is little-endian, make sure it also defines
* LITTLE_ENDIAN and that the two (BYTE_ORDER and LITTLE_ENDIAN) are
- * equivilent.
+ * equivalent.
*
* If your system does not define the above, then you can do so by
* hand like this:
diff --git a/apt-pkg/contrib/strutl.cc b/apt-pkg/contrib/strutl.cc
index d06637155..d4f53ea3a 100644
--- a/apt-pkg/contrib/strutl.cc
+++ b/apt-pkg/contrib/strutl.cc
@@ -36,7 +36,30 @@
using namespace std;
/*}}}*/
+// Strip - Remove white space from the front and back of a string /*{{{*/
+// ---------------------------------------------------------------------
+namespace APT {
+ namespace String {
+std::string Strip(const std::string &s)
+{
+ size_t start = s.find_first_not_of(" \t\n");
+ // only whitespace
+ if (start == string::npos)
+ return "";
+ size_t end = s.find_last_not_of(" \t\n");
+ return s.substr(start, end-start+1);
+}
+
+bool Endswith(const std::string &s, const std::string &end)
+{
+ if (end.size() > s.size())
+ return false;
+ return (s.substr(s.size() - end.size(), s.size()) == end);
+}
+}
+}
+ /*}}}*/
// UTF8ToCodeset - Convert some UTF-8 string for some codeset /*{{{*/
// ---------------------------------------------------------------------
/* This is handy to use before display some information for enduser */
@@ -403,7 +426,7 @@ string TimeToStr(unsigned long Sec)
/*}}}*/
// SubstVar - Substitute a string for another string /*{{{*/
// ---------------------------------------------------------------------
-/* This replaces all occurances of Subst with Contents in Str. */
+/* This replaces all occurrences of Subst with Contents in Str. */
string SubstVar(const string &Str,const string &Subst,const string &Contents)
{
string::size_type Pos = 0;
@@ -903,7 +926,7 @@ bool FTPMDTMStrToTime(const char* const str,time_t &time)
/*}}}*/
// StrToTime - Converts a string into a time_t /*{{{*/
// ---------------------------------------------------------------------
-/* This handles all 3 populare time formats including RFC 1123, RFC 1036
+/* This handles all 3 popular time formats including RFC 1123, RFC 1036
and the C library asctime format. It requires the GNU library function
'timegm' to convert a struct tm in UTC to a time_t. For some bizzar
reason the C library does not provide any such function :< This also
@@ -943,6 +966,8 @@ bool StrToTime(const string &Val,time_t &Result)
Tm.tm_isdst = 0;
if (Month[0] != 0)
Tm.tm_mon = MonthConv(Month);
+ else
+ Tm.tm_mon = 0; // we don't have a month, so pick something
Tm.tm_year -= 1900;
// Convert to local time and then to GMT
@@ -1116,6 +1141,37 @@ vector<string> VectorizeString(string const &haystack, char const &split)
return exploded;
}
/*}}}*/
+// StringSplit - split a string into a string vector by token /*{{{*/
+// ---------------------------------------------------------------------
+/* See header for details.
+ */
+vector<string> StringSplit(std::string const &s, std::string const &sep,
+ unsigned int maxsplit)
+{
+ vector<string> split;
+ size_t start, pos;
+
+ // no separator given, this is bogus
+ if(sep.size() == 0)
+ return split;
+
+ start = pos = 0;
+ while (pos != string::npos)
+ {
+ pos = s.find(sep, start);
+ split.push_back(s.substr(start, pos-start));
+
+ // if maxsplit is reached, the remaining string is the last item
+ if(split.size() >= maxsplit)
+ {
+ split[split.size()-1] = s.substr(start);
+ break;
+ }
+ start = pos+sep.size();
+ }
+ return split;
+}
+ /*}}}*/
// RegexChoice - Simple regex list/list matcher /*{{{*/
// ---------------------------------------------------------------------
/* */
@@ -1257,7 +1313,7 @@ string StripEpoch(const string &VerStr)
// tolower_ascii - tolower() function that ignores the locale /*{{{*/
// ---------------------------------------------------------------------
/* This little function is the most called method we have and tries
- therefore to do the absolut minimum - and is noteable faster than
+ therefore to do the absolute minimum - and is notably faster than
standard tolower/toupper and as a bonus avoids problems with different
locales - we only operate on ascii chars anyway. */
int tolower_ascii(int const c)
@@ -1268,9 +1324,9 @@ int tolower_ascii(int const c)
}
/*}}}*/
-// CheckDomainList - See if Host is in a , seperate list /*{{{*/
+// CheckDomainList - See if Host is in a , separate list /*{{{*/
// ---------------------------------------------------------------------
-/* The domain list is a comma seperate list of domains that are suffix
+/* The domain list is a comma separate list of domains that are suffix
matched against the argument */
bool CheckDomainList(const string &Host,const string &List)
{
diff --git a/apt-pkg/contrib/strutl.h b/apt-pkg/contrib/strutl.h
index 530896141..8d746f10e 100644
--- a/apt-pkg/contrib/strutl.h
+++ b/apt-pkg/contrib/strutl.h
@@ -17,7 +17,7 @@
#define STRUTL_H
-
+#include <limits>
#include <stdlib.h>
#include <string>
#include <cstring>
@@ -33,6 +33,14 @@ using std::vector;
using std::ostream;
#endif
+namespace APT {
+ namespace String {
+ std::string Strip(const std::string &s);
+ bool Endswith(const std::string &s, const std::string &ending);
+ };
+};
+
+
bool UTF8ToCodeset(const char *codeset, const std::string &orig, std::string *dest);
char *_strstrip(char *String);
char *_strrstrip(char *String); // right strip only
@@ -62,9 +70,32 @@ bool StrToNum(const char *Str,unsigned long &Res,unsigned Len,unsigned Base = 0)
bool StrToNum(const char *Str,unsigned long long &Res,unsigned Len,unsigned Base = 0);
bool Base256ToNum(const char *Str,unsigned long &Res,unsigned int Len);
bool Hex2Num(const std::string &Str,unsigned char *Num,unsigned int Length);
+
+// input changing string split
bool TokSplitString(char Tok,char *Input,char **List,
unsigned long ListMax);
+
+// split a given string by a char
std::vector<std::string> VectorizeString(std::string const &haystack, char const &split) __attrib_const;
+
+/* \brief Return a vector of strings from string "input" where "sep"
+ * is used as the delimiter string.
+ *
+ * \param input The input string.
+ *
+ * \param sep The separator to use.
+ *
+ * \param maxsplit (optional) The maximum number of splits that
+ * should be done.
+ *
+ * The optional "maxsplit" argument can be used to limit the splitting;
+ * if used, the string is only split at maxsplit places and the last
+ * item in the vector contains the remainder string.
+ */
+std::vector<std::string> StringSplit(std::string const &input,
+ std::string const &sep,
+ unsigned int maxsplit=std::numeric_limits<unsigned int>::max()) __attrib_const;
+
void ioprintf(std::ostream &out,const char *format,...) __like_printf(2);
void strprintf(std::string &out,const char *format,...) __like_printf(2);
char *safe_snprintf(char *Buffer,char *End,const char *Format,...) __like_printf(3);
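
The new helpers are deliberately small: APT::String::Strip() trims spaces, tabs and newlines from both ends, Endswith() is a plain suffix test, and StringSplit() splits on a multi-character separator with an optional maxsplit cap that leaves the remainder in the last element. A few assertions as a usage sketch:

#include <apt-pkg/strutl.h>
#include <cassert>
#include <string>
#include <vector>

int main()
{
   assert(APT::String::Strip("  \tfoo bar\n") == "foo bar");
   assert(APT::String::Endswith("Packages.gz", ".gz") == true);

   std::vector<std::string> parts = StringSplit("status: apt: unpacked", ": ");
   assert(parts.size() == 3 && parts[2] == "unpacked");

   // with maxsplit the tail (which may itself contain the separator) stays intact
   parts = StringSplit("a: b: c: d", ": ", 3);
   assert(parts.size() == 3 && parts[2] == "c: d");
   return 0;
}
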
diff --git a/apt-pkg/deb/deblistparser.cc b/apt-pkg/deb/deblistparser.cc
index 87aab6ee2..acdcc4554 100644
--- a/apt-pkg/deb/deblistparser.cc
+++ b/apt-pkg/deb/deblistparser.cc
@@ -635,7 +635,7 @@ bool debListParser::ParseDepends(pkgCache::VerIterator &Ver,
string Version;
unsigned int Op;
- Start = ParseDepends(Start,Stop,Package,Version,Op,false,!MultiArchEnabled);
+ Start = ParseDepends(Start, Stop, Package, Version, Op, false, false);
if (Start == 0)
return _error->Error("Problem parsing dependency %s",Tag);
size_t const found = Package.rfind(':');
@@ -717,9 +717,7 @@ bool debListParser::ParseProvides(pkgCache::VerIterator &Ver)
}
}
- if (MultiArchEnabled == false)
- return true;
- else if ((Ver->MultiArch & pkgCache::Version::Allowed) == pkgCache::Version::Allowed)
+ if ((Ver->MultiArch & pkgCache::Version::Allowed) == pkgCache::Version::Allowed)
{
string const Package = string(Ver.ParentPkg().Name()).append(":").append("any");
return NewProvidesAllArch(Ver, Package, Ver.VerStr());
@@ -760,7 +758,7 @@ bool debListParser::GrabWord(string Word,WordList *List,unsigned char &Out)
/*}}}*/
// ListParser::Step - Move to the next section in the file /*{{{*/
// ---------------------------------------------------------------------
-/* This has to be carefull to only process the correct architecture */
+/* This has to be careful to only process the correct architecture */
bool debListParser::Step()
{
iOffset = Tags.Offset();
diff --git a/apt-pkg/deb/debmetaindex.cc b/apt-pkg/deb/debmetaindex.cc
index b597b6f3c..504877558 100644
--- a/apt-pkg/deb/debmetaindex.cc
+++ b/apt-pkg/deb/debmetaindex.cc
@@ -1,4 +1,3 @@
-// ijones, walters
#include <config.h>
#include <apt-pkg/debmetaindex.h>
@@ -72,6 +71,22 @@ string debReleaseIndex::MetaIndexURI(const char *Type) const
return Res;
}
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+std::string debReleaseIndex::LocalFileName() const
+{
+ // see if we have a InRelease file
+ std::string PathInRelease = MetaIndexFile("InRelease");
+ if (FileExists(PathInRelease))
+ return PathInRelease;
+
+ // and if not return the normal one
+ if (FileExists(MetaIndexFile("Release")))
+ return MetaIndexFile("Release");
+
+ return "";
+}
+#endif
+
string debReleaseIndex::IndexURISuffix(const char *Type, string const &Section, string const &Arch) const
{
string Res ="";
diff --git a/apt-pkg/deb/debmetaindex.h b/apt-pkg/deb/debmetaindex.h
index b9ecab97c..cef8d68f7 100644
--- a/apt-pkg/deb/debmetaindex.h
+++ b/apt-pkg/deb/debmetaindex.h
@@ -3,6 +3,7 @@
#define PKGLIB_DEBMETAINDEX_H
#include <apt-pkg/metaindex.h>
+#include <apt-pkg/init.h>
#include <map>
#include <string>
@@ -39,9 +40,15 @@ class debReleaseIndex : public metaIndex {
virtual bool GetIndexes(pkgAcquire *Owner, bool const &GetAll=false) const;
std::vector <struct IndexTarget *>* ComputeIndexTargets() const;
std::string Info(const char *Type, std::string const &Section, std::string const &Arch="") const;
+
std::string MetaIndexInfo(const char *Type) const;
std::string MetaIndexFile(const char *Types) const;
std::string MetaIndexURI(const char *Type) const;
+
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+ virtual std::string LocalFileName() const;
+#endif
+
std::string IndexURI(const char *Type, std::string const &Section, std::string const &Arch="native") const;
std::string IndexURISuffix(const char *Type, std::string const &Section, std::string const &Arch="native") const;
std::string SourceIndexURI(const char *Type, const std::string &Section) const;
diff --git a/apt-pkg/deb/debsrcrecords.h b/apt-pkg/deb/debsrcrecords.h
index 5d2a67f4f..a8fb465bb 100644
--- a/apt-pkg/deb/debsrcrecords.h
+++ b/apt-pkg/deb/debsrcrecords.h
@@ -30,7 +30,7 @@ class debSrcRecordParser : public pkgSrcRecords::Parser
public:
- virtual bool Restart() {return Tags.Jump(Sect,0);};
+ virtual bool Restart() {return Jump(0);};
virtual bool Step() {iOffset = Tags.Offset(); return Tags.Step(Sect);};
virtual bool Jump(unsigned long const &Off) {iOffset = Off; return Tags.Jump(Sect,Off);};
@@ -50,8 +50,8 @@ class debSrcRecordParser : public pkgSrcRecords::Parser
virtual bool Files(std::vector<pkgSrcRecords::File> &F);
debSrcRecordParser(std::string const &File,pkgIndexFile const *Index)
- : Parser(Index), Fd(File,FileFd::ReadOnly, FileFd::Extension), Tags(&Fd,102400),
- Buffer(NULL) {}
+ : Parser(Index), Fd(File,FileFd::ReadOnly, FileFd::Extension), Tags(&Fd,102400),
+ iOffset(0), Buffer(NULL) {}
virtual ~debSrcRecordParser();
};
diff --git a/apt-pkg/deb/debsystem.cc b/apt-pkg/deb/debsystem.cc
index 7ed6936c3..b95ff15df 100644
--- a/apt-pkg/deb/debsystem.cc
+++ b/apt-pkg/deb/debsystem.cc
@@ -193,7 +193,7 @@ bool debSystem::Initialize(Configuration &Cnf)
/*}}}*/
// System::ArchiveSupported - Is a file format supported /*{{{*/
// ---------------------------------------------------------------------
-/* The standard name for a deb is 'deb'.. There are no seperate versions
+/* The standard name for a deb is 'deb'.. There are no separate versions
of .deb to worry about.. */
bool debSystem::ArchiveSupported(const char *Type)
{
diff --git a/apt-pkg/deb/debversion.cc b/apt-pkg/deb/debversion.cc
index 140561262..74e2552ff 100644
--- a/apt-pkg/deb/debversion.cc
+++ b/apt-pkg/deb/debversion.cc
@@ -116,7 +116,7 @@ int debVersioningSystem::CmpFragment(const char *A,const char *AEnd,
return 1;
}
- // Shouldnt happen
+ // Shouldn't happen
return 1;
}
/*}}}*/
@@ -221,7 +221,7 @@ bool debVersioningSystem::CheckDep(const char *PkgVer,
if (PkgVer == DepVer)
return Op == pkgCache::Dep::Equals || Op == pkgCache::Dep::LessEq || Op == pkgCache::Dep::GreaterEq;
- // Perform the actual comparision.
+ // Perform the actual comparison.
int const Res = CmpVersion(PkgVer, DepVer);
switch (Op)
{
diff --git a/apt-pkg/deb/dpkgpm.cc b/apt-pkg/deb/dpkgpm.cc
index b069f22f7..b975754b0 100644
--- a/apt-pkg/deb/dpkgpm.cc
+++ b/apt-pkg/deb/dpkgpm.cc
@@ -19,6 +19,7 @@
#include <apt-pkg/fileutl.h>
#include <apt-pkg/cachefile.h>
#include <apt-pkg/packagemanager.h>
+#include <apt-pkg/install-progress.h>
#include <unistd.h>
#include <stdlib.h>
@@ -37,11 +38,13 @@
#include <map>
#include <pwd.h>
#include <grp.h>
+#include <iomanip>
#include <termios.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <pty.h>
+#include <stdio.h>
#include <apti18n.h>
/*}}}*/
@@ -52,10 +55,14 @@ class pkgDPkgPMPrivate
{
public:
pkgDPkgPMPrivate() : stdin_is_dev_null(false), dpkgbuf_pos(0),
- term_out(NULL), history_out(NULL)
+ term_out(NULL), history_out(NULL),
+ progress(NULL), master(-1), slave(-1)
{
dpkgbuf[0] = '\0';
}
+ ~pkgDPkgPMPrivate()
+ {
+ }
bool stdin_is_dev_null;
// the buffer we use for the dpkg status-fd reading
char dpkgbuf[1024];
@@ -63,6 +70,17 @@ public:
FILE *term_out;
FILE *history_out;
string dpkg_error;
+ APT::Progress::PackageManager *progress;
+
+ // pty stuff
+ struct termios tt;
+ int master;
+ int slave;
+
+ // signals
+ sigset_t sigmask;
+ sigset_t original_sigmask;
+
};
namespace
@@ -125,6 +143,20 @@ ionice(int PID)
return ExecWait(Process, "ionice");
}
+static std::string getDpkgExecutable()
+{
+ string Tmp = _config->Find("Dir::Bin::dpkg","dpkg");
+ string const dpkgChrootDir = _config->FindDir("DPkg::Chroot-Directory", "/");
+ size_t dpkgChrootLen = dpkgChrootDir.length();
+ if (dpkgChrootDir != "/" && Tmp.find(dpkgChrootDir) == 0)
+ {
+ if (dpkgChrootDir[dpkgChrootLen - 1] == '/')
+ --dpkgChrootLen;
+ Tmp = Tmp.substr(dpkgChrootLen);
+ }
+ return Tmp;
+}
+
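
getDpkgExecutable() strips a configured DPkg::Chroot-Directory prefix from Dir::Bin::dpkg so the path passed to exec is valid inside the chroot. The helper is file-static, so the sketch below reproduces its logic with made-up configuration values to show the effect:

#include <apt-pkg/configuration.h>
#include <iostream>
#include <string>

int main()
{
   // hypothetical values, only to illustrate the prefix stripping
   _config->Set("Dir::Bin::dpkg", "/srv/chroot/usr/bin/dpkg");
   _config->Set("DPkg::Chroot-Directory", "/srv/chroot/");

   std::string Tmp = _config->Find("Dir::Bin::dpkg", "dpkg");
   std::string const chroot = _config->FindDir("DPkg::Chroot-Directory", "/");
   size_t len = chroot.length();
   if (chroot != "/" && Tmp.find(chroot) == 0)
   {
      if (chroot[len - 1] == '/')
         --len;
      Tmp = Tmp.substr(len);
   }
   std::cout << Tmp << std::endl;   // prints /usr/bin/dpkg
   return 0;
}
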
// dpkgChrootDirectory - chrooting for dpkg if needed /*{{{*/
static void dpkgChrootDirectory()
{
@@ -170,7 +202,7 @@ pkgCache::VerIterator FindNowVersion(const pkgCache::PkgIterator &Pkg)
// ---------------------------------------------------------------------
/* */
pkgDPkgPM::pkgDPkgPM(pkgDepCache *Cache)
- : pkgPackageManager(Cache), PackagesDone(0), PackagesTotal(0)
+ : pkgPackageManager(Cache), pkgFailures(0), PackagesDone(0), PackagesTotal(0)
{
d = new pkgDPkgPMPrivate();
}
@@ -385,17 +417,21 @@ bool pkgDPkgPM::RunScriptsWithPkgs(const char *Cnf)
unsigned int InfoFD = _config->FindI(OptSec + "::InfoFD", STDIN_FILENO);
// Create the pipes
+ std::set<int> KeepFDs;
+ MergeKeepFdsFromConfiguration(KeepFDs);
int Pipes[2];
if (pipe(Pipes) != 0)
return _error->Errno("pipe","Failed to create IPC pipe to subprocess");
if (InfoFD != (unsigned)Pipes[0])
SetCloseExec(Pipes[0],true);
else
- _config->Set("APT::Keep-Fds::", Pipes[0]);
+ KeepFDs.insert(Pipes[0]);
+
+
SetCloseExec(Pipes[1],true);
// Purified Fork for running the script
- pid_t Process = ExecFork();
+ pid_t Process = ExecFork(KeepFDs);
if (Process == 0)
{
// Setup the FDs
@@ -417,8 +453,6 @@ bool pkgDPkgPM::RunScriptsWithPkgs(const char *Cnf)
execv(Args[0],(char **)Args);
_exit(100);
}
- if (InfoFD == (unsigned)Pipes[0])
- _config->Clear("APT::Keep-Fds", Pipes[0]);
close(Pipes[0]);
FILE *F = fdopen(Pipes[1],"w");
if (F == 0)
@@ -501,156 +535,184 @@ void pkgDPkgPM::DoTerminalPty(int master)
// ---------------------------------------------------------------------
/*
*/
-void pkgDPkgPM::ProcessDpkgStatusLine(int OutStatusFd, char *line)
+void pkgDPkgPM::ProcessDpkgStatusLine(char *line)
{
bool const Debug = _config->FindB("Debug::pkgDPkgProgressReporting",false);
- // the status we output
- ostringstream status;
-
if (Debug == true)
std::clog << "got from dpkg '" << line << "'" << std::endl;
-
/* dpkg sends strings like this:
- 'status: <pkg>: <pkg qstate>'
- errors look like this:
- 'status: /var/cache/apt/archives/krecipes_0.8.1-0ubuntu1_i386.deb : error : trying to overwrite `/usr/share/doc/kde/HTML/en/krecipes/krectip.png', which is also in package krecipes-data
- and conffile-prompt like this
- 'status: conffile-prompt: conffile : 'current-conffile' 'new-conffile' useredited distedited
+ 'status: <pkg>: <pkg qstate>'
+ 'status: <pkg>:<arch>: <pkg qstate>'
- Newer versions of dpkg sent also:
- 'processing: install: pkg'
- 'processing: configure: pkg'
- 'processing: remove: pkg'
- 'processing: purge: pkg'
- 'processing: disappear: pkg'
- 'processing: trigproc: trigger'
-
+ 'processing: {install,configure,remove,purge,disappear,trigproc}: pkg'
+ 'processing: {install,configure,remove,purge,disappear,trigproc}: trigger'
*/
- char* list[6];
- // dpkg sends multiline error messages sometimes (see
- // #374195 for a example. we should support this by
- // either patching dpkg to not send multiline over the
- // statusfd or by rewriting the code here to deal with
- // it. for now we just ignore it and not crash
- TokSplitString(':', line, list, sizeof(list)/sizeof(list[0]));
- if( list[0] == NULL || list[1] == NULL || list[2] == NULL)
+
+ // we need to split on ": " (note the appended space) as the ':' is
+ // part of the pkgname:arch information that dpkg sends
+ //
+ // A dpkg error message may contain additional ":" (like
+ // "failed in buffer_write(fd) (10, ret=-1): backend dpkg-deb ..."
+ // so we need to ensure to not split too much
+ std::vector<std::string> list = StringSplit(line, ": ", 4);
+ if(list.size() < 3)
{
if (Debug == true)
std::clog << "ignoring line: not enough ':'" << std::endl;
return;
}
- const char* const pkg = list[1];
- const char* action = _strstrip(list[2]);
+
+ // build the (prefix, pkgname, action) tuple, position of this
+ // is different for "processing" or "status" messages
+ std::string prefix = APT::String::Strip(list[0]);
+ std::string pkgname;
+ std::string action;
+
+ // "processing" has the form "processing: action: pkg or trigger"
+ // with action = ["install", "configure", "remove", "purge", "disappear",
+ // "trigproc"]
+ if (prefix == "processing")
+ {
+ pkgname = APT::String::Strip(list[2]);
+ action = APT::String::Strip(list[1]);
+ }
+ // "status" has the form: "status: pkg: state"
+ // with state in ["half-installed", "unpacked", "half-configured",
+ // "installed", "config-files", "not-installed"]
+ else if (prefix == "status")
+ {
+ pkgname = APT::String::Strip(list[1]);
+ action = APT::String::Strip(list[2]);
+ } else {
+ if (Debug == true)
+ std::clog << "unknown prefix '" << prefix << "'" << std::endl;
+ return;
+ }
+
+
+ /* handle the special cases first:
+
+ errors look like this:
+ 'status: /var/cache/apt/archives/krecipes_0.8.1-0ubuntu1_i386.deb : error : trying to overwrite `/usr/share/doc/kde/HTML/en/krecipes/krectip.png', which is also in package krecipes-data
+ and conffile-prompt like this
+ 'status:/etc/compiz.conf/compiz.conf : conffile-prompt: 'current-conffile' 'new-conffile' useredited distedited
+ */
+ if (prefix == "status")
+ {
+ if(action == "error")
+ {
+ d->progress->Error(list[1], PackagesDone, PackagesTotal,
+ list[3]);
+ pkgFailures++;
+ WriteApportReport(list[1].c_str(), list[3].c_str());
+ return;
+ }
+ else if(action == "conffile-prompt")
+ {
+ d->progress->ConffilePrompt(list[1], PackagesDone, PackagesTotal,
+ list[3]);
+ return;
+ }
+ }
+
+ // at this point we know that we should have a valid pkgname, so build all
+ // the info from it
+
+ // dpkg does not always send "pkgname:arch" so we add it here
+ // if needed
+ if (pkgname.find(":") == std::string::npos)
+ {
+ // find the package in the group that is touched by dpkg
+ // if there are multiple dpkg will send us a full pkgname:arch
+ pkgCache::GrpIterator Grp = Cache.FindGrp(pkgname);
+ if (Grp.end() == false)
+ {
+ pkgCache::PkgIterator P = Grp.PackageList();
+ for (; P.end() != true; P = Grp.NextPkg(P))
+ {
+ if(Cache[P].Mode != pkgDepCache::ModeKeep)
+ {
+ pkgname = P.FullName();
+ break;
+ }
+ }
+ }
+ }
+
+ const char* const pkg = pkgname.c_str();
+ std::string short_pkgname = StringSplit(pkgname, ":")[0];
+ std::string arch = "";
+ if (pkgname.find(":") != string::npos)
+ arch = StringSplit(pkgname, ":")[1];
+ std::string i18n_pkgname = pkgname;
+ if (arch.size() != 0)
+ strprintf(i18n_pkgname, "%s (%s)", short_pkgname.c_str(), arch.c_str());
// 'processing' from dpkg looks like
// 'processing: action: pkg'
- if(strncmp(list[0], "processing", strlen("processing")) == 0)
+ if(prefix == "processing")
{
- char s[200];
- const char* const pkg_or_trigger = _strstrip(list[2]);
- action = _strstrip( list[1]);
const std::pair<const char *, const char *> * const iter =
std::find_if(PackageProcessingOpsBegin,
PackageProcessingOpsEnd,
- MatchProcessingOp(action));
+ MatchProcessingOp(action.c_str()));
if(iter == PackageProcessingOpsEnd)
{
if (Debug == true)
std::clog << "ignoring unknown action: " << action << std::endl;
return;
}
- snprintf(s, sizeof(s), _(iter->second), pkg_or_trigger);
-
- status << "pmstatus:" << pkg_or_trigger
- << ":" << (PackagesDone/float(PackagesTotal)*100.0)
- << ":" << s
- << endl;
- if(OutStatusFd > 0)
- FileFd::Write(OutStatusFd, status.str().c_str(), status.str().size());
- if (Debug == true)
- std::clog << "send: '" << status.str() << "'" << endl;
-
- if (strncmp(action, "disappear", strlen("disappear")) == 0)
- handleDisappearAction(pkg_or_trigger);
- return;
- }
-
- if(strncmp(action,"error",strlen("error")) == 0)
- {
- // urgs, sometime has ":" in its error string so that we
- // end up with the error message split between list[3]
- // and list[4], e.g. the message:
- // "failed in buffer_write(fd) (10, ret=-1): backend dpkg-deb ..."
- // concat them again
- if( list[4] != NULL )
- list[3][strlen(list[3])] = ':';
-
- status << "pmerror:" << list[1]
- << ":" << (PackagesDone/float(PackagesTotal)*100.0)
- << ":" << list[3]
- << endl;
- if(OutStatusFd > 0)
- FileFd::Write(OutStatusFd, status.str().c_str(), status.str().size());
- if (Debug == true)
- std::clog << "send: '" << status.str() << "'" << endl;
- pkgFailures++;
- WriteApportReport(list[1], list[3]);
- return;
- }
- else if(strncmp(action,"conffile",strlen("conffile")) == 0)
- {
- status << "pmconffile:" << list[1]
- << ":" << (PackagesDone/float(PackagesTotal)*100.0)
- << ":" << list[3]
- << endl;
- if(OutStatusFd > 0)
- FileFd::Write(OutStatusFd, status.str().c_str(), status.str().size());
- if (Debug == true)
- std::clog << "send: '" << status.str() << "'" << endl;
+ std::string msg;
+ strprintf(msg, _(iter->second), i18n_pkgname.c_str());
+ d->progress->StatusChanged(pkgname, PackagesDone, PackagesTotal, msg);
+
+ // FIXME: this needs a multiarch testcase
+ // FIXME2: is "pkgname" here reliable with dpkg only sending us
+ // short pkgnames?
+ if (action == "disappear")
+ handleDisappearAction(pkgname);
return;
- }
+ }
- vector<struct DpkgState> const &states = PackageOps[pkg];
- const char *next_action = NULL;
- if(PackageOpsDone[pkg] < states.size())
- next_action = states[PackageOpsDone[pkg]].state;
- // check if the package moved to the next dpkg state
- if(next_action && (strcmp(action, next_action) == 0))
+ if (prefix == "status")
{
- // only read the translation if there is actually a next
- // action
- const char *translation = _(states[PackageOpsDone[pkg]].str);
- char s[200];
- snprintf(s, sizeof(s), translation, pkg);
-
- // we moved from one dpkg state to a new one, report that
- PackageOpsDone[pkg]++;
- PackagesDone++;
- // build the status str
- status << "pmstatus:" << pkg
- << ":" << (PackagesDone/float(PackagesTotal)*100.0)
- << ":" << s
- << endl;
- if(OutStatusFd > 0)
- FileFd::Write(OutStatusFd, status.str().c_str(), status.str().size());
- if (Debug == true)
- std::clog << "send: '" << status.str() << "'" << endl;
+ vector<struct DpkgState> const &states = PackageOps[pkg];
+ const char *next_action = NULL;
+ if(PackageOpsDone[pkg] < states.size())
+ next_action = states[PackageOpsDone[pkg]].state;
+ // check if the package moved to the next dpkg state
+ if(next_action && (action == next_action))
+ {
+ // only read the translation if there is actually a next
+ // action
+ const char *translation = _(states[PackageOpsDone[pkg]].str);
+ std::string msg;
+
+ // we moved from one dpkg state to a new one, report that
+ PackageOpsDone[pkg]++;
+ PackagesDone++;
+
+ strprintf(msg, translation, i18n_pkgname.c_str());
+ d->progress->StatusChanged(pkgname, PackagesDone, PackagesTotal, msg);
+
+ }
+ if (Debug == true)
+ std::clog << "(parsed from dpkg) pkg: " << short_pkgname
+ << " action: " << action << endl;
}
- if (Debug == true)
- std::clog << "(parsed from dpkg) pkg: " << pkg
- << " action: " << action << endl;
}
/*}}}*/
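
The reworked parser splits on the two-character separator ": " with maxsplit 4, so a package name like libfoo:amd64 is not torn apart and an error text containing further colons survives as one piece in list[3]. A standalone sketch of the same classification step, with prints standing in for the progress calls that ProcessDpkgStatusLine above makes:

#include <apt-pkg/strutl.h>
#include <iostream>
#include <string>
#include <vector>

static void Classify(std::string const &line)
{
   std::vector<std::string> list = StringSplit(line, ": ", 4);
   if (list.size() < 3)
      return;   // not enough ": ", ignored just like dpkg's multiline error noise
   std::string const prefix = APT::String::Strip(list[0]);
   if (prefix == "processing")
      std::cout << "action=" << APT::String::Strip(list[1])
                << " pkg=" << APT::String::Strip(list[2]) << std::endl;
   else if (prefix == "status")
      std::cout << "pkg=" << APT::String::Strip(list[1])
                << " state=" << APT::String::Strip(list[2]) << std::endl;
}

int main()
{
   Classify("status: libfoo:amd64: unpacked");
   Classify("processing: configure: libfoo:amd64");
   return 0;
}
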
// DPkgPM::handleDisappearAction /*{{{*/
void pkgDPkgPM::handleDisappearAction(string const &pkgname)
{
- // record the package name for display and stuff later
- disappearedPkgs.insert(pkgname);
-
pkgCache::PkgIterator Pkg = Cache.FindPkg(pkgname);
if (unlikely(Pkg.end() == true))
return;
+
+ // record the package name for display and stuff later
+ disappearedPkgs.insert(Pkg.FullName(true));
+
// the disappeared package was auto-installed - nothing to do
if ((Cache[Pkg].Flags & pkgCache::Flag::Auto) == pkgCache::Flag::Auto)
return;
@@ -693,7 +755,7 @@ void pkgDPkgPM::handleDisappearAction(string const &pkgname)
// ---------------------------------------------------------------------
/*
*/
-void pkgDPkgPM::DoDpkgStatusFd(int statusfd, int OutStatusFd)
+void pkgDPkgPM::DoDpkgStatusFd(int statusfd)
{
char *p, *q;
int len;
@@ -708,7 +770,7 @@ void pkgDPkgPM::DoDpkgStatusFd(int statusfd, int OutStatusFd)
while((q=(char*)memchr(p, '\n', d->dpkgbuf+d->dpkgbuf_pos-p)) != NULL)
{
*q = 0;
- ProcessDpkgStatusLine(OutStatusFd, p);
+ ProcessDpkgStatusLine(p);
p=q+1; // continue with next line
}
@@ -874,6 +936,7 @@ bool pkgDPkgPM::CloseLog()
return true;
}
/*}}}*/
+ /*}}}*/
/*{{{*/
// This implements a racy version of pselect for those architectures
// that don't have a working implementation.
@@ -894,36 +957,158 @@ static int racy_pselect(int nfds, fd_set *readfds, fd_set *writefds,
sigprocmask(SIG_SETMASK, &origmask, 0);
return retval;
}
-/*}}}*/
+ /*}}}*/
+
+// DPkgPM::BuildPackagesProgressMap /*{{{*/
+void pkgDPkgPM::BuildPackagesProgressMap()
+{
+ // map the dpkg states to the operations that are performed
+ // (this is sorted in the same way as Item::Ops)
+ static const struct DpkgState DpkgStatesOpMap[][7] = {
+ // Install operation
+ {
+ {"half-installed", N_("Preparing %s")},
+ {"unpacked", N_("Unpacking %s") },
+ {NULL, NULL}
+ },
+ // Configure operation
+ {
+ {"unpacked",N_("Preparing to configure %s") },
+ {"half-configured", N_("Configuring %s") },
+ { "installed", N_("Installed %s")},
+ {NULL, NULL}
+ },
+ // Remove operation
+ {
+ {"half-configured", N_("Preparing for removal of %s")},
+ {"half-installed", N_("Removing %s")},
+ {"config-files", N_("Removed %s")},
+ {NULL, NULL}
+ },
+ // Purge operation
+ {
+ {"config-files", N_("Preparing to completely remove %s")},
+ {"not-installed", N_("Completely removed %s")},
+ {NULL, NULL}
+ },
+ };
+
+ // init the PackageOps map, go over the list of packages
+ // that will be [installed|configured|removed|purged] and add
+ // them to the PackageOps map (the dpkg states it goes through)
+ // and the PackageOpsTranslations (human readable strings)
+ for (vector<Item>::const_iterator I = List.begin(); I != List.end(); ++I)
+ {
+ if((*I).Pkg.end() == true)
+ continue;
+
+ string const name = (*I).Pkg.FullName();
+ PackageOpsDone[name] = 0;
+ for(int i=0; (DpkgStatesOpMap[(*I).Op][i]).state != NULL; ++i)
+ {
+ PackageOps[name].push_back(DpkgStatesOpMap[(*I).Op][i]);
+ PackagesTotal++;
+ }
+ }
+}
+ /*}}}*/
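
PackagesTotal ends up as the number of dpkg states the scheduled operations will pass through (2 for an install, 3 for a configure, 3 for a remove, 2 for a purge, per the table above), and the progress percentage is PackagesDone relative to that, as in the reporting code. A back-of-the-envelope check, assuming a single package that is unpacked and then configured:

#include <iostream>

int main()
{
   // one package scheduled for Install (2 states) plus Configure (3 states)
   unsigned int const PackagesTotal = 2 + 3;
   for (unsigned int PackagesDone = 0; PackagesDone <= PackagesTotal; ++PackagesDone)
      std::cout << PackagesDone / float(PackagesTotal) * 100.0 << "% ";
   std::cout << std::endl;   // prints: 0% 20% 40% 60% 80% 100%
   return 0;
}
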
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR < 13)
+bool pkgDPkgPM::Go(int StatusFd)
+{
+ APT::Progress::PackageManager *progress = NULL;
+ if (StatusFd == -1)
+ progress = APT::Progress::PackageManagerProgressFactory();
+ else
+ progress = new APT::Progress::PackageManagerProgressFd(StatusFd);
+
+ return GoNoABIBreak(progress);
+}
+#endif
+
+void pkgDPkgPM::StartPtyMagic()
+{
+ if (_config->FindB("Dpkg::Use-Pty", true) == false)
+ {
+ d->master = d->slave = -1;
+ return;
+ }
+
+ // setup the pty and stuff
+ struct winsize win;
+
+ // if tcgetattr does not return zero there was an error
+ // and we do not do any pty magic
+ _error->PushToStack();
+ if (tcgetattr(STDOUT_FILENO, &d->tt) == 0)
+ {
+ if (ioctl(1, TIOCGWINSZ, (char *)&win) < 0)
+ {
+ _error->Errno("ioctl", _("ioctl(TIOCGWINSZ) failed"));
+ } else if (openpty(&d->master, &d->slave, NULL, &d->tt, &win) < 0)
+ {
+ _error->Errno("openpty", _("Can not write log (%s)"), _("Is /dev/pts mounted?"));
+ d->master = d->slave = -1;
+ } else {
+ struct termios rtt;
+ rtt = d->tt;
+ cfmakeraw(&rtt);
+ rtt.c_lflag &= ~ECHO;
+ rtt.c_lflag |= ISIG;
+ // block SIGTTOU during tcsetattr to prevent a hang if
+ // the process is a member of the background process group
+ // http://www.opengroup.org/onlinepubs/000095399/functions/tcsetattr.html
+ sigemptyset(&d->sigmask);
+ sigaddset(&d->sigmask, SIGTTOU);
+ sigprocmask(SIG_BLOCK,&d->sigmask, &d->original_sigmask);
+ tcsetattr(0, TCSAFLUSH, &rtt);
+ sigprocmask(SIG_SETMASK, &d->original_sigmask, 0);
+ }
+ }
+ // complain only if stdout is either a terminal (but still failed) or is an invalid
+ // descriptor otherwise we would complain about redirection to e.g. /dev/null as well.
+ else if (isatty(STDOUT_FILENO) == 1 || errno == EBADF)
+ _error->Errno("tcgetattr", _("Can not write log (%s)"), _("Is stdout a terminal?"));
+
+ if (_error->PendingError() == true)
+ _error->DumpErrors(std::cerr);
+ _error->RevertToStack();
+}
+
+void pkgDPkgPM::StopPtyMagic()
+{
+ if(d->slave > 0)
+ close(d->slave);
+ if(d->master >= 0)
+ {
+ tcsetattr(0, TCSAFLUSH, &d->tt);
+ close(d->master);
+ }
+}
+
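   [Editor's note: the pty handling factored out into StartPtyMagic()/StopPtyMagic()
   stays configurable. A hedged apt.conf fragment to turn it off (useful e.g. in
   minimal chroots without /dev/pts) would be:]

      // apt.conf sketch - makes StartPtyMagic() bail out early (master/slave = -1)
      Dpkg::Use-Pty "false";
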
// DPkgPM::Go - Run the sequence /*{{{*/
// ---------------------------------------------------------------------
/* This globs the operations and calls dpkg
- *
- * If it is called with "OutStatusFd" set to a valid file descriptor
- * apt will report the install progress over this fd. It maps the
- * dpkg states a package goes through to human readable (and i10n-able)
+ *
+ * If it is called with a progress object apt will report the install
+ * progress to this object. It maps the dpkg states a package goes
+ * through to human readable (and i18n-able)
* names and calculates a percentage for each step.
-*/
-bool pkgDPkgPM::Go(int OutStatusFd)
+ */
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+bool pkgDPkgPM::Go(APT::Progress::PackageManager *progress)
+#else
+bool pkgDPkgPM::GoNoABIBreak(APT::Progress::PackageManager *progress)
+#endif
{
pkgPackageManager::SigINTStop = false;
+ d->progress = progress;
// Generate the base argument list for dpkg
- std::vector<const char *> Args;
unsigned long StartSize = 0;
- string Tmp = _config->Find("Dir::Bin::dpkg","dpkg");
- {
- string const dpkgChrootDir = _config->FindDir("DPkg::Chroot-Directory", "/");
- size_t dpkgChrootLen = dpkgChrootDir.length();
- if (dpkgChrootDir != "/" && Tmp.find(dpkgChrootDir) == 0)
- {
- if (dpkgChrootDir[dpkgChrootLen - 1] == '/')
- --dpkgChrootLen;
- Tmp = Tmp.substr(dpkgChrootLen);
- }
- }
- Args.push_back(Tmp.c_str());
- StartSize += Tmp.length();
+ std::vector<const char *> Args;
+ std::string DpkgExecutable = getDpkgExecutable();
+ Args.push_back(DpkgExecutable.c_str());
+ StartSize += DpkgExecutable.length();
// Stick in any custom dpkg options
Configuration::Item const *Opts = _config->Tree("DPkg::Options");
@@ -960,8 +1145,6 @@ bool pkgDPkgPM::Go(int OutStatusFd)
fd_set rfds;
struct timespec tv;
- sigset_t sigmask;
- sigset_t original_sigmask;
unsigned int const MaxArgs = _config->FindI("Dpkg::MaxArgs",8*1024);
unsigned int const MaxArgBytes = _config->FindI("Dpkg::MaxArgBytes",32*1024);
@@ -980,54 +1163,8 @@ bool pkgDPkgPM::Go(int OutStatusFd)
if (_config->FindB("DPkg::ConfigurePending", SmartConf) == true)
List.push_back(Item(Item::ConfigurePending, PkgIterator()));
- // map the dpkg states to the operations that are performed
- // (this is sorted in the same way as Item::Ops)
- static const struct DpkgState DpkgStatesOpMap[][7] = {
- // Install operation
- {
- {"half-installed", N_("Preparing %s")},
- {"unpacked", N_("Unpacking %s") },
- {NULL, NULL}
- },
- // Configure operation
- {
- {"unpacked",N_("Preparing to configure %s") },
- {"half-configured", N_("Configuring %s") },
- { "installed", N_("Installed %s")},
- {NULL, NULL}
- },
- // Remove operation
- {
- {"half-configured", N_("Preparing for removal of %s")},
- {"half-installed", N_("Removing %s")},
- {"config-files", N_("Removed %s")},
- {NULL, NULL}
- },
- // Purge operation
- {
- {"config-files", N_("Preparing to completely remove %s")},
- {"not-installed", N_("Completely removed %s")},
- {NULL, NULL}
- },
- };
-
- // init the PackageOps map, go over the list of packages that
- // that will be [installed|configured|removed|purged] and add
- // them to the PackageOps map (the dpkg states it goes through)
- // and the PackageOpsTranslations (human readable strings)
- for (vector<Item>::const_iterator I = List.begin(); I != List.end(); ++I)
- {
- if((*I).Pkg.end() == true)
- continue;
-
- string const name = (*I).Pkg.Name();
- PackageOpsDone[name] = 0;
- for(int i=0; (DpkgStatesOpMap[(*I).Op][i]).state != NULL; ++i)
- {
- PackageOps[name].push_back(DpkgStatesOpMap[(*I).Op][i]);
- PackagesTotal++;
- }
- }
+ // for the progress
+ BuildPackagesProgressMap();
d->stdin_is_dev_null = false;
@@ -1049,8 +1186,15 @@ bool pkgDPkgPM::Go(int OutStatusFd)
dpkgMultiArch = true;
}
- // this loop is runs once per operation
- for (vector<Item>::const_iterator I = List.begin(); I != List.end();)
+ // start pty magic before the loop
+ StartPtyMagic();
+
+ // Tell the progress that it is starting and fork dpkg
+ d->progress->Start(d->master);
+
+ // this loop runs once per dpkg operation
+ vector<Item>::const_iterator I = List.begin();
+ while (I != List.end())
{
// Do all actions with the same Op in one run
vector<Item>::const_iterator J = I;
@@ -1171,7 +1315,7 @@ bool pkgDPkgPM::Go(int OutStatusFd)
{
if((*I).Pkg.end() == true)
continue;
- if (I->Op == Item::Configure && disappearedPkgs.find(I->Pkg.Name()) != disappearedPkgs.end())
+ if (I->Op == Item::Configure && disappearedPkgs.find(I->Pkg.FullName(true)) != disappearedPkgs.end())
continue;
// We keep this here to allow "smooth" transitions from e.g. multiarch dpkg/ubuntu to dpkg/debian
if (dpkgMultiArch == false && (I->Pkg.Arch() == nativeArch ||
@@ -1241,72 +1385,29 @@ bool pkgDPkgPM::Go(int OutStatusFd)
// ignore SIGHUP as well (debian #463030)
sighandler_t old_SIGHUP = signal(SIGHUP,SIG_IGN);
- struct termios tt;
- struct winsize win;
- int master = -1;
- int slave = -1;
-
- // if tcgetattr does not return zero there was a error
- // and we do not do any pty magic
- _error->PushToStack();
- if (tcgetattr(STDOUT_FILENO, &tt) == 0)
- {
- ioctl(0, TIOCGWINSZ, (char *)&win);
- if (openpty(&master, &slave, NULL, &tt, &win) < 0)
- {
- _error->Errno("openpty", _("Can not write log (%s)"), _("Is /dev/pts mounted?"));
- master = slave = -1;
- } else {
- struct termios rtt;
- rtt = tt;
- cfmakeraw(&rtt);
- rtt.c_lflag &= ~ECHO;
- rtt.c_lflag |= ISIG;
- // block SIGTTOU during tcsetattr to prevent a hang if
- // the process is a member of the background process group
- // http://www.opengroup.org/onlinepubs/000095399/functions/tcsetattr.html
- sigemptyset(&sigmask);
- sigaddset(&sigmask, SIGTTOU);
- sigprocmask(SIG_BLOCK,&sigmask, &original_sigmask);
- tcsetattr(0, TCSAFLUSH, &rtt);
- sigprocmask(SIG_SETMASK, &original_sigmask, 0);
- }
- }
- // complain only if stdout is either a terminal (but still failed) or is an invalid
- // descriptor otherwise we would complain about redirection to e.g. /dev/null as well.
- else if (isatty(STDOUT_FILENO) == 1 || errno == EBADF)
- _error->Errno("tcgetattr", _("Can not write log (%s)"), _("Is stdout a terminal?"));
-
- if (_error->PendingError() == true)
- _error->DumpErrors(std::cerr);
- _error->RevertToStack();
-
- // Fork dpkg
- pid_t Child;
- _config->Set("APT::Keep-Fds::",fd[1]);
- // send status information that we are about to fork dpkg
- if(OutStatusFd > 0) {
- ostringstream status;
- status << "pmstatus:dpkg-exec:"
- << (PackagesDone/float(PackagesTotal)*100.0)
- << ":" << _("Running dpkg")
- << endl;
- FileFd::Write(OutStatusFd, status.str().c_str(), status.str().size());
- }
- Child = ExecFork();
-
- // This is the child
+ // now run dpkg
+ d->progress->StartDpkg();
+ std::set<int> KeepFDs;
+ KeepFDs.insert(fd[1]);
+ MergeKeepFdsFromConfiguration(KeepFDs);
+ pid_t Child = ExecFork(KeepFDs);
if (Child == 0)
{
- if(slave >= 0 && master >= 0)
+ // This is the child
+ if(d->slave >= 0 && d->master >= 0)
{
setsid();
- ioctl(slave, TIOCSCTTY, 0);
- close(master);
- dup2(slave, 0);
- dup2(slave, 1);
- dup2(slave, 2);
- close(slave);
+ int res = ioctl(d->slave, TIOCSCTTY, 0);
+ if (res < 0) {
+ std::cerr << "ioctl(TIOCSCTTY) failed for fd: "
+ << d->slave << std::endl;
+ } else {
+ close(d->master);
+ dup2(d->slave, 0);
+ dup2(d->slave, 1);
+ dup2(d->slave, 2);
+ close(d->slave);
+ }
}
close(fd[0]); // close the read end of the pipe
@@ -1314,7 +1415,7 @@ bool pkgDPkgPM::Go(int OutStatusFd)
if (chdir(_config->FindDir("DPkg::Run-Directory","/").c_str()) != 0)
_exit(100);
-
+
if (_config->FindB("DPkg::FlushSTDIN",true) == true && isatty(STDIN_FILENO))
{
int Flags,dummy;
@@ -1343,9 +1444,6 @@ bool pkgDPkgPM::Go(int OutStatusFd)
if (_config->FindB("DPkg::UseIoNice", false) == true)
ionice(Child);
- // clear the Keep-Fd again
- _config->Clear("APT::Keep-Fds",fd[1]);
-
// Wait for dpkg
int Status = 0;
@@ -1353,12 +1451,9 @@ bool pkgDPkgPM::Go(int OutStatusFd)
int const _dpkgin = fd[0];
close(fd[1]); // close the write end of the pipe
- if(slave > 0)
- close(slave);
-
// setups fds
- sigemptyset(&sigmask);
- sigprocmask(SIG_BLOCK,&sigmask,&original_sigmask);
+ sigemptyset(&d->sigmask);
+ sigprocmask(SIG_BLOCK,&d->sigmask,&d->original_sigmask);
/* free vectors (and therefore memory) as we don't need the included data anymore */
for (std::vector<char *>::const_iterator p = Packages.begin();
@@ -1387,18 +1482,19 @@ bool pkgDPkgPM::Go(int OutStatusFd)
// wait for input or output here
FD_ZERO(&rfds);
- if (master >= 0 && !d->stdin_is_dev_null)
+ if (d->master >= 0 && !d->stdin_is_dev_null)
FD_SET(0, &rfds);
FD_SET(_dpkgin, &rfds);
- if(master >= 0)
- FD_SET(master, &rfds);
- tv.tv_sec = 1;
- tv.tv_nsec = 0;
- select_ret = pselect(max(master, _dpkgin)+1, &rfds, NULL, NULL,
- &tv, &original_sigmask);
+ if(d->master >= 0)
+ FD_SET(d->master, &rfds);
+ tv.tv_sec = 0;
+ tv.tv_nsec = d->progress->GetPulseInterval();
+ select_ret = pselect(max(d->master, _dpkgin)+1, &rfds, NULL, NULL,
+ &tv, &d->original_sigmask);
if (select_ret < 0 && (errno == EINVAL || errno == ENOSYS))
- select_ret = racy_pselect(max(master, _dpkgin)+1, &rfds, NULL,
- NULL, &tv, &original_sigmask);
+ select_ret = racy_pselect(max(d->master, _dpkgin)+1, &rfds, NULL,
+ NULL, &tv, &d->original_sigmask);
+ d->progress->Pulse();
if (select_ret == 0)
continue;
else if (select_ret < 0 && errno == EINTR)
@@ -1409,12 +1505,12 @@ bool pkgDPkgPM::Go(int OutStatusFd)
continue;
}
- if(master >= 0 && FD_ISSET(master, &rfds))
- DoTerminalPty(master);
- if(master >= 0 && FD_ISSET(0, &rfds))
- DoStdin(master);
+ if(d->master >= 0 && FD_ISSET(d->master, &rfds))
+ DoTerminalPty(d->master);
+ if(d->master >= 0 && FD_ISSET(0, &rfds))
+ DoStdin(d->master);
if(FD_ISSET(_dpkgin, &rfds))
- DoDpkgStatusFd(_dpkgin, OutStatusFd);
+ DoDpkgStatusFd(_dpkgin);
}
close(_dpkgin);
@@ -1423,13 +1519,6 @@ bool pkgDPkgPM::Go(int OutStatusFd)
signal(SIGINT,old_SIGINT);
signal(SIGHUP,old_SIGHUP);
-
- if(master >= 0)
- {
- tcsetattr(0, TCSAFLUSH, &tt);
- close(master);
- }
-
// Check for an error code.
if (WIFEXITED(Status) == 0 || WEXITSTATUS(Status) != 0)
{
@@ -1454,12 +1543,17 @@ bool pkgDPkgPM::Go(int OutStatusFd)
if(stopOnError)
{
CloseLog();
+ StopPtyMagic();
+ d->progress->Stop();
return false;
}
}
}
+ // dpkg is done at this point
+ d->progress->Stop();
+ StopPtyMagic();
CloseLog();
-
+
if (pkgPackageManager::SigINTStop)
_error->Warning(_("Operation was interrupted before it could finish"));
@@ -1541,18 +1635,49 @@ void pkgDPkgPM::WriteApportReport(const char *pkgpath, const char *errormsg)
}
// do not report out-of-memory failures
- if(strstr(errormsg, strerror(ENOMEM)) != NULL) {
+ if(strstr(errormsg, strerror(ENOMEM)) != NULL ||
+ strstr(errormsg, "failed to allocate memory") != NULL) {
std::clog << _("No apport report written because the error message indicates a out of memory error") << std::endl;
return;
}
- // do not report dpkg I/O errors
- // XXX - this message is localized, but this only matches the English version. This is better than nothing.
- if(strstr(errormsg, "short read in buffer_copy (")) {
- std::clog << _("No apport report written because the error message indicates a dpkg I/O error") << std::endl;
+ // do not report bugs regarding inaccessible local files
+ if(strstr(errormsg, strerror(ENOENT)) != NULL ||
+ strstr(errormsg, "cannot access archive") != NULL) {
+ std::clog << _("No apport report written because the error message indicates an issue on the local system") << std::endl;
+ return;
+ }
+
+ // do not report errors encountered when decompressing packages
+ if(strstr(errormsg, "--fsys-tarfile returned error exit status 2") != NULL) {
+ std::clog << _("No apport report written because the error message indicates an issue on the local system") << std::endl;
return;
}
+ // do not report dpkg I/O errors; these are format strings, so we compare
+ // the prefix and the suffix of the error with the dpkg error message
+ vector<string> io_errors;
+ io_errors.push_back(string("failed to read on buffer copy for %s"));
+ io_errors.push_back(string("failed in write on buffer copy for %s"));
+ io_errors.push_back(string("short read on buffer copy for %s"));
+
+ for (vector<string>::iterator I = io_errors.begin(); I != io_errors.end(); ++I)
+ {
+ vector<string> list = VectorizeString(dgettext("dpkg", (*I).c_str()), '%');
+ if (list.size() > 1) {
+ // we need to split on %s, but VectorizeString only splits on a single char,
+ // so we strip the leading "s" manually
+ if (list[1].size() > 1) {
+ list[1].erase(0, 1);
+ if(strstr(errormsg, list[0].c_str()) &&
+ strstr(errormsg, list[1].c_str())) {
+ std::clog << _("No apport report written because the error message indicates a dpkg I/O error") << std::endl;
+ return;
+ }
+ }
+ }
+ }
+
// get the pkgname and reportfile
pkgname = flNotDir(pkgpath);
pos = pkgname.find('_');
@@ -1642,6 +1767,22 @@ void pkgDPkgPM::WriteApportReport(const char *pkgpath, const char *errormsg)
char buf[1024];
while( fgets(buf, sizeof(buf), log) != NULL)
fprintf(report, " %s", buf);
+ fprintf(report, " \n");
+ fclose(log);
+ }
+ }
+
+ // attach the history log if we have it
+ string histfile_name = _config->FindFile("Dir::Log::History");
+ if (!histfile_name.empty())
+ {
+ fprintf(report, "DpkgHistoryLog:\n");
+ FILE* log = fopen(histfile_name.c_str(),"r");
+ if(log != NULL)
+ {
+ char buf[1024];
+ while( fgets(buf, sizeof(buf), log) != NULL)
+ fprintf(report, " %s", buf);
fclose(log);
}
}
diff --git a/apt-pkg/deb/dpkgpm.h b/apt-pkg/deb/dpkgpm.h
index c31d56f8e..02e12a6d9 100644
--- a/apt-pkg/deb/dpkgpm.h
+++ b/apt-pkg/deb/dpkgpm.h
@@ -15,6 +15,7 @@
#include <map>
#include <stdio.h>
#include <apt-pkg/macros.h>
+#include <apt-pkg/init.h>
#ifndef APT_8_CLEANER_HEADERS
using std::vector;
@@ -23,6 +24,7 @@ using std::map;
class pkgDPkgPMPrivate;
+
class pkgDPkgPM : public pkgPackageManager
{
private:
@@ -83,6 +85,11 @@ class pkgDPkgPM : public pkgPackageManager
__deprecated bool SendV2Pkgs(FILE *F);
bool SendPkgsInfo(FILE * const F, unsigned int const &Version);
void WriteHistoryTag(std::string const &tag, std::string value);
+ std::string ExpandShortPackageName(pkgDepCache &Cache,
+ const std::string &short_pkgname);
+
+ // Terminal progress
+ void SendTerminalProgress(float percentage);
// apport integration
void WriteApportReport(const char *pkgpath, const char *errormsg);
@@ -90,18 +97,39 @@ class pkgDPkgPM : public pkgPackageManager
// dpkg log
bool OpenLog();
bool CloseLog();
+
+ // helper
+ void BuildPackagesProgressMap();
+ void StartPtyMagic();
+ void StopPtyMagic();
// input processing
void DoStdin(int master);
void DoTerminalPty(int master);
- void DoDpkgStatusFd(int statusfd, int OutStatusFd);
- void ProcessDpkgStatusLine(int OutStatusFd, char *line);
+ void DoDpkgStatusFd(int statusfd);
+ void ProcessDpkgStatusLine(char *line);
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR < 13)
+ void DoDpkgStatusFd(int statusfd, int unused) {
+ DoDpkgStatusFd(statusfd);
+ }
+ void ProcessDpkgStatusLine(int unused, char *line) {
+ ProcessDpkgStatusLine(line);
+ }
+#endif
+
// The Actuall installation implementation
virtual bool Install(PkgIterator Pkg,std::string File);
virtual bool Configure(PkgIterator Pkg);
virtual bool Remove(PkgIterator Pkg,bool Purge = false);
+
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+ virtual bool Go(APT::Progress::PackageManager *progress);
+#else
virtual bool Go(int StatusFd=-1);
+ bool GoNoABIBreak(APT::Progress::PackageManager *progress);
+#endif
+
virtual void Reset();
public:
diff --git a/apt-pkg/depcache.cc b/apt-pkg/depcache.cc
index 978a893f7..a12e6963d 100644
--- a/apt-pkg/depcache.cc
+++ b/apt-pkg/depcache.cc
@@ -789,7 +789,7 @@ bool pkgDepCache::MarkKeep(PkgIterator const &Pkg, bool Soft, bool FromUser,
// - this makes sense as default when all Garbage dependencies
// are automatically marked for removal (as aptitude does).
// setting a package for keep then makes it no longer autoinstalled
- // for all other use-case this action is rather suprising
+ // for all other use-case this action is rather surprising
if(FromUser && !P.Marked)
P.Flags &= ~Flag::Auto;
#endif
@@ -896,6 +896,7 @@ char const* PrintMode(char const mode)
case pkgDepCache::ModeInstall: return "Install";
case pkgDepCache::ModeKeep: return "Keep";
case pkgDepCache::ModeDelete: return "Delete";
+ case pkgDepCache::ModeGarbage: return "Garbage";
default: return "UNKNOWN";
}
}
@@ -1133,8 +1134,13 @@ bool pkgDepCache::MarkInstall(PkgIterator const &Pkg,bool AutoInst,
std::clog << OutputInDepth(Depth) << Start << " can't be satisfied!" << std::endl;
if (Start.IsCritical() == false)
continue;
- // if the dependency was critical, we can't install it, so remove it again
- MarkDelete(Pkg,false,Depth + 1, false);
+ // if the dependency was critical, we have absolutely no chance to install it,
+ // so if it wasn't installed remove it again. If it was, discard the candidate
+ // as the problemresolver will trip over it otherwise trying to install it (#735967)
+ if (Pkg->CurrentVer == 0)
+ MarkDelete(Pkg,false,Depth + 1, false);
+ else
+ SetCandidateVersion(Pkg.CurrentVer());
return false;
}
@@ -1189,7 +1195,7 @@ bool pkgDepCache::MarkInstall(PkgIterator const &Pkg,bool AutoInst,
}
}
- /* This bit is for processing the possibilty of an install/upgrade
+ /* This bit is for processing the possibility of an install/upgrade
fixing the problem for "positive" dependencies */
if (Start.IsNegative() == false && (DepState[Start->ID] & DepCVer) == DepCVer)
{
@@ -1252,6 +1258,11 @@ bool pkgDepCache::MarkInstall(PkgIterator const &Pkg,bool AutoInst,
if (PkgState[Pkg->ID].InstallVer == 0)
continue;
+ /* Ignore negative dependencies that we are not going to
+ get installed */
+ if (PkgState[Pkg->ID].InstallVer != *I)
+ continue;
+
if ((Start->Version != 0 || TrgPkg != Pkg) &&
PkgState[Pkg->ID].CandidateVer != PkgState[Pkg->ID].InstallVer &&
PkgState[Pkg->ID].CandidateVer != *I &&
@@ -1304,7 +1315,7 @@ bool pkgDepCache::IsInstallOkMultiArchSameVersionSynced(PkgIterator const &Pkg,
// (simple string-compare as stuff like '1' == '0:1-0' can't happen here)
if (P->CurrentVer == 0 || strcmp(Pkg.CandVersion(), P.CandVersion()) == 0)
continue;
- // packages loosing M-A:same can be out-of-sync
+ // packages losing M-A:same can be out-of-sync
VerIterator CV = PkgState[P->ID].CandidateVerIter(*this);
if (unlikely(CV.end() == true) ||
(CV->MultiArch & pkgCache::Version::Same) != pkgCache::Version::Same)
@@ -1521,7 +1532,7 @@ bool pkgDepCache::SetCandidateRelease(pkgCache::VerIterator TargetVer,
if (itsFine == false)
{
// change the candidate
- Changed.push_back(make_pair(oldCand, TargetVer));
+ Changed.push_back(make_pair(V, TargetVer));
if (SetCandidateRelease(V, TargetRel, Changed) == false)
{
if (stillOr == false)
@@ -1726,8 +1737,6 @@ bool pkgDepCache::MarkRequired(InRootSetFunc &userFunc)
follow_recommends = MarkFollowsRecommends();
follow_suggests = MarkFollowsSuggests();
-
-
// do the mark part, this is the core bit of the algorithm
for(PkgIterator p = PkgBegin(); !p.end(); ++p)
{
@@ -1738,7 +1747,9 @@ bool pkgDepCache::MarkRequired(InRootSetFunc &userFunc)
// be nice even then a required package violates the policy (#583517)
// and do the full mark process also for required packages
(p.CurrentVer().end() != true &&
- p.CurrentVer()->Priority == pkgCache::State::Required))
+ p.CurrentVer()->Priority == pkgCache::State::Required) ||
+ // packages which can't be changed (like holds) can't be garbage
+ (IsModeChangeOk(ModeGarbage, p, 0, false) == false))
{
// the package is installed (and set to keep)
if(PkgState[p->ID].Keep() && !p.CurrentVer().end())
diff --git a/apt-pkg/depcache.h b/apt-pkg/depcache.h
index d9c95349b..f6848f383 100644
--- a/apt-pkg/depcache.h
+++ b/apt-pkg/depcache.h
@@ -15,7 +15,7 @@
This structure is important to support the readonly status of the cache
file. When the data is saved the cache will be refereshed from our
- internal rep and written to disk. Then the actual persistant data
+ internal rep and written to disk. Then the actual persistent data
files will be put on the disk.
Each dependency is compared against 3 target versions to produce to
@@ -128,7 +128,7 @@ class pkgDepCache : protected pkgCache::Namespace
enum InternalFlags {AutoKept = (1 << 0), Purge = (1 << 1), ReInstall = (1 << 2), Protected = (1 << 3)};
enum VersionTypes {NowVersion, InstallVersion, CandidateVersion};
- enum ModeList {ModeDelete = 0, ModeKeep = 1, ModeInstall = 2};
+ enum ModeList {ModeDelete = 0, ModeKeep = 1, ModeInstall = 2, ModeGarbage = 3};
/** \brief Represents an active action group.
*
diff --git a/apt-pkg/edsp.h b/apt-pkg/edsp.h
index 12b06d143..fd4436f60 100644
--- a/apt-pkg/edsp.h
+++ b/apt-pkg/edsp.h
@@ -2,7 +2,7 @@
/** Description \file edsp.h {{{
######################################################################
Set of methods to help writing and reading everything needed for EDSP
- with the noteable exception of reading a scenario for conversion into
+ with the notable exception of reading a scenario for conversion into
a Cache as this is handled by edsp interface for listparser and friends
##################################################################### */
/*}}}*/
@@ -182,13 +182,13 @@ public:
* they were unable to calculate a solution for a given task.
* Obviously they can't send a solution through, so this
* methods deals with formatting an error message correctly
- * so that the front-ends can recieve and display it.
+ * so that the front-ends can receive and display it.
*
* The first line of the message should be a short description
* of the error so it can be used for dialog titles or alike
*
* \param uuid of this error message
- * \param message is free form text to discribe the error
+ * \param message is free form text to describe the error
* \param output the front-end listens for error messages
*/
bool static WriteError(char const * const uuid, std::string const &message, FILE* output);
diff --git a/apt-pkg/indexfile.h b/apt-pkg/indexfile.h
index 1d34dc773..a0096fa34 100644
--- a/apt-pkg/indexfile.h
+++ b/apt-pkg/indexfile.h
@@ -10,12 +10,12 @@
Binary index files
Binary translation files
- Bianry index files decribing the local system
+ Binary index files describing the local system
Source index files
They are all bundled together here, and the interfaces for
sources.list, acquire, cache gen and record parsing all use this class
- to acess the underlying representation.
+ to access the underlying representation.
##################################################################### */
/*}}}*/
@@ -78,10 +78,10 @@ class pkgIndexFile
virtual bool Exists() const = 0;
virtual bool HasPackages() const = 0;
virtual unsigned long Size() const = 0;
- virtual bool Merge(pkgCacheGenerator &/*Gen*/,OpProgress* /*Prog*/) const { return false; };
+ virtual bool Merge(pkgCacheGenerator &Gen, OpProgress* Prog) const { return false; };
__deprecated virtual bool Merge(pkgCacheGenerator &Gen, OpProgress &Prog) const
{ return Merge(Gen, &Prog); };
- virtual bool MergeFileProvides(pkgCacheGenerator &/*Gen*/,OpProgress* /*Prog*/) const {return true;};
+ virtual bool MergeFileProvides(pkgCacheGenerator &Gen,OpProgress* Prog) const {return true;};
__deprecated virtual bool MergeFileProvides(pkgCacheGenerator &Gen, OpProgress &Prog) const
{return MergeFileProvides(Gen, &Prog);};
virtual pkgCache::PkgFileIterator FindInCache(pkgCache &Cache) const;
diff --git a/apt-pkg/indexrecords.cc b/apt-pkg/indexrecords.cc
index 8a72ca151..f8097c3c6 100644
--- a/apt-pkg/indexrecords.cc
+++ b/apt-pkg/indexrecords.cc
@@ -27,6 +27,11 @@ string indexRecords::GetDist() const
return this->Dist;
}
+string indexRecords::GetSuite() const
+{
+ return this->Suite;
+}
+
bool indexRecords::CheckDist(const string MaybeDist) const
{
return (this->Dist == MaybeDist
diff --git a/apt-pkg/indexrecords.h b/apt-pkg/indexrecords.h
index a98b939bc..d003ec0fa 100644
--- a/apt-pkg/indexrecords.h
+++ b/apt-pkg/indexrecords.h
@@ -46,6 +46,7 @@ class indexRecords
virtual bool Load(std::string Filename);
std::string GetDist() const;
+ std::string GetSuite() const;
time_t GetValidUntil() const;
virtual bool CheckDist(const std::string MaybeDist) const;
std::string GetExpectedDist() const;
diff --git a/apt-pkg/init.cc b/apt-pkg/init.cc
index 76278921f..6ab5ec42d 100644
--- a/apt-pkg/init.cc
+++ b/apt-pkg/init.cc
@@ -44,16 +44,8 @@ bool pkgInitConfig(Configuration &Cnf)
Cnf.CndSet("APT::Install-Suggests", false);
Cnf.CndSet("Dir","/");
- // State
+ // State
Cnf.CndSet("Dir::State","var/lib/apt/");
-
- /* Just in case something goes horribly wrong, we can fall back to the
- old /var/state paths.. */
- struct stat St;
- if (stat("/var/lib/apt/.",&St) != 0 &&
- stat("/var/state/apt/.",&St) == 0)
- Cnf.CndSet("Dir::State","var/state/apt/");
-
Cnf.CndSet("Dir::State::lists","lists/");
Cnf.CndSet("Dir::State::cdroms","cdroms.list");
Cnf.CndSet("Dir::State::mirrors","mirrors/");
@@ -100,7 +92,7 @@ bool pkgInitConfig(Configuration &Cnf)
// Read an alternate config file
const char *Cfg = getenv("APT_CONFIG");
- if (Cfg != 0)
+ if (Cfg != 0 && strlen(Cfg) != 0)
{
if (RealFileExists(Cfg) == true)
Res &= ReadConfigFile(Cnf,Cfg);
diff --git a/apt-pkg/install-progress.cc b/apt-pkg/install-progress.cc
new file mode 100644
index 000000000..a3a4cc0e1
--- /dev/null
+++ b/apt-pkg/install-progress.cc
@@ -0,0 +1,375 @@
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/install-progress.h>
+
+#include <apti18n.h>
+
+#include <termios.h>
+#include <sys/ioctl.h>
+#include <sstream>
+#include <fcntl.h>
+#include <algorithm>
+#include <stdio.h>
+
+namespace APT {
+namespace Progress {
+
+
+/* Return an APT::Progress::PackageManager based on the global
+ * apt configuration (i.e. APT::Status-Fd and APT::Status-deb822-Fd)
+ */
+PackageManager* PackageManagerProgressFactory()
+{
+ // select the right progress
+ int status_fd = _config->FindI("APT::Status-Fd", -1);
+ int status_deb822_fd = _config->FindI("APT::Status-deb822-Fd", -1);
+
+ APT::Progress::PackageManager *progress = NULL;
+ if (status_deb822_fd > 0)
+ progress = new APT::Progress::PackageManagerProgressDeb822Fd(
+ status_deb822_fd);
+ else if (status_fd > 0)
+ progress = new APT::Progress::PackageManagerProgressFd(status_fd);
+ else if(_config->FindB("Dpkg::Progress-Fancy", false) == true)
+ progress = new APT::Progress::PackageManagerFancy();
+ else if (_config->FindB("Dpkg::Progress",
+ _config->FindB("DpkgPM::Progress", false)) == true)
+ progress = new APT::Progress::PackageManagerText();
+ else
+ progress = new APT::Progress::PackageManager();
+ return progress;
+}
+
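   [Editor's note: a hedged usage sketch of the factory above; the surrounding
   setup is illustrative, the option names are exactly the ones queried in the
   code. Precedence is APT::Status-deb822-Fd, then APT::Status-Fd, then
   Dpkg::Progress-Fancy, then Dpkg::Progress, else a no-op progress object.]

      // Illustrative front-end code, assuming _config is already initialised.
      #include <apt-pkg/install-progress.h>

      APT::Progress::PackageManager *progress =
         APT::Progress::PackageManagerProgressFactory();
      // ... hand "progress" to pkgPackageManager::DoInstall() / Go() ...
      delete progress;
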
+bool PackageManager::StatusChanged(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string HumanReadableAction)
+{
+ int reporting_steps = _config->FindI("DpkgPM::Reporting-Steps", 1);
+ percentage = StepsDone/(float)TotalSteps * 100.0;
+ strprintf(progress_str, _("Progress: [%3i%%]"), (int)percentage);
+
+ if(percentage < (last_reported_progress + reporting_steps))
+ return false;
+
+ return true;
+}
+
+PackageManagerProgressFd::PackageManagerProgressFd(int progress_fd)
+ : StepsDone(0), StepsTotal(1)
+{
+ OutStatusFd = progress_fd;
+}
+
+void PackageManagerProgressFd::WriteToStatusFd(std::string s)
+{
+ if(OutStatusFd <= 0)
+ return;
+ FileFd::Write(OutStatusFd, s.c_str(), s.size());
+}
+
+void PackageManagerProgressFd::StartDpkg()
+{
+ if(OutStatusFd <= 0)
+ return;
+
+ // FIXME: use SetCloseExec here once it is taught about throwing
+ // exceptions instead of doing _exit(100) on failure
+ fcntl(OutStatusFd,F_SETFD,FD_CLOEXEC);
+
+ // send status information that we are about to fork dpkg
+ std::ostringstream status;
+ status << "pmstatus:dpkg-exec:"
+ << (StepsDone/float(StepsTotal)*100.0)
+ << ":" << _("Running dpkg")
+ << std::endl;
+ WriteToStatusFd(status.str());
+}
+
+void PackageManagerProgressFd::Stop()
+{
+}
+
+void PackageManagerProgressFd::Error(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string ErrorMessage)
+{
+ std::ostringstream status;
+ status << "pmerror:" << PackageName
+ << ":" << (StepsDone/float(TotalSteps)*100.0)
+ << ":" << ErrorMessage
+ << std::endl;
+ WriteToStatusFd(status.str());
+}
+
+void PackageManagerProgressFd::ConffilePrompt(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string ConfMessage)
+{
+ std::ostringstream status;
+ status << "pmconffile:" << PackageName
+ << ":" << (StepsDone/float(TotalSteps)*100.0)
+ << ":" << ConfMessage
+ << std::endl;
+ WriteToStatusFd(status.str());
+}
+
+
+bool PackageManagerProgressFd::StatusChanged(std::string PackageName,
+ unsigned int xStepsDone,
+ unsigned int xTotalSteps,
+ std::string pkg_action)
+{
+ StepsDone = xStepsDone;
+ StepsTotal = xTotalSteps;
+
+ // build the status str
+ std::ostringstream status;
+ status << "pmstatus:" << StringSplit(PackageName, ":")[0]
+ << ":" << (StepsDone/float(StepsTotal)*100.0)
+ << ":" << pkg_action
+ << std::endl;
+ WriteToStatusFd(status.str());
+
+ if(_config->FindB("Debug::APT::Progress::PackageManagerFd", false) == true)
+ std::cerr << "progress: " << PackageName << " " << xStepsDone
+ << " " << xTotalSteps << " " << pkg_action
+ << std::endl;
+
+
+ return true;
+}
+
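   [Editor's note: the classic status-fd protocol written above is one
   colon-separated line per event; StatusChanged() reports only the name part
   before ':' (so "libexample:amd64" appears as "libexample"). The package name,
   percentages and messages below are made-up examples:]

      pmstatus:dpkg-exec:9.52:Running dpkg
      pmstatus:libexample:14.28:Unpacking libexample (1.0-1)
      pmerror:libexample:14.28:problem description from dpkg
      pmconffile:libexample:14.28:conffile prompt details
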
+
+PackageManagerProgressDeb822Fd::PackageManagerProgressDeb822Fd(int progress_fd)
+ : StepsDone(0), StepsTotal(1)
+{
+ OutStatusFd = progress_fd;
+}
+
+void PackageManagerProgressDeb822Fd::WriteToStatusFd(std::string s)
+{
+ FileFd::Write(OutStatusFd, s.c_str(), s.size());
+}
+
+void PackageManagerProgressDeb822Fd::StartDpkg()
+{
+ // FIXME: use SetCloseExec here once it is taught about throwing
+ // exceptions instead of doing _exit(100) on failure
+ fcntl(OutStatusFd,F_SETFD,FD_CLOEXEC);
+
+ // send status information that we are about to fork dpkg
+ std::ostringstream status;
+ status << "Status: " << "progress" << std::endl
+ << "Percent: " << (StepsDone/float(StepsTotal)*100.0) << std::endl
+ << "Message: " << _("Running dpkg") << std::endl
+ << std::endl;
+ WriteToStatusFd(status.str());
+}
+
+void PackageManagerProgressDeb822Fd::Stop()
+{
+}
+
+void PackageManagerProgressDeb822Fd::Error(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string ErrorMessage)
+{
+ std::ostringstream status;
+ status << "Status: " << "Error" << std::endl
+ << "Package:" << PackageName << std::endl
+ << "Percent: " << (StepsDone/float(TotalSteps)*100.0) << std::endl
+ << "Message: " << ErrorMessage << std::endl
+ << std::endl;
+ WriteToStatusFd(status.str());
+}
+
+void PackageManagerProgressDeb822Fd::ConffilePrompt(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string ConfMessage)
+{
+ std::ostringstream status;
+ status << "Status: " << "ConfFile" << std::endl
+ << "Package:" << PackageName << std::endl
+ << "Percent: " << (StepsDone/float(TotalSteps)*100.0) << std::endl
+ << "Message: " << ConfMessage << std::endl
+ << std::endl;
+ WriteToStatusFd(status.str());
+}
+
+
+bool PackageManagerProgressDeb822Fd::StatusChanged(std::string PackageName,
+ unsigned int xStepsDone,
+ unsigned int xTotalSteps,
+ std::string message)
+{
+ StepsDone = xStepsDone;
+ StepsTotal = xTotalSteps;
+
+ // build the status str
+ std::ostringstream status;
+ status << "Status: " << "progress" << std::endl
+ << "Package: " << PackageName << std::endl
+ << "Percent: " << (StepsDone/float(StepsTotal)*100.0) << std::endl
+ << "Message: " << message << std::endl
+ << std::endl;
+ WriteToStatusFd(status.str());
+
+ return true;
+}
+
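   [Editor's note: the deb822-style variant emits blank-line separated stanzas
   instead of single lines; the concrete values are again illustrative:]

      Status: progress
      Package: libexample
      Percent: 14.28
      Message: Unpacking libexample (1.0-1)

      Status: Error
      Package: libexample
      Percent: 14.28
      Message: problem description from dpkg
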
+
+PackageManagerFancy::PackageManagerFancy()
+ : child_pty(-1)
+{
+ // setup terminal size
+ old_SIGWINCH = signal(SIGWINCH, PackageManagerFancy::staticSIGWINCH);
+ instances.push_back(this);
+}
+std::vector<PackageManagerFancy*> PackageManagerFancy::instances;
+
+PackageManagerFancy::~PackageManagerFancy()
+{
+ instances.erase(find(instances.begin(), instances.end(), this));
+ signal(SIGWINCH, old_SIGWINCH);
+}
+
+void PackageManagerFancy::staticSIGWINCH(int signum)
+{
+ std::vector<PackageManagerFancy *>::const_iterator I;
+ for(I = instances.begin(); I != instances.end(); ++I)
+ (*I)->HandleSIGWINCH(signum);
+}
+
+int PackageManagerFancy::GetNumberTerminalRows()
+{
+ struct winsize win;
+ // FIXME: get from "child_pty" instead?
+ if(ioctl(STDOUT_FILENO, TIOCGWINSZ, (char *)&win) != 0)
+ return -1;
+
+ if(_config->FindB("Debug::InstallProgress::Fancy", false) == true)
+ std::cerr << "GetNumberTerminalRows: " << win.ws_row << std::endl;
+
+ return win.ws_row;
+}
+
+void PackageManagerFancy::SetupTerminalScrollArea(int nr_rows)
+{
+ if(_config->FindB("Debug::InstallProgress::Fancy", false) == true)
+ std::cerr << "SetupTerminalScrollArea: " << nr_rows << std::endl;
+
+ // scroll down a bit to avoid visual glitch when the screen
+ // area shrinks by one row
+ std::cout << "\n";
+
+ // save cursor
+ std::cout << "\033[s";
+
+ // set scroll region (this will place the cursor in the top left)
+ std::cout << "\033[0;" << nr_rows - 1 << "r";
+
+ // restore cursor but ensure it is inside the scrolling area
+ std::cout << "\033[u";
+ static const char *move_cursor_up = "\033[1A";
+ std::cout << move_cursor_up;
+
+ // ensure it is flushed
+ std::flush(std::cout);
+
+ // set up the tty size to ensure xterm/linux console are working properly too
+ // see bug #731738
+ struct winsize win;
+ ioctl(child_pty, TIOCGWINSZ, (char *)&win);
+ win.ws_row = nr_rows - 1;
+ ioctl(child_pty, TIOCSWINSZ, (char *)&win);
+}
+
+void PackageManagerFancy::HandleSIGWINCH(int)
+{
+ int nr_terminal_rows = GetNumberTerminalRows();
+ SetupTerminalScrollArea(nr_terminal_rows);
+}
+
+void PackageManagerFancy::Start(int a_child_pty)
+{
+ child_pty = a_child_pty;
+ int nr_terminal_rows = GetNumberTerminalRows();
+ if (nr_terminal_rows > 0)
+ SetupTerminalScrollArea(nr_terminal_rows);
+}
+
+void PackageManagerFancy::Stop()
+{
+ int nr_terminal_rows = GetNumberTerminalRows();
+ if (nr_terminal_rows > 0)
+ {
+ SetupTerminalScrollArea(nr_terminal_rows + 1);
+
+ // override the progress line (sledgehammer)
+ static const char* clear_screen_below_cursor = "\033[J";
+ std::cout << clear_screen_below_cursor;
+ }
+ child_pty = -1;
+}
+
+bool PackageManagerFancy::StatusChanged(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string HumanReadableAction)
+{
+ if (!PackageManager::StatusChanged(PackageName, StepsDone, TotalSteps,
+ HumanReadableAction))
+ return false;
+
+ int row = GetNumberTerminalRows();
+
+ static string save_cursor = "\033[s";
+ static string restore_cursor = "\033[u";
+
+ static string set_bg_color = "\033[42m"; // green
+ static string set_fg_color = "\033[30m"; // black
+
+ static string restore_bg = "\033[49m";
+ static string restore_fg = "\033[39m";
+
+ std::cout << save_cursor
+ // move cursor position to last row
+ << "\033[" << row << ";0f"
+ << set_bg_color
+ << set_fg_color
+ << progress_str
+ << restore_cursor
+ << restore_bg
+ << restore_fg;
+ std::flush(std::cout);
+ last_reported_progress = percentage;
+
+ return true;
+}
+
+bool PackageManagerText::StatusChanged(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string HumanReadableAction)
+{
+ if (!PackageManager::StatusChanged(PackageName, StepsDone, TotalSteps, HumanReadableAction))
+ return false;
+
+ std::cout << progress_str << "\r\n";
+ std::flush(std::cout);
+
+ last_reported_progress = percentage;
+
+ return true;
+}
+
+
+
+}; // namespace Progress
+}; // namespace APT
diff --git a/apt-pkg/install-progress.h b/apt-pkg/install-progress.h
new file mode 100644
index 000000000..8a5b68a8f
--- /dev/null
+++ b/apt-pkg/install-progress.h
@@ -0,0 +1,156 @@
+#ifndef PKGLIB_IPROGRESS_H
+#define PKGLIB_IPROGRESS_H
+
+#include <string>
+#include <unistd.h>
+#include <signal.h>
+#include <vector>
+
+namespace APT {
+namespace Progress {
+
+ class PackageManager;
+ PackageManager* PackageManagerProgressFactory();
+
+ class PackageManager
+ {
+ private:
+ /** \brief dpointer placeholder */
+ void *d;
+
+ protected:
+ std::string progress_str;
+ float percentage;
+ int last_reported_progress;
+
+ public:
+ PackageManager()
+ : percentage(0.0), last_reported_progress(-1) {};
+ virtual ~PackageManager() {};
+
+ /* Global Start/Stop */
+ virtual void Start(int child_pty=-1) {};
+ virtual void Stop() {};
+
+ /* When dpkg is invoked (may happen multiple times for each
+ * install/remove block)
+ */
+ virtual void StartDpkg() {};
+
+ virtual pid_t fork() {return ::fork(); };
+
+ virtual void Pulse() {};
+ virtual long GetPulseInterval() {
+ return 500000;
+ };
+
+ virtual bool StatusChanged(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string HumanReadableAction) ;
+ virtual void Error(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string ErrorMessage) {};
+ virtual void ConffilePrompt(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string ConfMessage) {};
+ };
+
+ class PackageManagerProgressFd : public PackageManager
+ {
+ protected:
+ int OutStatusFd;
+ int StepsDone;
+ int StepsTotal;
+ void WriteToStatusFd(std::string msg);
+
+ public:
+ PackageManagerProgressFd(int progress_fd);
+
+ virtual void StartDpkg();
+ virtual void Stop();
+
+ virtual bool StatusChanged(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string HumanReadableAction);
+ virtual void Error(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string ErrorMessage);
+ virtual void ConffilePrompt(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string ConfMessage);
+
+ };
+
+ class PackageManagerProgressDeb822Fd : public PackageManager
+ {
+ protected:
+ int OutStatusFd;
+ int StepsDone;
+ int StepsTotal;
+ void WriteToStatusFd(std::string msg);
+
+ public:
+ PackageManagerProgressDeb822Fd(int progress_fd);
+
+ virtual void StartDpkg();
+ virtual void Stop();
+
+ virtual bool StatusChanged(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string HumanReadableAction);
+ virtual void Error(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string ErrorMessage);
+ virtual void ConffilePrompt(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string ConfMessage);
+ };
+
+ class PackageManagerFancy : public PackageManager
+ {
+ private:
+ static void staticSIGWINCH(int);
+ static std::vector<PackageManagerFancy*> instances;
+
+ protected:
+ void SetupTerminalScrollArea(int nr_rows);
+ void HandleSIGWINCH(int);
+
+ int GetNumberTerminalRows();
+ sighandler_t old_SIGWINCH;
+ int child_pty;
+
+ public:
+ PackageManagerFancy();
+ ~PackageManagerFancy();
+ virtual void Start(int child_pty=-1);
+ virtual void Stop();
+ virtual bool StatusChanged(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string HumanReadableAction);
+ };
+
+ class PackageManagerText : public PackageManager
+ {
+ public:
+ virtual bool StatusChanged(std::string PackageName,
+ unsigned int StepsDone,
+ unsigned int TotalSteps,
+ std::string HumanReadableAction);
+ };
+
+
+}; // namespace Progress
+}; // namespace APT
+
+#endif
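
   [Editor's note: the interface above is small enough that front-ends can plug
   in their own reporter; a minimal sketch under that assumption. The class name
   and output format are purely illustrative.]

      #include <apt-pkg/install-progress.h>
      #include <iostream>

      class MyLogProgress : public APT::Progress::PackageManager
      {
       public:
         virtual bool StatusChanged(std::string PackageName,
                                    unsigned int StepsDone,
                                    unsigned int TotalSteps,
                                    std::string HumanReadableAction)
         {
            // log one line per dpkg state change
            std::cerr << PackageName << " "
                      << (StepsDone * 100.0 / TotalSteps) << "% - "
                      << HumanReadableAction << std::endl;
            return true;
         }
      };
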
diff --git a/apt-pkg/makefile b/apt-pkg/makefile
index 59729faf5..a90131f80 100644
--- a/apt-pkg/makefile
+++ b/apt-pkg/makefile
@@ -43,7 +43,8 @@ SOURCE+= pkgcache.cc version.cc depcache.cc \
srcrecords.cc cachefile.cc versionmatch.cc policy.cc \
pkgsystem.cc indexfile.cc pkgcachegen.cc acquire-item.cc \
indexrecords.cc vendor.cc vendorlist.cc cdrom.cc indexcopy.cc \
- aptconfiguration.cc cachefilter.cc cacheset.cc edsp.cc
+ aptconfiguration.cc cachefilter.cc cacheset.cc edsp.cc \
+ install-progress.cc upgrade.cc update.cc
HEADERS+= algorithms.h depcache.h pkgcachegen.h cacheiterators.h \
orderlist.h sourcelist.h packagemanager.h tagfile.h \
init.h pkgcache.h version.h progress.h pkgrecords.h \
@@ -51,7 +52,8 @@ HEADERS+= algorithms.h depcache.h pkgcachegen.h cacheiterators.h \
clean.h srcrecords.h cachefile.h versionmatch.h policy.h \
pkgsystem.h indexfile.h metaindex.h indexrecords.h vendor.h \
vendorlist.h cdrom.h indexcopy.h aptconfiguration.h \
- cachefilter.h cacheset.h edsp.h
+ cachefilter.h cacheset.h edsp.h install-progress.h \
+ upgrade.h update.h
# Source code for the debian specific components
# In theory the deb headers do not need to be exported..
diff --git a/apt-pkg/metaindex.h b/apt-pkg/metaindex.h
index 5783735ff..18a90a29d 100644
--- a/apt-pkg/metaindex.h
+++ b/apt-pkg/metaindex.h
@@ -5,6 +5,7 @@
#include <string>
#include <apt-pkg/pkgcache.h>
#include <apt-pkg/indexfile.h>
+#include <apt-pkg/init.h>
#ifndef APT_8_CLEANER_HEADERS
#include <apt-pkg/srcrecords.h>
@@ -28,27 +29,36 @@ class metaIndex
public:
-
// Various accessors
virtual std::string GetURI() const {return URI;}
virtual std::string GetDist() const {return Dist;}
virtual const char* GetType() const {return Type;}
+ // interface to query it
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+ // returns the path of the local file (or "" if its not available)
+ virtual std::string LocalFileName() const {return "";};
+#endif
+
// Interface for acquire
- virtual std::string ArchiveURI(std::string const& /*File*/) const = 0;
+ virtual std::string ArchiveURI(std::string const& File) const = 0;
virtual bool GetIndexes(pkgAcquire *Owner, bool const &GetAll=false) const = 0;
-
virtual std::vector<pkgIndexFile *> *GetIndexFiles() = 0;
virtual bool IsTrusted() const = 0;
- metaIndex(std::string const &URI, std::string const &Dist, char const * const Type) :
- Indexes(NULL), Type(Type), URI(URI), Dist(Dist) {
+ metaIndex(std::string const &URI, std::string const &Dist,
+ char const * const Type)
+ : Indexes(NULL), Type(Type), URI(URI), Dist(Dist)
+ {
+ /* nothing */
}
- virtual ~metaIndex() {
+ virtual ~metaIndex()
+ {
if (Indexes == 0)
return;
- for (std::vector<pkgIndexFile *>::iterator I = (*Indexes).begin(); I != (*Indexes).end(); ++I)
+ for (std::vector<pkgIndexFile *>::iterator I = (*Indexes).begin();
+ I != (*Indexes).end(); ++I)
delete *I;
delete Indexes;
}
diff --git a/apt-pkg/orderlist.cc b/apt-pkg/orderlist.cc
index 984ae1d10..21b5fc4e7 100644
--- a/apt-pkg/orderlist.cc
+++ b/apt-pkg/orderlist.cc
@@ -566,10 +566,10 @@ bool pkgOrderList::VisitProvides(DepIterator D,bool Critical)
// ---------------------------------------------------------------------
/* This is the core ordering routine. It calls the set dependency
consideration functions which then potentialy call this again. Finite
- depth is achived through the colouring mechinism. */
+ depth is achieved through the colouring mechinism. */
bool pkgOrderList::VisitNode(PkgIterator Pkg, char const* from)
{
- // Looping or irrelevent.
+ // Looping or irrelevant.
// This should probably trancend not installed packages
if (Pkg.end() == true || IsFlag(Pkg,Added) == true ||
IsFlag(Pkg,AddPending) == true || IsFlag(Pkg,InList) == false)
@@ -824,7 +824,7 @@ bool pkgOrderList::DepUnPackPre(DepIterator D)
The forwards depends loop is designed to bring the packages dependents
close to the package. This helps reduce deconfigure time.
- Loops are irrelevent to this. */
+ Loops are irrelevant to this. */
bool pkgOrderList::DepUnPackDep(DepIterator D)
{
@@ -840,7 +840,7 @@ bool pkgOrderList::DepUnPackDep(DepIterator D)
D.ParentPkg().CurrentVer() != D.ParentVer())
continue;
- // The dep will not break so it is irrelevent.
+ // The dep will not break so it is irrelevant.
if (CheckDep(D) == true)
continue;
diff --git a/apt-pkg/packagemanager.cc b/apt-pkg/packagemanager.cc
index 8c0d2e855..5f9a31264 100644
--- a/apt-pkg/packagemanager.cc
+++ b/apt-pkg/packagemanager.cc
@@ -26,7 +26,6 @@
#include <apt-pkg/sptr.h>
#include <iostream>
-#include <fcntl.h>
#include <apti18n.h>
/*}}}*/
@@ -216,7 +215,7 @@ bool pkgPackageManager::CreateOrderList()
return true;
}
/*}}}*/
-// PM::DepAlwaysTrue - Returns true if this dep is irrelevent /*{{{*/
+// PM::DepAlwaysTrue - Returns true if this dep is irrelevant /*{{{*/
// ---------------------------------------------------------------------
/* The restriction on provides is to eliminate the case when provides
are transitioning between valid states [ie exim to smail] */
@@ -244,11 +243,11 @@ bool pkgPackageManager::CheckRConflicts(PkgIterator Pkg,DepIterator D,
D->Type != pkgCache::Dep::Obsoletes)
continue;
- // The package hasnt been changed
+ // The package hasn't been changed
if (List->IsNow(Pkg) == false)
continue;
- // Ignore self conflicts, ignore conflicts from irrelevent versions
+ // Ignore self conflicts, ignore conflicts from irrelevant versions
if (D.IsIgnorable(Pkg) || D.ParentVer() != D.ParentPkg().CurrentVer())
continue;
@@ -315,7 +314,7 @@ bool pkgPackageManager::ConfigureAll()
Note on failure: This method can fail, without causing any problems.
This can happen when using Immediate-Configure-All, SmartUnPack may call
- SmartConfigure, it may fail because of a complex dependancy situation, but
+ SmartConfigure, it may fail because of a complex dependency situation, but
a error will only be reported if ConfigureAll fails. This is why some of the
messages this function reports on failure (return false;) as just warnings
only shown when debuging*/
@@ -597,7 +596,7 @@ bool pkgPackageManager::SmartRemove(PkgIterator Pkg)
/*}}}*/
// PM::SmartUnPack - Install helper /*{{{*/
// ---------------------------------------------------------------------
-/* This puts the system in a state where it can Unpack Pkg, if Pkg is allready
+/* This puts the system in a state where it can Unpack Pkg, if Pkg is already
unpacked, or when it has been unpacked, if Immediate==true it configures it. */
bool pkgPackageManager::SmartUnPack(PkgIterator Pkg)
{
@@ -624,7 +623,7 @@ bool pkgPackageManager::SmartUnPack(PkgIterator Pkg, bool const Immediate, int c
/* PreUnpack Checks: This loop checks and attempts to rectify and problems that would prevent the package being unpacked.
It addresses: PreDepends, Conflicts, Obsoletes and Breaks (DpkgBreaks). Any resolutions that do not require it should
avoid configuration (calling SmartUnpack with Immediate=true), this is because when unpacking some packages with
- complex dependancy structures, trying to configure some packages while breaking the loops can complicate things .
+ complex dependency structures, trying to configure some packages while breaking the loops can complicate things .
This will be either dealt with if the package is configured as a dependency of Pkg (if and when Pkg is configured),
or by the ConfigureAll call at the end of the for loop in OrderInstall. */
bool Changed = false;
@@ -791,7 +790,7 @@ bool pkgPackageManager::SmartUnPack(PkgIterator Pkg, bool const Immediate, int c
{
if (List->IsFlag(BrokenPkg,pkgOrderList::Loop) && PkgLoop)
{
- // This dependancy has already been dealt with by another SmartUnPack on Pkg
+ // This dependency has already been dealt with by another SmartUnPack on Pkg
break;
}
else
@@ -1004,7 +1003,7 @@ pkgPackageManager::OrderResult pkgPackageManager::OrderInstall()
DoneSomething = true;
if (ImmConfigureAll) {
- /* ConfigureAll here to pick up and packages left unconfigured becuase they were unpacked in the
+ /* ConfigureAll here to pick up and packages left unconfigured because they were unpacked in the
"PreUnpack Checks" section */
if (!ConfigureAll())
return Failed;
@@ -1028,32 +1027,78 @@ pkgPackageManager::OrderResult pkgPackageManager::OrderInstall()
return Completed;
}
+// PM::DoInstallPostFork - compat /*{{{*/
+// ---------------------------------------------------------------------
+ /*}}}*/
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+pkgPackageManager::OrderResult
+pkgPackageManager::DoInstallPostFork(int statusFd)
+{
+ APT::Progress::PackageManager *progress = new
+ APT::Progress::PackageManagerProgressFd(statusFd);
+ pkgPackageManager::OrderResult res = DoInstallPostFork(progress);
+ delete progress;
+ return res;
+}
/*}}}*/
// PM::DoInstallPostFork - Does install part that happens after the fork /*{{{*/
// ---------------------------------------------------------------------
pkgPackageManager::OrderResult
-pkgPackageManager::DoInstallPostFork(int statusFd)
+pkgPackageManager::DoInstallPostFork(APT::Progress::PackageManager *progress)
{
- if(statusFd > 0)
- // FIXME: use SetCloseExec here once it taught about throwing
- // exceptions instead of doing _exit(100) on failure
- fcntl(statusFd,F_SETFD,FD_CLOEXEC);
- bool goResult = Go(statusFd);
- if(goResult == false)
- return Failed;
-
- return Res;
+ bool goResult = Go(progress);
+ if(goResult == false)
+ return Failed;
+
+ return Res;
};
+#else
+pkgPackageManager::OrderResult
+pkgPackageManager::DoInstallPostFork(int statusFd)
+{
+ bool goResult = Go(statusFd);
+ if(goResult == false)
+ return Failed;
+
+ return Res;
+}
+#endif
+ /*}}}*/
+// PM::DoInstall - Does the installation /*{{{*/
+// ---------------------------------------------------------------------
+/* compat */
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+pkgPackageManager::OrderResult
+pkgPackageManager::DoInstall(int statusFd)
+{
+ APT::Progress::PackageManager *progress = new
+ APT::Progress::PackageManagerProgressFd(statusFd);
+ OrderResult res = DoInstall(progress);
+ delete progress;
+ return res;
+ }
+#else
+pkgPackageManager::OrderResult pkgPackageManager::DoInstall(int statusFd)
+{
+ if(DoInstallPreFork() == Failed)
+ return Failed;
+ return DoInstallPostFork(statusFd);
+}
+#endif
+ /*}}}*/
// PM::DoInstall - Does the installation /*{{{*/
// ---------------------------------------------------------------------
/* This uses the filenames in FileNames and the information in the
DepCache to perform the installation of packages.*/
-pkgPackageManager::OrderResult pkgPackageManager::DoInstall(int statusFd)
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+pkgPackageManager::OrderResult
+pkgPackageManager::DoInstall(APT::Progress::PackageManager *progress)
{
if(DoInstallPreFork() == Failed)
return Failed;
- return DoInstallPostFork(statusFd);
+ return DoInstallPostFork(progress);
}
+#endif
/*}}}*/
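
   [Editor's note: putting the pieces together, a hedged sketch of the new
   (>= 4.13 ABI) call path; "PM" stands for any pkgPackageManager instance
   (e.g. pkgDPkgPM) and error handling is elided.]

      APT::Progress::PackageManager *progress =
         APT::Progress::PackageManagerProgressFactory();
      pkgPackageManager::OrderResult const res = PM->DoInstall(progress);
      delete progress;
      if (res != pkgPackageManager::Completed)
         ; // Failed or Incomplete - handle accordingly
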
diff --git a/apt-pkg/packagemanager.h b/apt-pkg/packagemanager.h
index 1a6a9f01c..853b9bac8 100644
--- a/apt-pkg/packagemanager.h
+++ b/apt-pkg/packagemanager.h
@@ -23,7 +23,10 @@
#ifndef PKGLIB_PACKAGEMANAGER_H
#define PKGLIB_PACKAGEMANAGER_H
+#include <apt-pkg/macros.h>
#include <apt-pkg/pkgcache.h>
+#include <apt-pkg/install-progress.h>
+#include <apt-pkg/init.h>
#include <string>
#include <iostream>
@@ -39,6 +42,8 @@ class pkgDepCache;
class pkgSourceList;
class pkgOrderList;
class pkgRecords;
+
+
class pkgPackageManager : protected pkgCache::Namespace
{
public:
@@ -83,7 +88,12 @@ class pkgPackageManager : protected pkgCache::Namespace
virtual bool Install(PkgIterator /*Pkg*/,std::string /*File*/) {return false;};
virtual bool Configure(PkgIterator /*Pkg*/) {return false;};
virtual bool Remove(PkgIterator /*Pkg*/,bool /*Purge*/=false) {return false;};
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+ virtual bool Go(APT::Progress::PackageManager *progress) {return true;};
+#else
virtual bool Go(int statusFd=-1) {return true;};
+#endif
+
virtual void Reset() {};
// the result of the operation
@@ -96,7 +106,13 @@ class pkgPackageManager : protected pkgCache::Namespace
pkgRecords *Recs);
// Do the installation
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
+ OrderResult DoInstall(APT::Progress::PackageManager *progress);
+ // compat
+ __deprecated OrderResult DoInstall(int statusFd=-1);
+#else
OrderResult DoInstall(int statusFd=-1);
+#endif
// stuff that needs to be done before the fork() of a library that
// uses apt
@@ -104,9 +120,16 @@ class pkgPackageManager : protected pkgCache::Namespace
Res = OrderInstall();
return Res;
};
-
+#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
// stuff that needs to be done after the fork
+ OrderResult DoInstallPostFork(APT::Progress::PackageManager *progress);
+ // compat
+ __deprecated OrderResult DoInstallPostFork(int statusFd=-1);
+#else
OrderResult DoInstallPostFork(int statusFd=-1);
+#endif
+
+ // ?
bool FixMissing();
/** \brief returns all packages dpkg let disappear */
diff --git a/apt-pkg/pkgcache.cc b/apt-pkg/pkgcache.cc
index 0b8b6fe77..80493c37b 100644
--- a/apt-pkg/pkgcache.cc
+++ b/apt-pkg/pkgcache.cc
@@ -8,7 +8,7 @@
Please see doc/apt-pkg/cache.sgml for a more detailed description of
this format. Also be sure to keep that file up-to-date!!
- This is the general utility functions for cache managment. They provide
+ This is the general utility functions for cache management. They provide
a complete set of accessor functions for the cache. The cacheiterators
header contains the STL-like iterators that can be used to easially
navigate the cache as well as seemlessly dereference the mmap'd
@@ -499,7 +499,7 @@ pkgCache::PkgIterator::CurVersion() const
// ostream operator to handle string representation of a package /*{{{*/
// ---------------------------------------------------------------------
/* Output name < cur.rent.version -> candid.ate.version | new.est.version > (section)
- Note that the characters <|>() are all literal above. Versions will be ommited
+ Note that the characters <|>() are all literal above. Versions will be omitted
if they provide no new information (e.g. there is no newer version than candidate)
If no version and/or section can be found "none" is used. */
std::ostream&
diff --git a/apt-pkg/pkgcache.h b/apt-pkg/pkgcache.h
index 565ee657c..7c01258f1 100644
--- a/apt-pkg/pkgcache.h
+++ b/apt-pkg/pkgcache.h
@@ -354,7 +354,7 @@ struct pkgCache::Group
the hash index of the name in the pkgCache::Header::PkgHashTable
A package can be created for every architecture so package names are
- not unique, but it is garanteed that packages with the same name
+ not unique, but it is guaranteed that packages with the same name
are sequencel ordered in the list. Packages with the same name can be
accessed with the Group.
*/
diff --git a/apt-pkg/pkgsystem.h b/apt-pkg/pkgsystem.h
index 75f7b9fcc..eb75df412 100644
--- a/apt-pkg/pkgsystem.h
+++ b/apt-pkg/pkgsystem.h
@@ -7,7 +7,7 @@
Instances of this class can be thought of as factories or meta-classes
for a variety of more specialized classes. Together this class and
- it's speciallized offspring completely define the environment and how
+ it's specialized offspring completely define the environment and how
to access resources for a specific system. There are several sub
areas that are all orthogonal - each system has a unique combination of
these sub areas:
@@ -23,7 +23,7 @@
- Selection of local 'status' indexes that make up the pkgCache.
It is important to note that the handling of index files is not a
- function of the system. Index files are handled through a seperate
+ function of the system. Index files are handled through a separate
abstraction - the only requirement is that the index files have the
same idea of versioning as the target system.
diff --git a/apt-pkg/policy.cc b/apt-pkg/policy.cc
index 0a06cc6e3..d0f97441d 100644
--- a/apt-pkg/policy.cc
+++ b/apt-pkg/policy.cc
@@ -405,6 +405,10 @@ bool ReadPinFile(pkgPolicy &Plcy,string File)
PreferenceSection Tags;
while (TF.Step(Tags) == true)
{
+ // can happen when there are only comments in a record
+ if (Tags.Count() == 0)
+ continue;
+
string Name = Tags.FindS("Package");
if (Name.empty() == true)
return _error->Error(_("Invalid record in the preferences file %s, no Package header"), File.c_str());
diff --git a/apt-pkg/sourcelist.cc b/apt-pkg/sourcelist.cc
index 0fd237cad..1f5179885 100644
--- a/apt-pkg/sourcelist.cc
+++ b/apt-pkg/sourcelist.cc
@@ -17,8 +17,10 @@
#include <apt-pkg/configuration.h>
#include <apt-pkg/metaindex.h>
#include <apt-pkg/indexfile.h>
+#include <apt-pkg/tagfile.h>
#include <fstream>
+#include <algorithm>
#include <apti18n.h>
/*}}}*/
@@ -70,6 +72,69 @@ bool pkgSourceList::Type::FixupURI(string &URI) const
return true;
}
/*}}}*/
+bool pkgSourceList::Type::ParseStanza(vector<metaIndex *> &List,
+ pkgTagSection &Tags,
+ int i,
+ FileFd &Fd)
+{
+ map<string, string> Options;
+
+ string Enabled = Tags.FindS("Enabled");
+ if (Enabled.size() > 0 && StringToBool(Enabled) == false)
+ return true;
+
+ // Define external/internal options
+ const char* option_deb822[] = {
+ "Architectures", "Architectures-Add", "Architectures-Remove", "Trusted",
+ };
+ const char* option_internal[] = {
+ "arch", "arch+", "arch-", "trusted",
+ };
+ for (unsigned int j=0; j < sizeof(option_deb822)/sizeof(char*); j++)
+ if (Tags.Exists(option_deb822[j]))
+ {
+ // for deb822 the " " is the delimiter, but the backend expects ","
+ std::string option = Tags.FindS(option_deb822[j]);
+ std::replace(option.begin(), option.end(), ' ', ',');
+ Options[option_internal[j]] = option;
+ }
+
+ // now create one item per suite/section
+ string Suite = Tags.FindS("Suites");
+ Suite = SubstVar(Suite,"$(ARCH)",_config->Find("APT::Architecture"));
+ string const Section = Tags.FindS("Sections");
+ string URIS = Tags.FindS("URIs");
+
+ std::vector<std::string> list_uris = StringSplit(URIS, " ");
+ std::vector<std::string> list_dist = StringSplit(Suite, " ");
+ std::vector<std::string> list_section = StringSplit(Section, " ");
+
+ for (std::vector<std::string>::const_iterator U = list_uris.begin();
+ U != list_uris.end(); U++)
+ {
+ std::string URI = (*U);
+ if (!FixupURI(URI))
+ {
+ _error->Error(_("Malformed stanza %u in source list %s (URI parse)"),i,Fd.Name().c_str());
+ return false;
+ }
+
+ for (std::vector<std::string>::const_iterator I = list_dist.begin();
+ I != list_dist.end(); I++)
+ {
+ for (std::vector<std::string>::const_iterator J = list_section.begin();
+ J != list_section.end(); J++)
+ {
+ if (CreateItem(List, URI, (*I), (*J), Options) == false)
+ {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
// Type::ParseLine - Parse a single line /*{{{*/
// ---------------------------------------------------------------------
/* This is a generic one that is the 'usual' format for sources.list
@@ -159,7 +224,6 @@ bool pkgSourceList::Type::ParseLine(vector<metaIndex *> &List,
return true;
}
/*}}}*/
-
// SourceList::pkgSourceList - Constructors /*{{{*/
// ---------------------------------------------------------------------
/* */
@@ -181,7 +245,6 @@ pkgSourceList::~pkgSourceList()
delete *I;
}
/*}}}*/
- /*}}}*/
// SourceList::ReadMainList - Read the main source list from etc /*{{{*/
// ---------------------------------------------------------------------
/* */
@@ -216,7 +279,6 @@ bool pkgSourceList::ReadMainList()
return Res;
}
/*}}}*/
-// CNC:2003-03-03 - Needed to preserve backwards compatibility.
// SourceList::Reset - Clear the sourcelist contents /*{{{*/
// ---------------------------------------------------------------------
/* */
@@ -227,7 +289,6 @@ void pkgSourceList::Reset()
SrcList.erase(SrcList.begin(),SrcList.end());
}
/*}}}*/
-// CNC:2003-03-03 - Function moved to ReadAppend() and Reset().
// SourceList::Read - Parse the sourcelist file /*{{{*/
// ---------------------------------------------------------------------
/* */
@@ -242,16 +303,28 @@ bool pkgSourceList::Read(string File)
/* */
bool pkgSourceList::ReadAppend(string File)
{
+ if (_config->FindB("APT::Sources::Use-Deb822", false) == true)
+ {
+ int lines_parsed = ParseFileDeb822(File);
+ if (lines_parsed < 0)
+ return false;
+ else if (lines_parsed > 0)
+ return true;
+ // no lines parsed ... fall through and use old style parser
+ }
+ return ParseFileOldStyle(File);
+}
+
+// SourceList::ParseFileOldStyle - Read traditional one-line-style sources.list /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool pkgSourceList::ParseFileOldStyle(string File)
+{
// Open the stream for reading
ifstream F(File.c_str(),ios::in /*| ios::nocreate*/);
if (!F != 0)
return _error->Errno("ifstream::ifstream",_("Opening %s"),File.c_str());
-
-#if 0 // Now Reset() does this.
- for (const_iterator I = SrcList.begin(); I != SrcList.end(); I++)
- delete *I;
- SrcList.erase(SrcList.begin(),SrcList.end());
-#endif
+
// CNC:2003-12-10 - 300 is too short.
char Buffer[1024];
@@ -298,6 +371,54 @@ bool pkgSourceList::ReadAppend(string File)
return true;
}
/*}}}*/
+// SourceList::ParseFileDeb822 - Parse deb822 style sources.list /*{{{*/
+// ---------------------------------------------------------------------
+/* Returns: the number of stanzas parsed */
+int pkgSourceList::ParseFileDeb822(string File)
+{
+ pkgTagSection Tags;
+ unsigned int i=0;
+
+ // see if we can read the file
+ _error->PushToStack();
+ FileFd Fd(File, FileFd::ReadOnly);
+ pkgTagFile Sources(&Fd);
+ if (_error->PendingError() == true)
+ {
+ _error->RevertToStack();
+ return 0;
+ }
+ _error->MergeWithStack();
+
+ // read step by step
+ while (Sources.Step(Tags) == true)
+ {
+ if(!Tags.Exists("Types"))
+ continue;
+
+ string const types = Tags.FindS("Types");
+ std::vector<std::string> list_types = StringSplit(types, " ");
+ for (std::vector<std::string>::const_iterator I = list_types.begin();
+ I != list_types.end(); I++)
+ {
+ Type *Parse = Type::GetType((*I).c_str());
+ if (Parse == 0)
+ {
+ _error->Error(_("Type '%s' is not known on stanza %u in source list %s"), (*I).c_str(),i,Fd.Name().c_str());
+ return -1;
+ }
+
+ if (!Parse->ParseStanza(SrcList, Tags, i, Fd))
+ return -1;
+
+ i++;
+ }
+ }
+
+ // we are done, return the number of stanzas read
+ return i;
+}
+ /*}}}*/
// SourceList::FindIndex - Get the index associated with a file /*{{{*/
// ---------------------------------------------------------------------
/* */
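Illustration only, not part of the patch: the deb822 path is only attempted when APT::Sources::Use-Deb822 is enabled (see ReadAppend above), and ParseStanza expects field names from the option_deb822 table. A hypothetical apt.conf line and stanza:

    // apt.conf
    APT::Sources::Use-Deb822 "true";

    # a deb822-style source stanza (values are space-separated;
    # ParseStanza rewrites the spaces to "," for the backend options)
    Types: deb deb-src
    URIs: http://httpredir.debian.org/debian
    Suites: stable stable-updates
    Sections: main contrib
    Architectures: amd64 i386

CreateItem is called once per URI/suite/section combination, and a stanza carrying "Enabled: no" is skipped entirely.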
diff --git a/apt-pkg/sourcelist.h b/apt-pkg/sourcelist.h
index 02e27101a..0ccb4aa00 100644
--- a/apt-pkg/sourcelist.h
+++ b/apt-pkg/sourcelist.h
@@ -31,6 +31,7 @@
#include <vector>
#include <map>
#include <apt-pkg/pkgcache.h>
+#include <apt-pkg/tagfile.h>
#ifndef APT_8_CLEANER_HEADERS
#include <apt-pkg/metaindex.h>
@@ -60,6 +61,10 @@ class pkgSourceList
const char *Label;
bool FixupURI(std::string &URI) const;
+ virtual bool ParseStanza(std::vector<metaIndex *> &List,
+ pkgTagSection &Tags,
+ int stanza_n,
+ FileFd &Fd);
virtual bool ParseLine(std::vector<metaIndex *> &List,
const char *Buffer,
unsigned long const &CurLine,std::string const &File) const;
@@ -75,7 +80,10 @@ class pkgSourceList
protected:
std::vector<metaIndex *> SrcList;
-
+
+ int ParseFileDeb822(std::string File);
+ bool ParseFileOldStyle(std::string File);
+
public:
bool ReadMainList();
diff --git a/apt-pkg/srcrecords.cc b/apt-pkg/srcrecords.cc
index 297559957..60b62850a 100644
--- a/apt-pkg/srcrecords.cc
+++ b/apt-pkg/srcrecords.cc
@@ -70,8 +70,9 @@ bool pkgSrcRecords::Restart()
Current = Files.begin();
for (std::vector<Parser*>::iterator I = Files.begin();
I != Files.end(); ++I)
- (*I)->Restart();
-
+ if ((*I)->Offset() != 0)
+ (*I)->Restart();
+
return true;
}
/*}}}*/
diff --git a/apt-pkg/tagfile.cc b/apt-pkg/tagfile.cc
index 10bc08d95..832a40d1e 100644
--- a/apt-pkg/tagfile.cc
+++ b/apt-pkg/tagfile.cc
@@ -164,7 +164,7 @@ bool pkgTagFile::Fill()
unsigned long long const dataSize = d->Size - ((d->End - d->Buffer) + 1);
if (d->Fd.Read(d->End, dataSize, &Actual) == false)
return false;
- if (Actual != dataSize || d->Fd.Eof() == true)
+ if (Actual != dataSize)
d->Done = true;
d->End += Actual;
}
@@ -207,7 +207,11 @@ bool pkgTagFile::Jump(pkgTagSection &Tag,unsigned long long Offset)
unsigned long long Dist = Offset - d->iOffset;
d->Start += Dist;
d->iOffset += Dist;
- return Step(Tag);
+ // if we have seen the end, don't ask for more
+ if (d->Done == true)
+ return Tag.Scan(d->Start, d->End - d->Start);
+ else
+ return Step(Tag);
}
// Reposition and reload..
@@ -233,6 +237,16 @@ bool pkgTagFile::Jump(pkgTagSection &Tag,unsigned long long Offset)
return true;
}
/*}}}*/
+// pkgTagSection::pkgTagSection - Constructor /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+pkgTagSection::pkgTagSection()
+ : Section(0), TagCount(0), d(NULL), Stop(0)
+{
+ memset(&Indexes, 0, sizeof(Indexes));
+ memset(&AlphaIndexes, 0, sizeof(AlphaIndexes));
+}
+ /*}}}*/
// TagSection::Scan - Scan for the end of the header information /*{{{*/
// ---------------------------------------------------------------------
/* This looks for the first double new line in the data stream.
@@ -249,7 +263,12 @@ bool pkgTagSection::Scan(const char *Start,unsigned long MaxLength)
TagCount = 0;
while (TagCount+1 < sizeof(Indexes)/sizeof(Indexes[0]) && Stop < End)
{
- TrimRecord(true,End);
+ TrimRecord(true,End);
+
+ // this can happen when TrimRecord trims away the entire Record
+ // (e.g. because it just contains comments)
+ if(Stop == End)
+ return true;
// Start a new index and add it to the hash
if (isspace(Stop[0]) == 0)
@@ -263,7 +282,9 @@ bool pkgTagSection::Scan(const char *Start,unsigned long MaxLength)
if (Stop == 0)
return false;
- for (; Stop+1 < End && Stop[1] == '\r'; Stop++);
+ for (; Stop+1 < End && Stop[1] == '\r'; Stop++)
+ /* nothing */
+ ;
// Double newline marks the end of the record
if (Stop+1 < End && Stop[1] == '\n')
@@ -550,52 +571,54 @@ bool TFRewrite(FILE *Output,pkgTagSection const &Tags,const char *Order[],
}
// Write all all of the tags, in order.
- for (unsigned int I = 0; Order[I] != 0; I++)
+ if (Order != NULL)
{
- bool Rewritten = false;
-
- // See if this is a field that needs to be rewritten
- for (unsigned int J = 0; Rewrite != 0 && Rewrite[J].Tag != 0; J++)
+ for (unsigned int I = 0; Order[I] != 0; I++)
{
- if (strcasecmp(Rewrite[J].Tag,Order[I]) == 0)
- {
- Visited[J] |= 2;
- if (Rewrite[J].Rewrite != 0 && Rewrite[J].Rewrite[0] != 0)
- {
- if (isspace(Rewrite[J].Rewrite[0]))
- fprintf(Output,"%s:%s\n",Rewrite[J].NewTag,Rewrite[J].Rewrite);
- else
- fprintf(Output,"%s: %s\n",Rewrite[J].NewTag,Rewrite[J].Rewrite);
- }
+ bool Rewritten = false;
+
+ // See if this is a field that needs to be rewritten
+ for (unsigned int J = 0; Rewrite != 0 && Rewrite[J].Tag != 0; J++)
+ {
+ if (strcasecmp(Rewrite[J].Tag,Order[I]) == 0)
+ {
+ Visited[J] |= 2;
+ if (Rewrite[J].Rewrite != 0 && Rewrite[J].Rewrite[0] != 0)
+ {
+ if (isspace(Rewrite[J].Rewrite[0]))
+ fprintf(Output,"%s:%s\n",Rewrite[J].NewTag,Rewrite[J].Rewrite);
+ else
+ fprintf(Output,"%s: %s\n",Rewrite[J].NewTag,Rewrite[J].Rewrite);
+ }
+ Rewritten = true;
+ break;
+ }
+ }
- Rewritten = true;
- break;
- }
- }
-
- // See if it is in the fragment
- unsigned Pos;
- if (Tags.Find(Order[I],Pos) == false)
- continue;
- Visited[Pos] |= 1;
-
- if (Rewritten == true)
- continue;
+ // See if it is in the fragment
+ unsigned Pos;
+ if (Tags.Find(Order[I],Pos) == false)
+ continue;
+ Visited[Pos] |= 1;
+
+ if (Rewritten == true)
+ continue;
- /* Write out this element, taking a moment to rewrite the tag
- in case of changes of case. */
- const char *Start;
- const char *Stop;
- Tags.Get(Start,Stop,Pos);
+ /* Write out this element, taking a moment to rewrite the tag
+ in case of changes of case. */
+ const char *Start;
+ const char *Stop;
+ Tags.Get(Start,Stop,Pos);
- if (fputs(Order[I],Output) < 0)
- return _error->Errno("fputs","IO Error to output");
- Start += strlen(Order[I]);
- if (fwrite(Start,Stop - Start,1,Output) != 1)
- return _error->Errno("fwrite","IO Error to output");
- if (Stop[-1] != '\n')
- fprintf(Output,"\n");
- }
+ if (fputs(Order[I],Output) < 0)
+ return _error->Errno("fputs","IO Error to output");
+ Start += strlen(Order[I]);
+ if (fwrite(Start,Stop - Start,1,Output) != 1)
+ return _error->Errno("fwrite","IO Error to output");
+ if (Stop[-1] != '\n')
+ fprintf(Output,"\n");
+ }
+ }
// Now write all the old tags that were missed.
for (unsigned int I = 0; I != Tags.Count(); I++)
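Illustration only, not part of the patch: with the NULL guard above, a caller may skip the ordered pass entirely. A hypothetical rewrite table, assuming Tags is an already-scanned pkgTagSection and Out an open FILE*:

    TFRewriteData Changes[] = {
       { "Maintainer", "APT Development Team <deity@lists.debian.org>", "Maintainer" },
       { NULL, NULL, NULL }
    };
    // Order == NULL: the ordered pass is skipped; the remaining tags (and any
    // matching rewrites) are still emitted by the loops that follow
    TFRewrite(Out, Tags, NULL, Changes);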
diff --git a/apt-pkg/tagfile.h b/apt-pkg/tagfile.h
index 66c56799d..518d3dbcd 100644
--- a/apt-pkg/tagfile.h
+++ b/apt-pkg/tagfile.h
@@ -84,7 +84,7 @@ class pkgTagSection
Stop = this->Stop;
};
- pkgTagSection() : Section(0), TagCount(0), d(NULL), Stop(0) {};
+ pkgTagSection();
virtual ~pkgTagSection() {};
};
diff --git a/apt-pkg/update.cc b/apt-pkg/update.cc
new file mode 100644
index 000000000..97be5490b
--- /dev/null
+++ b/apt-pkg/update.cc
@@ -0,0 +1,126 @@
+
+// Include Files /*{{{*/
+#include <config.h>
+
+#include <apt-pkg/algorithms.h>
+#include <apt-pkg/update.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/version.h>
+#include <apt-pkg/sptr.h>
+#include <apt-pkg/acquire-item.h>
+#include <apt-pkg/edsp.h>
+#include <apt-pkg/sourcelist.h>
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/progress.h>
+
+#include <sys/types.h>
+#include <cstdlib>
+#include <algorithm>
+#include <iostream>
+#include <stdio.h>
+
+#include <apti18n.h>
+ /*}}}*/
+
+using namespace std;
+
+// ListUpdate - construct Fetcher and update the cache files /*{{{*/
+// ---------------------------------------------------------------------
+/* This is a simple wrapper to update the cache. It will fetch the indexes
+ * from the network (or any other sources defined in sources.list)
+ */
+bool ListUpdate(pkgAcquireStatus &Stat,
+ pkgSourceList &List,
+ int PulseInterval)
+{
+ pkgAcquire Fetcher;
+ if (Fetcher.Setup(&Stat, _config->FindDir("Dir::State::Lists")) == false)
+ return false;
+
+ // Populate it with the source selection
+ if (List.GetIndexes(&Fetcher) == false)
+ return false;
+
+ return AcquireUpdate(Fetcher, PulseInterval, true);
+}
+ /*}}}*/
+// AcquireUpdate - take Fetcher and update the cache files /*{{{*/
+// ---------------------------------------------------------------------
+/* This is a simple wrapper to update the cache with a caller-provided
+ * pkgAcquire. If you only need control over the status object and the
+ * sources list that is used, use the ListUpdate method instead.
+ */
+bool AcquireUpdate(pkgAcquire &Fetcher, int const PulseInterval,
+ bool const RunUpdateScripts, bool const ListCleanup)
+{
+ // Run scripts
+ if (RunUpdateScripts == true)
+ RunScripts("APT::Update::Pre-Invoke");
+
+ pkgAcquire::RunResult res;
+ if(PulseInterval > 0)
+ res = Fetcher.Run(PulseInterval);
+ else
+ res = Fetcher.Run();
+
+ if (res == pkgAcquire::Failed)
+ return false;
+
+ bool Failed = false;
+ bool TransientNetworkFailure = false;
+ for (pkgAcquire::ItemIterator I = Fetcher.ItemsBegin();
+ I != Fetcher.ItemsEnd(); ++I)
+ {
+ if ((*I)->Status == pkgAcquire::Item::StatDone)
+ continue;
+
+ (*I)->Finished();
+
+ ::URI uri((*I)->DescURI());
+ uri.User.clear();
+ uri.Password.clear();
+ string descUri = string(uri);
+ _error->Warning(_("Failed to fetch %s %s\n"), descUri.c_str(),
+ (*I)->ErrorText.c_str());
+
+ if ((*I)->Status == pkgAcquire::Item::StatTransientNetworkError)
+ {
+ TransientNetworkFailure = true;
+ continue;
+ }
+
+ Failed = true;
+ }
+
+ // Clean out any old list files
+ // Keep "APT::Get::List-Cleanup" name for compatibility, but
+ // this is really a global option for the APT library now
+ if (!TransientNetworkFailure && !Failed && ListCleanup == true &&
+ (_config->FindB("APT::Get::List-Cleanup",true) == true &&
+ _config->FindB("APT::List-Cleanup",true) == true))
+ {
+ if (Fetcher.Clean(_config->FindDir("Dir::State::lists")) == false ||
+ Fetcher.Clean(_config->FindDir("Dir::State::lists") + "partial/") == false)
+ // something went wrong with the clean
+ return false;
+ }
+
+ if (TransientNetworkFailure == true)
+ _error->Warning(_("Some index files failed to download. They have been ignored, or old ones used instead."));
+ else if (Failed == true)
+ return _error->Error(_("Some index files failed to download. They have been ignored, or old ones used instead."));
+
+
+ // Run the success scripts if all was fine
+ if (RunUpdateScripts == true)
+ {
+ if(!TransientNetworkFailure && !Failed)
+ RunScripts("APT::Update::Post-Invoke-Success");
+
+ // Run the other scripts
+ RunScripts("APT::Update::Post-Invoke");
+ }
+ return true;
+}
+ /*}}}*/
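Illustration only, not part of the patch: a minimal caller of the relocated ListUpdate, assuming the usual pkgInitConfig/pkgInitSystem setup and a hypothetical SilentStatus class (pkgAcquireStatus only has MediaChange as a pure virtual):

    #include <apt-pkg/init.h>
    #include <apt-pkg/configuration.h>
    #include <apt-pkg/pkgsystem.h>
    #include <apt-pkg/acquire.h>
    #include <apt-pkg/sourcelist.h>
    #include <apt-pkg/update.h>
    #include <string>

    // hypothetical no-op progress reporter
    class SilentStatus : public pkgAcquireStatus
    {
       public:
       virtual bool MediaChange(std::string /*Media*/, std::string /*Drive*/) { return false; }
    };

    int main()
    {
       pkgInitConfig(*_config);
       pkgInitSystem(*_config, _system);

       pkgSourceList List;
       if (List.ReadMainList() == false)
          return 1;

       // roughly what "apt-get update" does: fetch every configured index
       SilentStatus Stat;
       return ListUpdate(Stat, List) ? 0 : 1;
    }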
diff --git a/apt-pkg/update.h b/apt-pkg/update.h
new file mode 100644
index 000000000..3835644de
--- /dev/null
+++ b/apt-pkg/update.h
@@ -0,0 +1,21 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+/* ######################################################################
+
+ Update - ListUpdate related code
+
+ ##################################################################### */
+ /*}}}*/
+
+#ifndef PKGLIB_UPDATE_H
+#define PKGLIB_UPDATE_H
+
+class pkgAcquireStatus;
+class pkgSourceList;
+class pkgAcquire;
+bool ListUpdate(pkgAcquireStatus &progress, pkgSourceList &List, int PulseInterval=0);
+bool AcquireUpdate(pkgAcquire &Fetcher, int const PulseInterval = 0,
+ bool const RunUpdateScripts = true, bool const ListCleanup = true);
+
+
+#endif
diff --git a/apt-pkg/upgrade.cc b/apt-pkg/upgrade.cc
new file mode 100644
index 000000000..d6f6933dd
--- /dev/null
+++ b/apt-pkg/upgrade.cc
@@ -0,0 +1,263 @@
+
+// Include Files /*{{{*/
+#include <config.h>
+
+#include <apt-pkg/algorithms.h>
+#include <apt-pkg/upgrade.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/version.h>
+#include <apt-pkg/sptr.h>
+#include <apt-pkg/acquire-item.h>
+#include <apt-pkg/edsp.h>
+#include <apt-pkg/sourcelist.h>
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/progress.h>
+
+#include <sys/types.h>
+#include <cstdlib>
+#include <algorithm>
+#include <iostream>
+#include <stdio.h>
+
+#include <apti18n.h>
+ /*}}}*/
+
+// DistUpgrade - Distribution upgrade /*{{{*/
+// ---------------------------------------------------------------------
+/* This autoinstalls every package and then force installs every
+ pre-existing package. This creates the initial set of conditions which
+ most likely contain problems because too many things were installed.
+
+ The problem resolver is used to resolve the problems.
+ */
+bool pkgDistUpgrade(pkgDepCache &Cache)
+{
+ std::string const solver = _config->Find("APT::Solver", "internal");
+ if (solver != "internal") {
+ OpTextProgress Prog(*_config);
+ return EDSP::ResolveExternal(solver.c_str(), Cache, false, true, false, &Prog);
+ }
+
+ pkgDepCache::ActionGroup group(Cache);
+
+ /* Upgrade all installed packages first without autoinst so that, in
+ versioned or-groups, the resolver prefers upgrading the already-installed
+ alternative over installing a new one (in case the installed alternative
+ is no longer the first entry in the group) */
+ for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
+ if (I->CurrentVer != 0)
+ Cache.MarkInstall(I, false, 0, false);
+
+ /* Auto upgrade all installed packages, this provides the basis
+ for the installation */
+ for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
+ if (I->CurrentVer != 0)
+ Cache.MarkInstall(I, true, 0, false);
+
+ /* Now, install each essential package which is not installed
+ (and not provided by another package in the same name group) */
+ std::string essential = _config->Find("pkgCacheGen::Essential", "all");
+ if (essential == "all")
+ {
+ for (pkgCache::GrpIterator G = Cache.GrpBegin(); G.end() == false; ++G)
+ {
+ bool isEssential = false;
+ bool instEssential = false;
+ for (pkgCache::PkgIterator P = G.PackageList(); P.end() == false; P = G.NextPkg(P))
+ {
+ if ((P->Flags & pkgCache::Flag::Essential) != pkgCache::Flag::Essential)
+ continue;
+ isEssential = true;
+ if (Cache[P].Install() == true)
+ {
+ instEssential = true;
+ break;
+ }
+ }
+ if (isEssential == false || instEssential == true)
+ continue;
+ pkgCache::PkgIterator P = G.FindPreferredPkg();
+ Cache.MarkInstall(P, true, 0, false);
+ }
+ }
+ else if (essential != "none")
+ for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
+ if ((I->Flags & pkgCache::Flag::Essential) == pkgCache::Flag::Essential)
+ Cache.MarkInstall(I, true, 0, false);
+
+ /* We do it again over all previously installed packages to force
+ conflict resolution on them all. */
+ for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
+ if (I->CurrentVer != 0)
+ Cache.MarkInstall(I, false, 0, false);
+
+ pkgProblemResolver Fix(&Cache);
+
+ // Hold back held packages.
+ if (_config->FindB("APT::Ignore-Hold",false) == false)
+ {
+ for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
+ {
+ if (I->SelectedState == pkgCache::State::Hold)
+ {
+ Fix.Protect(I);
+ Cache.MarkKeep(I, false, false);
+ }
+ }
+ }
+
+ return Fix.Resolve();
+}
+ /*}}}*/
+// AllUpgradeNoNewPackages - Upgrade but no removals or new pkgs /*{{{*/
+static bool pkgAllUpgradeNoNewPackages(pkgDepCache &Cache)
+{
+ std::string const solver = _config->Find("APT::Solver", "internal");
+ if (solver != "internal") {
+ OpTextProgress Prog(*_config);
+ return EDSP::ResolveExternal(solver.c_str(), Cache, true, false, false, &Prog);
+ }
+
+ pkgDepCache::ActionGroup group(Cache);
+
+ pkgProblemResolver Fix(&Cache);
+
+ if (Cache.BrokenCount() != 0)
+ return false;
+
+ // Upgrade all installed packages
+ for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
+ {
+ if (Cache[I].Install() == true)
+ Fix.Protect(I);
+
+ if (_config->FindB("APT::Ignore-Hold",false) == false)
+ if (I->SelectedState == pkgCache::State::Hold)
+ continue;
+
+ if (I->CurrentVer != 0 && Cache[I].InstallVer != 0)
+ Cache.MarkInstall(I, false, 0, false);
+ }
+
+ return Fix.ResolveByKeep();
+}
+ /*}}}*/
+// AllUpgradeWithNewPackages - Upgrade + install new packages as needed /*{{{*/
+// ---------------------------------------------------------------------
+/* Right now the system must be consistent before this can be called.
+ * Upgrade as much as possible without deleting anything (useful for
+ * stable systems)
+ */
+static bool pkgAllUpgradeWithNewPackages(pkgDepCache &Cache)
+{
+ pkgDepCache::ActionGroup group(Cache);
+
+ pkgProblemResolver Fix(&Cache);
+
+ if (Cache.BrokenCount() != 0)
+ return false;
+
+ // provide the initial set of stuff we want to upgrade by marking
+ // all upgradable packages for upgrade
+ for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
+ {
+ if (I->CurrentVer != 0 && Cache[I].InstallVer != 0)
+ {
+ if (_config->FindB("APT::Ignore-Hold",false) == false)
+ if (I->SelectedState == pkgCache::State::Hold)
+ continue;
+
+ Cache.MarkInstall(I, false, 0, false);
+ }
+ }
+
+ // then let auto-install loose
+ for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
+ if (Cache[I].Install())
+ Cache.MarkInstall(I, true, 0, false);
+
+ // ... but it may remove packages, so we need to clean up afterwards again
+ for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
+ if (Cache[I].Delete() == true)
+ Cache.MarkKeep(I, false, false);
+
+ // resolve remaining issues via keep
+ return Fix.ResolveByKeep();
+}
+ /*}}}*/
+// AllUpgrade - Upgrade as many packages as possible /*{{{*/
+// ---------------------------------------------------------------------
+/* Right now the system must be consistent before this can be called.
+ It also will not change packages marked for install; it only tries
+ to install packages not marked for install */
+bool pkgAllUpgrade(pkgDepCache &Cache)
+{
+ return pkgAllUpgradeNoNewPackages(Cache);
+}
+ /*}}}*/
+// MinimizeUpgrade - Minimizes the set of packages to be upgraded /*{{{*/
+// ---------------------------------------------------------------------
+/* This simply goes over the entire set of packages and tries to keep
+ each package marked for upgrade. If a conflict is generated then
+ the package is restored. */
+bool pkgMinimizeUpgrade(pkgDepCache &Cache)
+{
+ pkgDepCache::ActionGroup group(Cache);
+
+ if (Cache.BrokenCount() != 0)
+ return false;
+
+ // We loop for 10 tries to get the minimal set size.
+ bool Change = false;
+ unsigned int Count = 0;
+ do
+ {
+ Change = false;
+ for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; ++I)
+ {
+ // Not interesting
+ if (Cache[I].Upgrade() == false || Cache[I].NewInstall() == true)
+ continue;
+
+ // Keep it and see if that is OK
+ Cache.MarkKeep(I, false, false);
+ if (Cache.BrokenCount() != 0)
+ Cache.MarkInstall(I, false, 0, false);
+ else
+ {
+ // The keep took hold (the package is no longer marked for upgrade), so record the change
+ if (Cache[I].Upgrade() == false)
+ Change = true;
+ }
+ }
+ ++Count;
+ }
+ while (Change == true && Count < 10);
+
+ if (Cache.BrokenCount() != 0)
+ return _error->Error("Internal Error in pkgMinimizeUpgrade");
+
+ return true;
+}
+ /*}}}*/
+// APT::Upgrade::Upgrade - Upgrade using a specific strategy /*{{{*/
+bool APT::Upgrade::Upgrade(pkgDepCache &Cache, int mode)
+{
+ if (mode == 0)
+ {
+ return pkgDistUpgrade(Cache);
+ }
+ else if ((mode & ~FORBID_REMOVE_PACKAGES) == 0)
+ {
+ return pkgAllUpgradeWithNewPackages(Cache);
+ }
+ else if ((mode & ~(FORBID_REMOVE_PACKAGES|FORBID_INSTALL_NEW_PACKAGES)) == 0)
+ {
+ return pkgAllUpgradeNoNewPackages(Cache);
+ }
+ else
+ _error->Error("pkgAllUpgrade called with unsupported mode %i", mode);
+
+ return false;
+}
+ /*}}}*/
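Illustration only, not part of the patch: a sketch of how the new mode flags select between the older entry points (pkgCacheFile and its Open/GetDepCache helpers are assumed to be available, and the library initialised as in the previous sketch):

    #include <apt-pkg/cachefile.h>
    #include <apt-pkg/upgrade.h>

    // plan an upgrade that neither removes nor adds packages
    // (the pkgAllUpgradeNoNewPackages path above)
    bool PlanSafeUpgrade()
    {
       pkgCacheFile CacheFile;
       if (CacheFile.Open(NULL, false) == false)   // no progress reporting, no lock
          return false;

       int const mode = APT::Upgrade::FORBID_REMOVE_PACKAGES |
                        APT::Upgrade::FORBID_INSTALL_NEW_PACKAGES;
       // mode == 0 would request a full pkgDistUpgrade instead
       return APT::Upgrade::Upgrade(*CacheFile.GetDepCache(), mode);
    }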
diff --git a/apt-pkg/upgrade.h b/apt-pkg/upgrade.h
new file mode 100644
index 000000000..c4973472f
--- /dev/null
+++ b/apt-pkg/upgrade.h
@@ -0,0 +1,30 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+/* ######################################################################
+
+ Upgrade - Upgrade/DistUpgrade related code
+
+ ##################################################################### */
+ /*}}}*/
+
+#ifndef PKGLIB_UPGRADE_H
+#define PKGLIB_UPGRADE_H
+
+class pkgDepCache;
+namespace APT {
+ namespace Upgrade {
+ // FIXME: make this "enum class UpgradeMode {" once we enable c++11
+ enum UpgradeMode {
+ FORBID_REMOVE_PACKAGES = 1,
+ FORBID_INSTALL_NEW_PACKAGES = 2,
+ };
+ bool Upgrade(pkgDepCache &Cache, int UpgradeMode);
+ }
+}
+
+// please use APT::Upgrade::Upgrade() instead
+bool pkgDistUpgrade(pkgDepCache &Cache);
+bool pkgAllUpgrade(pkgDepCache &Cache);
+bool pkgMinimizeUpgrade(pkgDepCache &Cache);
+
+
+#endif