author     Michael Vogt <mvo@debian.org>  2013-07-24 22:06:18 +0200
committer  Michael Vogt <mvo@debian.org>  2013-07-24 22:06:18 +0200
commit     f39daeb1f66b8910f91274055bf07c3d008cdc50 (patch)
tree       9283950ebf8a3089b8b16b1d09c3aec8ecc5eb07 /apt-pkg
parent     267275c59cc35704789a228c6e9b1464c4cabd74 (diff)
parent     c7a629dd114c41a1244744e2f5085df2f505dc90 (diff)
Merge remote-tracking branch 'upstream/debian/sid'
Diffstat (limited to 'apt-pkg')
-rw-r--r--  apt-pkg/acquire-item.cc            13
-rw-r--r--  apt-pkg/algorithms.cc              18
-rw-r--r--  apt-pkg/algorithms.h                7
-rw-r--r--  apt-pkg/cacheiterators.h            1
-rw-r--r--  apt-pkg/cdrom.cc                    8
-rw-r--r--  apt-pkg/contrib/error.h            41
-rw-r--r--  apt-pkg/contrib/fileutl.cc        191
-rw-r--r--  apt-pkg/contrib/fileutl.h           4
-rw-r--r--  apt-pkg/contrib/gpgv.cc             2
-rw-r--r--  apt-pkg/contrib/gpgv.h             15
-rw-r--r--  apt-pkg/contrib/sha2_internal.cc   12
-rw-r--r--  apt-pkg/contrib/strutl.cc           5
-rw-r--r--  apt-pkg/deb/deblistparser.cc        6
-rw-r--r--  apt-pkg/deb/debmetaindex.cc        23
-rw-r--r--  apt-pkg/deb/dpkgpm.cc              71
-rw-r--r--  apt-pkg/deb/dpkgpm.h                4
-rw-r--r--  apt-pkg/depcache.cc                86
-rw-r--r--  apt-pkg/depcache.h                 30
-rw-r--r--  apt-pkg/indexcopy.cc                6
-rw-r--r--  apt-pkg/orderlist.cc              224
-rw-r--r--  apt-pkg/packagemanager.cc         121
-rw-r--r--  apt-pkg/pkgcache.cc                12
-rw-r--r--  apt-pkg/pkgcachegen.cc              4
23 files changed, 553 insertions, 351 deletions
diff --git a/apt-pkg/acquire-item.cc b/apt-pkg/acquire-item.cc
index 7b800e42f..c48443eff 100644
--- a/apt-pkg/acquire-item.cc
+++ b/apt-pkg/acquire-item.cc
@@ -1289,7 +1289,14 @@ void pkgAcqMetaIndex::RetrievalDone(string Message) /*{{{*/
string FinalFile = _config->FindDir("Dir::State::lists");
FinalFile += URItoFileName(RealURI);
if (SigFile == DestFile)
+ {
SigFile = FinalFile;
+ // constructor of pkgAcqMetaClearSig moved it out of the way,
+ // now move it back in on IMS hit for the 'old' file
+ string const OldClearSig = DestFile + ".reverify";
+ if (RealFileExists(OldClearSig) == true)
+ Rename(OldClearSig, FinalFile);
+ }
DestFile = FinalFile;
}
Complete = true;
@@ -1595,7 +1602,11 @@ string pkgAcqMetaClearSig::Custom600Headers()
struct stat Buf;
if (stat(Final.c_str(),&Buf) != 0)
- return "\nIndex-File: true\nFail-Ignore: true\n";
+ {
+ Final = DestFile + ".reverify";
+ if (stat(Final.c_str(),&Buf) != 0)
+ return "\nIndex-File: true\nFail-Ignore: true\n";
+ }
return "\nIndex-File: true\nFail-Ignore: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
}
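For illustration (not part of the patch; paths assume the default Dir::State::lists layout and a hypothetical mirror): the '.reverify' copy referenced above sits next to the partial download, so an IMS hit only has to move the previously verified clear-signed file back into place:

    // sketch of the rename performed on an IMS hit for an InRelease file
    std::string const FinalFile   = "/var/lib/apt/lists/ftp.debian.org_debian_dists_wheezy_InRelease";
    std::string const DestFile    = "/var/lib/apt/lists/partial/ftp.debian.org_debian_dists_wheezy_InRelease";
    std::string const OldClearSig = DestFile + ".reverify";
    if (RealFileExists(OldClearSig) == true)
       Rename(OldClearSig, FinalFile);      // reuse the previously verified copy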
diff --git a/apt-pkg/algorithms.cc b/apt-pkg/algorithms.cc
index 6cde4d6cc..85799a11b 100644
--- a/apt-pkg/algorithms.cc
+++ b/apt-pkg/algorithms.cc
@@ -845,8 +845,10 @@ bool pkgProblemResolver::ResolveInternal(bool const BrokenFix)
}
while (Again == true);
- if (Debug == true)
- clog << "Starting" << endl;
+ if (Debug == true) {
+ clog << "Starting pkgProblemResolver with broken count: "
+ << Cache.BrokenCount() << endl;
+ }
MakeScores();
@@ -874,8 +876,10 @@ bool pkgProblemResolver::ResolveInternal(bool const BrokenFix)
}
}
- if (Debug == true)
- clog << "Starting 2" << endl;
+ if (Debug == true) {
+ clog << "Starting 2 pkgProblemResolver with broken count: "
+ << Cache.BrokenCount() << endl;
+ }
/* Now consider all broken packages. For each broken package we either
 remove the package or fix its problem. We do this once, it should
@@ -1440,9 +1444,11 @@ bool pkgProblemResolver::ResolveByKeepInternal()
return true;
}
/*}}}*/
-// ProblemResolver::InstallProtect - Install all protected packages /*{{{*/
+// ProblemResolver::InstallProtect - deprecated cpu-eating no-op /*{{{*/
// ---------------------------------------------------------------------
-/* This is used to make sure protected packages are installed */
+/* Actions issued with FromUser bit set are protected from further
+ modification (except by other calls with FromUser set) nowadays, so we
+ don't need to reissue actions here, they are already set in stone. */
void pkgProblemResolver::InstallProtect()
{
pkgDepCache::ActionGroup group(Cache);
diff --git a/apt-pkg/algorithms.h b/apt-pkg/algorithms.h
index aff8a68f2..7f58c8eed 100644
--- a/apt-pkg/algorithms.h
+++ b/apt-pkg/algorithms.h
@@ -36,6 +36,8 @@
#include <iostream>
+#include <apt-pkg/macros.h>
+
#ifndef APT_8_CLEANER_HEADERS
#include <apt-pkg/acquire.h>
using std::ostream;
@@ -132,9 +134,8 @@ class pkgProblemResolver /*{{{*/
// Try to resolve problems only by using keep
bool ResolveByKeep();
- // Install all protected packages
- void InstallProtect();
-
+ __deprecated void InstallProtect();
+
pkgProblemResolver(pkgDepCache *Cache);
~pkgProblemResolver();
};
diff --git a/apt-pkg/cacheiterators.h b/apt-pkg/cacheiterators.h
index 179a0e963..886d84838 100644
--- a/apt-pkg/cacheiterators.h
+++ b/apt-pkg/cacheiterators.h
@@ -220,6 +220,7 @@ class pkgCache::VerIterator : public Iterator<Version, VerIterator> {
inline VerFileIterator FileList() const;
bool Downloadable() const;
inline const char *PriorityType() const {return Owner->Priority(S->Priority);};
+ const char *MultiArchType() const;
std::string RelStr() const;
bool Automatic() const;
diff --git a/apt-pkg/cdrom.cc b/apt-pkg/cdrom.cc
index 9a9a854bf..a5668a50a 100644
--- a/apt-pkg/cdrom.cc
+++ b/apt-pkg/cdrom.cc
@@ -829,6 +829,14 @@ bool pkgCdrom::Add(pkgCdromStatus *log) /*{{{*/
log->Update(msg.str());
log->Update(_("Copying package lists..."), STEP_COPY);
}
+
+ // check for existence and possibly create state directory for copying
+ string const listDir = _config->FindDir("Dir::State::lists");
+ string const partialListDir = listDir + "partial/";
+ if (CreateAPTDirectoryIfNeeded(_config->FindDir("Dir::State"), partialListDir) == false &&
+ CreateAPTDirectoryIfNeeded(listDir, partialListDir) == false)
+ return _error->Errno("cdrom", _("List directory %spartial is missing."), listDir.c_str());
+
// take care of the signatures and copy them if they are ok
// (we do this before PackageCopy as it modifies "List" and "SourceList")
SigVerify SignVerify;
diff --git a/apt-pkg/contrib/error.h b/apt-pkg/contrib/error.h
index 21c51c1be..7d09b2d4a 100644
--- a/apt-pkg/contrib/error.h
+++ b/apt-pkg/contrib/error.h
@@ -123,6 +123,25 @@ public: /*{{{*/
bool InsertErrno(MsgType const &type, const char* Function,
const char* Description,...) __like_printf(4) __cold;
+ /** \brief adds an errno message with the given type
+ *
+ * args needs to be initialized with va_start and terminated
+ * with va_end by the caller. msgSize is also an out-parameter and is
+ * updated in case it was not big enough to store the complete message.
+ *
+ * \param type of the error message
+ * \param Function which failed
+ * \param Description is the format string for args
+ * \param args list from a printf-like function
+ * \param errsv is the errno the error is for
+ * \param msgSize is the size of the char[] used to store message
+ * \return true if the message was added, false if not - the caller
+ * should call this method again in that case
+ */
+ bool InsertErrno(MsgType type, const char* Function,
+ const char* Description, va_list &args,
+ int const errsv, size_t &msgSize);
+
/** \brief add a fatal error message to the list
*
* Most of the stuff we consider as "error" is also "fatal" for
@@ -185,6 +204,22 @@ public: /*{{{*/
*/
bool Insert(MsgType const &type, const char* Description,...) __like_printf(3) __cold;
+ /** \brief adds an error message with the given type
+ *
+ * args needs to be initialized with va_start and terminated
+ * with va_end by the caller. msgSize is also an out-parameter and is
+ * updated in case it was not big enough to store the complete message.
+ *
+ * \param type of the error message
+ * \param Description is the format string for args
+ * \param args list from a printf-like function
+ * \param msgSize is the size of the char[] used to store message
+ * \return true if the message was added, false if not - the caller
+ * should call this method again in that case
+ */
+ bool Insert(MsgType type, const char* Description,
+ va_list &args, size_t &msgSize) __cold;
+
/** \brief is an error in the list?
*
* \return \b true if an error is included in the list, \b false otherwise
@@ -305,12 +340,6 @@ private: /*{{{*/
};
std::list<MsgStack> Stacks;
-
- bool InsertErrno(MsgType type, const char* Function,
- const char* Description, va_list &args,
- int const errsv, size_t &msgSize);
- bool Insert(MsgType type, const char* Description,
- va_list &args, size_t &msgSize);
/*}}}*/
};
/*}}}*/
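A minimal caller sketch for the newly public overload, following the doc comment above: a false return means msgSize was too small and has been enlarged, so the va_list has to be restarted and the call repeated (the FileFdError helper added to fileutl.cc below is the in-tree user of this pattern; the wrapper name here is hypothetical):

    bool ReportError(const char *Description, ...)
    {
       va_list args;
       size_t msgSize = 400;                 // initial guess, grown on demand by Insert
       bool inserted = false;
       while (inserted == false)
       {
          va_start(args, Description);
          inserted = _error->Insert(GlobalError::ERROR, Description, args, msgSize);
          va_end(args);
       }
       return false;                         // error paths conventionally return false
    }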
diff --git a/apt-pkg/contrib/fileutl.cc b/apt-pkg/contrib/fileutl.cc
index 46661887a..0f88923cf 100644
--- a/apt-pkg/contrib/fileutl.cc
+++ b/apt-pkg/contrib/fileutl.cc
@@ -184,7 +184,8 @@ bool RunScripts(const char *Cnf)
/* The caller is expected to set things so that failure causes erasure */
bool CopyFile(FileFd &From,FileFd &To)
{
- if (From.IsOpen() == false || To.IsOpen() == false)
+ if (From.IsOpen() == false || To.IsOpen() == false ||
+ From.Failed() == true || To.Failed() == true)
return false;
// Buffered copy between fds
@@ -861,7 +862,7 @@ bool FileFd::Open(string FileName,unsigned int const Mode,CompressMode Compress,
return Open(FileName, ReadOnly, Gzip, Perms);
if (Compress == Auto && (Mode & WriteOnly) == WriteOnly)
- return _error->Error("Autodetection on %s only works in ReadOnly openmode!", FileName.c_str());
+ return FileFdError("Autodetection on %s only works in ReadOnly openmode!", FileName.c_str());
std::vector<APT::Configuration::Compressor> const compressors = APT::Configuration::getCompressors();
std::vector<APT::Configuration::Compressor>::const_iterator compressor = compressors.begin();
@@ -914,17 +915,17 @@ bool FileFd::Open(string FileName,unsigned int const Mode,CompressMode Compress,
case Auto:
case Extension:
// Unreachable
- return _error->Error("Opening File %s in None, Auto or Extension should be already handled?!?", FileName.c_str());
+ return FileFdError("Opening File %s in None, Auto or Extension should be already handled?!?", FileName.c_str());
}
for (; compressor != compressors.end(); ++compressor)
if (compressor->Name == name)
break;
if (compressor == compressors.end())
- return _error->Error("Can't find a configured compressor %s for file %s", name.c_str(), FileName.c_str());
+ return FileFdError("Can't find a configured compressor %s for file %s", name.c_str(), FileName.c_str());
}
if (compressor == compressors.end())
- return _error->Error("Can't find a match for specified compressor mode for file %s", FileName.c_str());
+ return FileFdError("Can't find a match for specified compressor mode for file %s", FileName.c_str());
return Open(FileName, Mode, *compressor, Perms);
}
bool FileFd::Open(string FileName,unsigned int const Mode,APT::Configuration::Compressor const &compressor, unsigned long const Perms)
@@ -933,9 +934,9 @@ bool FileFd::Open(string FileName,unsigned int const Mode,APT::Configuration::Co
Flags = AutoClose;
if ((Mode & WriteOnly) != WriteOnly && (Mode & (Atomic | Create | Empty | Exclusive)) != 0)
- return _error->Error("ReadOnly mode for %s doesn't accept additional flags!", FileName.c_str());
+ return FileFdError("ReadOnly mode for %s doesn't accept additional flags!", FileName.c_str());
if ((Mode & ReadWrite) == 0)
- return _error->Error("No openmode provided in FileFd::Open for %s", FileName.c_str());
+ return FileFdError("No openmode provided in FileFd::Open for %s", FileName.c_str());
if ((Mode & Atomic) == Atomic)
{
@@ -981,7 +982,7 @@ bool FileFd::Open(string FileName,unsigned int const Mode,APT::Configuration::Co
close (iFd);
iFd = -1;
}
- return _error->Errno("open",_("Could not open file %s"), FileName.c_str());
+ return FileFdErrno("open",_("Could not open file %s"), FileName.c_str());
}
SetCloseExec(iFd,true);
@@ -1010,14 +1011,19 @@ bool FileFd::OpenDescriptor(int Fd, unsigned int const Mode, CompressMode Compre
case Xz: name = "xz"; break;
case Auto:
case Extension:
- return _error->Error("Opening Fd %d in Auto or Extension compression mode is not supported", Fd);
+ if (AutoClose == true && Fd != -1)
+ close(Fd);
+ return FileFdError("Opening Fd %d in Auto or Extension compression mode is not supported", Fd);
}
for (; compressor != compressors.end(); ++compressor)
if (compressor->Name == name)
break;
if (compressor == compressors.end())
- return _error->Error("Can't find a configured compressor %s for file %s", name.c_str(), FileName.c_str());
-
+ {
+ if (AutoClose == true && Fd != -1)
+ close(Fd);
+ return FileFdError("Can't find a configured compressor %s for file %s", name.c_str(), FileName.c_str());
+ }
return OpenDescriptor(Fd, Mode, *compressor, AutoClose);
}
bool FileFd::OpenDescriptor(int Fd, unsigned int const Mode, APT::Configuration::Compressor const &compressor, bool AutoClose)
@@ -1039,11 +1045,21 @@ bool FileFd::OpenDescriptor(int Fd, unsigned int const Mode, APT::Configuration:
else
iFd = Fd;
this->FileName = "";
- if (OpenInternDescriptor(Mode, compressor) == false)
+ if (Fd == -1 || OpenInternDescriptor(Mode, compressor) == false)
{
- if (AutoClose)
+ if (iFd != -1 && (
+#ifdef HAVE_ZLIB
+ compressor.Name == "gzip" ||
+#endif
+#ifdef HAVE_BZ2
+ compressor.Name == "bzip2" ||
+#endif
+ AutoClose == true))
+ {
close (iFd);
- return _error->Errno("gzdopen",_("Could not open file descriptor %d"), Fd);
+ iFd = -1;
+ }
+ return FileFdError(_("Could not open file descriptor %d"), Fd);
}
return true;
}
@@ -1105,10 +1121,7 @@ bool FileFd::OpenInternDescriptor(unsigned int const Mode, APT::Configuration::C
ExecWait(d->compressor_pid, "FileFdCompressor", true);
if ((Mode & ReadWrite) == ReadWrite)
- {
- Flags |= Fail;
- return _error->Error("ReadWrite mode is not supported for file %s", FileName.c_str());
- }
+ return FileFdError("ReadWrite mode is not supported for file %s", FileName.c_str());
bool const Comp = (Mode & WriteOnly) == WriteOnly;
if (Comp == false)
@@ -1131,10 +1144,7 @@ bool FileFd::OpenInternDescriptor(unsigned int const Mode, APT::Configuration::C
// Create a data pipe
int Pipe[2] = {-1,-1};
if (pipe(Pipe) != 0)
- {
- Flags |= Fail;
- return _error->Errno("pipe",_("Failed to create subprocess IPC"));
- }
+ return FileFdErrno("pipe",_("Failed to create subprocess IPC"));
for (int J = 0; J != 2; J++)
SetCloseExec(Pipe[J],true);
@@ -1244,14 +1254,13 @@ bool FileFd::Read(void *To,unsigned long long Size,unsigned long long *Actual)
{
if (errno == EINTR)
continue;
- Flags |= Fail;
#ifdef HAVE_ZLIB
if (d != NULL && d->gz != NULL)
{
int err;
char const * const errmsg = gzerror(d->gz, &err);
if (err != Z_ERRNO)
- return _error->Error("gzread: %s (%d: %s)", _("Read error"), err, errmsg);
+ return FileFdError("gzread: %s (%d: %s)", _("Read error"), err, errmsg);
}
#endif
#ifdef HAVE_BZ2
@@ -1260,10 +1269,10 @@ bool FileFd::Read(void *To,unsigned long long Size,unsigned long long *Actual)
int err;
char const * const errmsg = BZ2_bzerror(d->bz2, &err);
if (err != BZ_IO_ERROR)
- return _error->Error("BZ2_bzread: %s (%d: %s)", _("Read error"), err, errmsg);
+ return FileFdError("BZ2_bzread: %s (%d: %s)", _("Read error"), err, errmsg);
}
#endif
- return _error->Errno("read",_("Read error"));
+ return FileFdErrno("read",_("Read error"));
}
To = (char *)To + Res;
@@ -1284,9 +1293,8 @@ bool FileFd::Read(void *To,unsigned long long Size,unsigned long long *Actual)
Flags |= HitEof;
return true;
}
-
- Flags |= Fail;
- return _error->Error(_("read, still have %llu to read but none left"), Size);
+
+ return FileFdError(_("read, still have %llu to read but none left"), Size);
}
/*}}}*/
// FileFd::ReadLine - Read a complete line from the file /*{{{*/
@@ -1342,14 +1350,13 @@ bool FileFd::Write(const void *From,unsigned long long Size)
continue;
if (Res < 0)
{
- Flags |= Fail;
#ifdef HAVE_ZLIB
if (d != NULL && d->gz != NULL)
{
int err;
char const * const errmsg = gzerror(d->gz, &err);
if (err != Z_ERRNO)
- return _error->Error("gzwrite: %s (%d: %s)", _("Write error"), err, errmsg);
+ return FileFdError("gzwrite: %s (%d: %s)", _("Write error"), err, errmsg);
}
#endif
#ifdef HAVE_BZ2
@@ -1358,10 +1365,10 @@ bool FileFd::Write(const void *From,unsigned long long Size)
int err;
char const * const errmsg = BZ2_bzerror(d->bz2, &err);
if (err != BZ_IO_ERROR)
- return _error->Error("BZ2_bzwrite: %s (%d: %s)", _("Write error"), err, errmsg);
+ return FileFdError("BZ2_bzwrite: %s (%d: %s)", _("Write error"), err, errmsg);
}
#endif
- return _error->Errno("write",_("Write error"));
+ return FileFdErrno("write",_("Write error"));
}
From = (char *)From + Res;
@@ -1373,9 +1380,8 @@ bool FileFd::Write(const void *From,unsigned long long Size)
if (Size == 0)
return true;
-
- Flags |= Fail;
- return _error->Error(_("write, still have %llu to write but couldn't"), Size);
+
+ return FileFdError(_("write, still have %llu to write but couldn't"), Size);
}
bool FileFd::Write(int Fd, const void *From, unsigned long long Size)
{
@@ -1419,10 +1425,7 @@ bool FileFd::Seek(unsigned long long To)
return Skip(To - seekpos);
if ((d->openmode & ReadOnly) != ReadOnly)
- {
- Flags |= Fail;
- return _error->Error("Reopen is only implemented for read-only files!");
- }
+ return FileFdError("Reopen is only implemented for read-only files!");
#ifdef HAVE_BZ2
if (d->bz2 != NULL)
{
@@ -1443,17 +1446,11 @@ bool FileFd::Seek(unsigned long long To)
if (lseek(d->compressed_fd, 0, SEEK_SET) != 0)
iFd = d->compressed_fd;
if (iFd < 0)
- {
- Flags |= Fail;
- return _error->Error("Reopen is not implemented for pipes opened with FileFd::OpenDescriptor()!");
- }
+ return FileFdError("Reopen is not implemented for pipes opened with FileFd::OpenDescriptor()!");
}
if (OpenInternDescriptor(d->openmode, d->compressor) == false)
- {
- Flags |= Fail;
- return _error->Error("Seek on file %s because it couldn't be reopened", FileName.c_str());
- }
+ return FileFdError("Seek on file %s because it couldn't be reopened", FileName.c_str());
if (To != 0)
return Skip(To);
@@ -1469,10 +1466,7 @@ bool FileFd::Seek(unsigned long long To)
#endif
res = lseek(iFd,To,SEEK_SET);
if (res != (signed)To)
- {
- Flags |= Fail;
- return _error->Error("Unable to seek to %llu", To);
- }
+ return FileFdError("Unable to seek to %llu", To);
if (d != NULL)
d->seekpos = To;
@@ -1496,10 +1490,7 @@ bool FileFd::Skip(unsigned long long Over)
{
unsigned long long toread = std::min((unsigned long long) sizeof(buffer), Over);
if (Read(buffer, toread) == false)
- {
- Flags |= Fail;
- return _error->Error("Unable to seek ahead %llu",Over);
- }
+ return FileFdError("Unable to seek ahead %llu",Over);
Over -= toread;
}
return true;
@@ -1513,10 +1504,7 @@ bool FileFd::Skip(unsigned long long Over)
#endif
res = lseek(iFd,Over,SEEK_CUR);
if (res < 0)
- {
- Flags |= Fail;
- return _error->Error("Unable to seek ahead %llu",Over);
- }
+ return FileFdError("Unable to seek ahead %llu",Over);
if (d != NULL)
d->seekpos = res;
@@ -1530,17 +1518,11 @@ bool FileFd::Truncate(unsigned long long To)
{
#if defined HAVE_ZLIB || defined HAVE_BZ2
if (d != NULL && (d->gz != NULL || d->bz2 != NULL))
- {
- Flags |= Fail;
- return _error->Error("Truncating compressed files is not implemented (%s)", FileName.c_str());
- }
+ return FileFdError("Truncating compressed files is not implemented (%s)", FileName.c_str());
#endif
if (ftruncate(iFd,To) != 0)
- {
- Flags |= Fail;
- return _error->Error("Unable to truncate to %llu",To);
- }
-
+ return FileFdError("Unable to truncate to %llu",To);
+
return true;
}
/*}}}*/
@@ -1568,10 +1550,7 @@ unsigned long long FileFd::Tell()
#endif
Res = lseek(iFd,0,SEEK_CUR);
if (Res == (off_t)-1)
- {
- Flags |= Fail;
- _error->Errno("lseek","Failed to determine the current file position");
- }
+ FileFdErrno("lseek","Failed to determine the current file position");
if (d != NULL)
d->seekpos = Res;
return Res;
@@ -1584,10 +1563,7 @@ unsigned long long FileFd::FileSize()
{
struct stat Buf;
if ((d == NULL || d->pipe == false) && fstat(iFd,&Buf) != 0)
- {
- Flags |= Fail;
- return _error->Errno("fstat","Unable to determine the file size");
- }
+ return FileFdErrno("fstat","Unable to determine the file size");
// for compressor pipes st_size is undefined and at 'best' zero
if ((d != NULL && d->pipe == true) || S_ISFIFO(Buf.st_mode))
@@ -1597,10 +1573,7 @@ unsigned long long FileFd::FileSize()
if (d != NULL)
d->pipe = true;
if (stat(FileName.c_str(), &Buf) != 0)
- {
- Flags |= Fail;
- return _error->Errno("stat","Unable to determine the file size");
- }
+ return FileFdErrno("stat","Unable to determine the file size");
}
return Buf.st_size;
@@ -1642,16 +1615,10 @@ unsigned long long FileFd::Size()
* bits of the file */
// FIXME: Size for gz-files is limited by 32bit… no largefile support
if (lseek(iFd, -4, SEEK_END) < 0)
- {
- Flags |= Fail;
- return _error->Errno("lseek","Unable to seek to end of gzipped file");
- }
+ return FileFdErrno("lseek","Unable to seek to end of gzipped file");
size = 0L;
if (read(iFd, &size, 4) != 4)
- {
- Flags |= Fail;
- return _error->Errno("read","Unable to read original size of gzipped file");
- }
+ return FileFdErrno("read","Unable to read original size of gzipped file");
#ifdef WORDS_BIGENDIAN
uint32_t tmp_size = size;
@@ -1661,10 +1628,7 @@ unsigned long long FileFd::Size()
#endif
if (lseek(iFd, oldPos, SEEK_SET) < 0)
- {
- Flags |= Fail;
- return _error->Errno("lseek","Unable to seek in gzipped file");
- }
+ return FileFdErrno("lseek","Unable to seek in gzipped file");
return size;
}
@@ -1681,8 +1645,7 @@ time_t FileFd::ModificationTime()
struct stat Buf;
if ((d == NULL || d->pipe == false) && fstat(iFd,&Buf) != 0)
{
- Flags |= Fail;
- _error->Errno("fstat","Unable to determine the modification time of file %s", FileName.c_str());
+ FileFdErrno("fstat","Unable to determine the modification time of file %s", FileName.c_str());
return 0;
}
@@ -1695,8 +1658,7 @@ time_t FileFd::ModificationTime()
d->pipe = true;
if (stat(FileName.c_str(), &Buf) != 0)
{
- Flags |= Fail;
- _error->Errno("fstat","Unable to determine the modification time of file %s", FileName.c_str());
+ FileFdErrno("fstat","Unable to determine the modification time of file %s", FileName.c_str());
return 0;
}
}
@@ -1752,11 +1714,40 @@ bool FileFd::Close()
bool FileFd::Sync()
{
if (fsync(iFd) != 0)
+ return FileFdErrno("sync",_("Problem syncing the file"));
+ return true;
+}
+ /*}}}*/
+// FileFd::FileFdErrno - set Fail and call _error->Errno *{{{*/
+bool FileFd::FileFdErrno(const char *Function, const char *Description,...)
+{
+ Flags |= Fail;
+ va_list args;
+ size_t msgSize = 400;
+ int const errsv = errno;
+ while (true)
{
- Flags |= Fail;
- return _error->Errno("sync",_("Problem syncing the file"));
+ va_start(args,Description);
+ if (_error->InsertErrno(GlobalError::ERROR, Function, Description, args, errsv, msgSize) == false)
+ break;
+ va_end(args);
}
- return true;
+ return false;
+}
+ /*}}}*/
+// FileFd::FileFdError - set Fail and call _error->Error *{{{*/
+bool FileFd::FileFdError(const char *Description,...) {
+ Flags |= Fail;
+ va_list args;
+ size_t msgSize = 400;
+ while (true)
+ {
+ va_start(args,Description);
+ if (_error->Insert(GlobalError::ERROR, Description, args, msgSize) == false)
+ break;
+ va_end(args);
+ }
+ return false;
}
/*}}}*/
diff --git a/apt-pkg/contrib/fileutl.h b/apt-pkg/contrib/fileutl.h
index 426664d3a..3ec01dd9a 100644
--- a/apt-pkg/contrib/fileutl.h
+++ b/apt-pkg/contrib/fileutl.h
@@ -149,6 +149,10 @@ class FileFd
private:
FileFdPrivate* d;
bool OpenInternDescriptor(unsigned int const Mode, APT::Configuration::Compressor const &compressor);
+
+ // private helpers to set Fail flag and call _error->Error
+ bool FileFdErrno(const char* Function, const char* Description,...) __like_printf(3) __cold;
+ bool FileFdError(const char* Description,...) __like_printf(2) __cold;
};
bool RunScripts(const char *Cnf);
diff --git a/apt-pkg/contrib/gpgv.cc b/apt-pkg/contrib/gpgv.cc
index 31db7d5fe..f47e7ea48 100644
--- a/apt-pkg/contrib/gpgv.cc
+++ b/apt-pkg/contrib/gpgv.cc
@@ -154,7 +154,7 @@ void ExecGPGV(std::string const &File, std::string const &FileGPG,
if (sigFd != -1)
unlink(data);
ioprintf(std::cerr, "Splitting up %s into data and signature failed", File.c_str());
- exit(EINTERNAL);
+ exit(112);
}
Args.push_back(sig);
Args.push_back(data);
diff --git a/apt-pkg/contrib/gpgv.h b/apt-pkg/contrib/gpgv.h
index 08b10a97a..45f069058 100644
--- a/apt-pkg/contrib/gpgv.h
+++ b/apt-pkg/contrib/gpgv.h
@@ -23,9 +23,18 @@
/** \brief generates and runs the command to verify a file with gpgv
*
* If File and FileSig specify the same file it is assumed that we
- * deal with a clear-signed message. In that case the file will be
- * rewritten to be in a good-known format without uneeded whitespaces
- * and additional messages (unsigned or signed).
+ * deal with a clear-signed message. Note that the method will accept
+ * and validate files which include additional (unsigned) messages
+ * without complaining. Do NOT open files accepted by this method
+ * for reading. Use #OpenMaybeClearSignedFile to access the message
+ * instead to ensure you are only reading signed data.
+ *
+ * The method does not return, but has some notable exit-codes:
+ * 111 signals an internal error like the inability to execute gpgv,
+ * 112 indicates a clear-signed file which doesn't include a message,
+ * which can happen if APT is run while on a network requiring
+ * authentication before usage (e.g. in hotels)
+ * All other exit-codes are passed-through from gpgv.
*
* @param File is the message (unsigned or clear-signed)
* @param FileSig is the signature (detached or clear-signed)
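Since ExecGPGV never returns, a caller typically forks and maps the child's exit status back to a result. A minimal sketch (the fork/waitpid scaffolding is an assumption and the remaining ExecGPGV parameters are assumed to keep their defaults; the exit codes are the ones documented above):

    #include <apt-pkg/gpgv.h>
    #include <string>
    #include <sys/wait.h>
    #include <unistd.h>

    static bool VerifySignature(std::string const &File, std::string const &Sig)
    {
       pid_t const child = fork();
       if (child < 0)
          return false;
       if (child == 0)
          ExecGPGV(File, Sig);               // replaces the child process, never returns
       int status = 0;
       waitpid(child, &status, 0);
       if (WIFEXITED(status) == false)
          return false;
       switch (WEXITSTATUS(status))
       {
          case 0:   return true;             // gpgv accepted the signature(s)
          case 111: return false;            // internal error, e.g. gpgv not executable
          case 112: return false;            // clear-signed file carries no message
          default:  return false;            // gpgv's own failure codes pass through
       }
    }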
diff --git a/apt-pkg/contrib/sha2_internal.cc b/apt-pkg/contrib/sha2_internal.cc
index 83b5a98d3..f84fb761c 100644
--- a/apt-pkg/contrib/sha2_internal.cc
+++ b/apt-pkg/contrib/sha2_internal.cc
@@ -632,7 +632,7 @@ void SHA256_Final(sha2_byte digest[], SHA256_CTX* context) {
}
/* Clean up state data: */
- MEMSET_BZERO(context, sizeof(context));
+ MEMSET_BZERO(context, sizeof(*context));
usedspace = 0;
}
@@ -653,7 +653,7 @@ char *SHA256_End(SHA256_CTX* context, char buffer[]) {
}
*buffer = (char)0;
} else {
- MEMSET_BZERO(context, sizeof(context));
+ MEMSET_BZERO(context, sizeof(*context));
}
MEMSET_BZERO(digest, SHA256_DIGEST_LENGTH);
return buffer;
@@ -969,7 +969,7 @@ void SHA512_Final(sha2_byte digest[], SHA512_CTX* context) {
}
/* Zero out state data */
- MEMSET_BZERO(context, sizeof(context));
+ MEMSET_BZERO(context, sizeof(*context));
}
char *SHA512_End(SHA512_CTX* context, char buffer[]) {
@@ -989,7 +989,7 @@ char *SHA512_End(SHA512_CTX* context, char buffer[]) {
}
*buffer = (char)0;
} else {
- MEMSET_BZERO(context, sizeof(context));
+ MEMSET_BZERO(context, sizeof(*context));
}
MEMSET_BZERO(digest, SHA512_DIGEST_LENGTH);
return buffer;
@@ -1044,7 +1044,7 @@ void SHA384_Final(sha2_byte digest[], SHA384_CTX* context) {
}
/* Zero out state data */
- MEMSET_BZERO(context, sizeof(context));
+ MEMSET_BZERO(context, sizeof(*context));
}
char *SHA384_End(SHA384_CTX* context, char buffer[]) {
@@ -1064,7 +1064,7 @@ char *SHA384_End(SHA384_CTX* context, char buffer[]) {
}
*buffer = (char)0;
} else {
- MEMSET_BZERO(context, sizeof(context));
+ MEMSET_BZERO(context, sizeof(*context));
}
MEMSET_BZERO(digest, SHA384_DIGEST_LENGTH);
return buffer;
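The change matters because 'context' is a pointer: sizeof(context) is the pointer size, not the size of the hash state, so the old code zeroed only the first few bytes. A tiny illustration (assumes the internal sha2 header is in scope):

    #include <stdio.h>
    // sizeof(pointer) vs sizeof(pointee) for the cleanup above
    static void show_sizes(SHA256_CTX *context)
    {
       printf("%zu vs %zu\n", sizeof(context), sizeof(*context));
       // e.g. "8 vs 104" on a 64-bit build: MEMSET_BZERO(context, sizeof(context))
       // therefore left most of the hash state in memory
    }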
diff --git a/apt-pkg/contrib/strutl.cc b/apt-pkg/contrib/strutl.cc
index f4dd3407d..d0e74d8c5 100644
--- a/apt-pkg/contrib/strutl.cc
+++ b/apt-pkg/contrib/strutl.cc
@@ -758,7 +758,8 @@ bool ReadMessages(int Fd, vector<string> &List)
// Look for the end of the message
for (char *I = Buffer; I + 1 < End; I++)
{
- if (I[0] != '\n' || I[1] != '\n')
+ if (I[1] != '\n' ||
+ (I[0] != '\n' && strncmp(I, "\r\n\r\n", 4) != 0))
continue;
// Pull the message out
@@ -766,7 +767,7 @@ bool ReadMessages(int Fd, vector<string> &List)
PartialMessage += Message;
// Fix up the buffer
- for (; I < End && *I == '\n'; I++);
+ for (; I < End && (*I == '\n' || *I == '\r'); ++I);
End -= I-Buffer;
memmove(Buffer,I,End-Buffer);
I = Buffer;
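With this change a message read by ReadMessages() may be terminated either by an empty line or by a CRLF empty line; both of these hypothetical method messages are now split at the same point:

    // LF-only, as produced by the classic methods
    char const lf_msg[]   = "201 URI Done\nURI: http://example.org/InRelease\n\n";
    // CRLF variant, now accepted as well
    char const crlf_msg[] = "201 URI Done\r\nURI: http://example.org/InRelease\r\n\r\n";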
diff --git a/apt-pkg/deb/deblistparser.cc b/apt-pkg/deb/deblistparser.cc
index db86bd698..28857176b 100644
--- a/apt-pkg/deb/deblistparser.cc
+++ b/apt-pkg/deb/deblistparser.cc
@@ -219,8 +219,12 @@ MD5SumValue debListParser::Description_md5()
string const value = Section.FindS("Description-md5");
if (value.empty() == true)
{
+ std::string const desc = Description() + "\n";
+ if (desc == "\n")
+ return MD5SumValue();
+
MD5Summation md5;
- md5.Add((Description() + "\n").c_str());
+ md5.Add(desc.c_str());
return md5.Result();
}
else if (likely(value.size() == 32))
diff --git a/apt-pkg/deb/debmetaindex.cc b/apt-pkg/deb/debmetaindex.cc
index 7a88d71e3..7dd5ab2bf 100644
--- a/apt-pkg/deb/debmetaindex.cc
+++ b/apt-pkg/deb/debmetaindex.cc
@@ -373,10 +373,29 @@ class debSLTypeDebian : public pkgSourceList::Type
string const &Dist, string const &Section,
bool const &IsSrc, map<string, string> const &Options) const
{
- map<string, string>::const_iterator const arch = Options.find("arch");
- vector<string> const Archs =
+ // parse arch=, arch+= and arch-= settings
+ map<string, string>::const_iterator arch = Options.find("arch");
+ vector<string> Archs =
(arch != Options.end()) ? VectorizeString(arch->second, ',') :
APT::Configuration::getArchitectures();
+ if ((arch = Options.find("arch+")) != Options.end())
+ {
+ std::vector<std::string> const plusArch = VectorizeString(arch->second, ',');
+ for (std::vector<std::string>::const_iterator plus = plusArch.begin(); plus != plusArch.end(); ++plus)
+ if (std::find(Archs.begin(), Archs.end(), *plus) == Archs.end())
+ Archs.push_back(*plus);
+ }
+ if ((arch = Options.find("arch-")) != Options.end())
+ {
+ std::vector<std::string> const minusArch = VectorizeString(arch->second, ',');
+ for (std::vector<std::string>::const_iterator minus = minusArch.begin(); minus != minusArch.end(); ++minus)
+ {
+ std::vector<std::string>::iterator kill = std::find(Archs.begin(), Archs.end(), *minus);
+ if (kill != Archs.end())
+ Archs.erase(kill);
+ }
+ }
+
map<string, string>::const_iterator const trusted = Options.find("trusted");
for (vector<metaIndex *>::const_iterator I = List.begin();
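With the new options a sources.list entry can adjust the configured architecture list instead of replacing it; hypothetical examples:

    # explicit list, as before
    deb [arch=amd64,i386] http://ftp.debian.org/debian wheezy main
    # the configured architectures plus armel
    deb [arch+=armel] http://ftp.debian.org/debian wheezy main
    # the configured architectures minus i386
    deb [arch-=i386] http://ftp.debian.org/debian wheezy main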
diff --git a/apt-pkg/deb/dpkgpm.cc b/apt-pkg/deb/dpkgpm.cc
index 3bc31dc37..fb0473535 100644
--- a/apt-pkg/deb/dpkgpm.cc
+++ b/apt-pkg/deb/dpkgpm.cc
@@ -238,15 +238,23 @@ bool pkgDPkgPM::Remove(PkgIterator Pkg,bool Purge)
return true;
}
/*}}}*/
-// DPkgPM::SendV2Pkgs - Send version 2 package info /*{{{*/
+// DPkgPM::SendPkgsInfo - Send info for install-pkgs hook /*{{{*/
// ---------------------------------------------------------------------
/* This is part of the helper script communication interface, it sends
very complete information down to the other end of the pipe.*/
bool pkgDPkgPM::SendV2Pkgs(FILE *F)
{
- fprintf(F,"VERSION 2\n");
-
- /* Write out all of the configuration directives by walking the
+ return SendPkgsInfo(F, 2);
+}
+bool pkgDPkgPM::SendPkgsInfo(FILE * const F, unsigned int const &Version)
+{
+ // This version of APT supports only v3, so don't send higher versions
+ if (Version <= 3)
+ fprintf(F,"VERSION %u\n", Version);
+ else
+ fprintf(F,"VERSION 3\n");
+
+ /* Write out all of the configuration directives by walking the
configuration tree */
const Configuration::Item *Top = _config->Tree(0);
for (; Top != 0;)
@@ -280,30 +288,51 @@ bool pkgDPkgPM::SendV2Pkgs(FILE *F)
pkgDepCache::StateCache &S = Cache[I->Pkg];
fprintf(F,"%s ",I->Pkg.Name());
- // Current version
- if (I->Pkg->CurrentVer == 0)
- fprintf(F,"- ");
+
+ // Current version which we are going to replace
+ pkgCache::VerIterator CurVer = I->Pkg.CurrentVer();
+ if (CurVer.end() == true && (I->Op == Item::Remove || I->Op == Item::Purge))
+ CurVer = FindNowVersion(I->Pkg);
+
+ else if (CurVer.end() == true)
+ {
+ if (Version <= 2)
+ fprintf(F, "- ");
+ else
+ fprintf(F, "- - none ");
+ }
else
- fprintf(F,"%s ",I->Pkg.CurrentVer().VerStr());
-
- // Show the compare operator
- // Target version
+ {
+ fprintf(F, "%s ", CurVer.VerStr());
+ if (Version >= 3)
+ fprintf(F, "%s %s ", CurVer.Arch(), CurVer.MultiArchType());
+ }
+
+ // Show the compare operator between current and install version
if (S.InstallVer != 0)
{
+ pkgCache::VerIterator const InstVer = S.InstVerIter(Cache);
int Comp = 2;
- if (I->Pkg->CurrentVer != 0)
- Comp = S.InstVerIter(Cache).CompareVer(I->Pkg.CurrentVer());
+ if (CurVer.end() == false)
+ Comp = InstVer.CompareVer(CurVer);
if (Comp < 0)
fprintf(F,"> ");
- if (Comp == 0)
+ else if (Comp == 0)
fprintf(F,"= ");
- if (Comp > 0)
+ else if (Comp > 0)
fprintf(F,"< ");
- fprintf(F,"%s ",S.InstVerIter(Cache).VerStr());
+ fprintf(F, "%s ", InstVer.VerStr());
+ if (Version >= 3)
+ fprintf(F, "%s %s ", InstVer.Arch(), InstVer.MultiArchType());
}
else
- fprintf(F,"> - ");
-
+ {
+ if (Version <= 2)
+ fprintf(F, "> - ");
+ else
+ fprintf(F, "> - - none ");
+ }
+
// Show the filename/operation
if (I->Op == Item::Install)
{
@@ -313,9 +342,9 @@ bool pkgDPkgPM::SendV2Pkgs(FILE *F)
else
fprintf(F,"%s\n",I->File.c_str());
}
- if (I->Op == Item::Configure)
+ else if (I->Op == Item::Configure)
fprintf(F,"**CONFIGURE**\n");
- if (I->Op == Item::Remove ||
+ else if (I->Op == Item::Remove ||
I->Op == Item::Purge)
fprintf(F,"**REMOVE**\n");
@@ -404,7 +433,7 @@ bool pkgDPkgPM::RunScriptsWithPkgs(const char *Cnf)
}
}
else
- SendV2Pkgs(F);
+ SendPkgsInfo(F, Version);
fclose(F);
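A hook opts into the richer format via its Version option (the existing DPkg::Tools::Options mechanism); with VERSION 3 every action line additionally carries architecture and Multi-Arch type for both the current and the install version. A sketch with illustrative paths and versions:

    # apt.conf: register a hook and request the v3 protocol
    DPkg::Pre-Install-Pkgs { "/usr/local/bin/my-hook"; };
    DPkg::Tools::Options::/usr/local/bin/my-hook::Version "3";

    # what the hook then reads on its input for an upgrade of 'apt':
    #   VERSION 3
    #   ...configuration directives...
    #
    #   apt 0.9.7 amd64 none < 0.9.9 amd64 none /var/cache/apt/archives/apt_0.9.9_amd64.deb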
diff --git a/apt-pkg/deb/dpkgpm.h b/apt-pkg/deb/dpkgpm.h
index aab39f633..c31d56f8e 100644
--- a/apt-pkg/deb/dpkgpm.h
+++ b/apt-pkg/deb/dpkgpm.h
@@ -14,6 +14,7 @@
#include <vector>
#include <map>
#include <stdio.h>
+#include <apt-pkg/macros.h>
#ifndef APT_8_CLEANER_HEADERS
using std::vector;
@@ -79,7 +80,8 @@ class pkgDPkgPM : public pkgPackageManager
// Helpers
bool RunScriptsWithPkgs(const char *Cnf);
- bool SendV2Pkgs(FILE *F);
+ __deprecated bool SendV2Pkgs(FILE *F);
+ bool SendPkgsInfo(FILE * const F, unsigned int const &Version);
void WriteHistoryTag(std::string const &tag, std::string value);
// apport integration
diff --git a/apt-pkg/depcache.cc b/apt-pkg/depcache.cc
index 6a3e9bfc4..2c6eb43bf 100644
--- a/apt-pkg/depcache.cc
+++ b/apt-pkg/depcache.cc
@@ -865,6 +865,11 @@ bool pkgDepCache::MarkDelete(PkgIterator const &Pkg, bool rPurge,
bool pkgDepCache::IsDeleteOk(PkgIterator const &Pkg,bool rPurge,
unsigned long Depth, bool FromUser)
{
+ return IsDeleteOkProtectInstallRequests(Pkg, rPurge, Depth, FromUser);
+}
+bool pkgDepCache::IsDeleteOkProtectInstallRequests(PkgIterator const &Pkg,
+ bool const rPurge, unsigned long const Depth, bool const FromUser)
+{
if (FromUser == false && Pkg->CurrentVer == 0)
{
StateCache &P = PkgState[Pkg->ID];
@@ -1004,7 +1009,7 @@ struct CompareProviders {
}
// higher priority seems like a good idea
if (AV->Priority != BV->Priority)
- return AV->Priority < BV->Priority;
+ return AV->Priority > BV->Priority;
// prefer native architecture
if (strcmp(A.Arch(), B.Arch()) != 0)
{
@@ -1047,9 +1052,10 @@ bool pkgDepCache::MarkInstall(PkgIterator const &Pkg,bool AutoInst,
return true;
}
- // check if we are allowed to install the package
- if (IsInstallOk(Pkg,AutoInst,Depth,FromUser) == false)
- return false;
+ // check if we are allowed to install the package (if we haven't already)
+ if (P.Mode != ModeInstall || P.InstallVer != P.CandidateVer)
+ if (IsInstallOk(Pkg,AutoInst,Depth,FromUser) == false)
+ return false;
ActionGroup group(*this);
P.iFlags &= ~AutoKept;
@@ -1200,16 +1206,23 @@ bool pkgDepCache::MarkInstall(PkgIterator const &Pkg,bool AutoInst,
verlist.insert(Cand);
}
CompareProviders comp(Start);
- APT::VersionList::iterator InstVer = std::max_element(verlist.begin(), verlist.end(), comp);
- if (InstVer != verlist.end())
- {
+ do {
+ APT::VersionList::iterator InstVer = std::max_element(verlist.begin(), verlist.end(), comp);
+
+ if (InstVer == verlist.end())
+ break;
+
pkgCache::PkgIterator InstPkg = InstVer.ParentPkg();
if(DebugAutoInstall == true)
std::clog << OutputInDepth(Depth) << "Installing " << InstPkg.Name()
<< " as " << Start.DepType() << " of " << Pkg.Name()
<< std::endl;
- MarkInstall(InstPkg, true, Depth + 1, false, ForceImportantDeps);
+ if (MarkInstall(InstPkg, true, Depth + 1, false, ForceImportantDeps) == false)
+ {
+ verlist.erase(InstVer);
+ continue;
+ }
// now check if we should consider it a automatic dependency or not
if(InstPkg->CurrentVer == 0 && Pkg->Section != 0 && ConfigValueInSubTree("APT::Never-MarkAuto-Sections", Pkg.Section()))
{
@@ -1218,7 +1231,8 @@ bool pkgDepCache::MarkInstall(PkgIterator const &Pkg,bool AutoInst,
<< Start.DepType() << " of pkg in APT::Never-MarkAuto-Sections)" << std::endl;
MarkAuto(InstPkg, false);
}
- }
+ break;
+ } while(true);
continue;
}
/* Negative dependencies have no or-group
@@ -1243,9 +1257,16 @@ bool pkgDepCache::MarkInstall(PkgIterator const &Pkg,bool AutoInst,
PkgState[Pkg->ID].CandidateVer != *I &&
MarkInstall(Pkg,true,Depth + 1, false, ForceImportantDeps) == true)
continue;
- else if ((Start->Type == pkgCache::Dep::Conflicts || Start->Type == pkgCache::Dep::DpkgBreaks) &&
- MarkDelete(Pkg,false,Depth + 1, false) == false)
- break;
+ else if (Start->Type == pkgCache::Dep::Conflicts ||
+ Start->Type == pkgCache::Dep::DpkgBreaks)
+ {
+ if(DebugAutoInstall == true)
+ std::clog << OutputInDepth(Depth)
+ << " Removing: " << Pkg.Name()
+ << std::endl;
+ if (MarkDelete(Pkg,false,Depth + 1, false) == false)
+ break;
+ }
}
continue;
}
@@ -1256,11 +1277,50 @@ bool pkgDepCache::MarkInstall(PkgIterator const &Pkg,bool AutoInst,
/*}}}*/
// DepCache::IsInstallOk - check if it is ok to install this package /*{{{*/
// ---------------------------------------------------------------------
-/* The default implementation does nothing.
+/* The default implementation checks if the installation of an M-A:same
+ package would lead us into a version-screw and if so forbids it.
dpkg holds are enforced by the private IsModeChangeOk */
bool pkgDepCache::IsInstallOk(PkgIterator const &Pkg,bool AutoInst,
unsigned long Depth, bool FromUser)
{
+ return IsInstallOkMultiArchSameVersionSynced(Pkg,AutoInst, Depth, FromUser);
+}
+bool pkgDepCache::IsInstallOkMultiArchSameVersionSynced(PkgIterator const &Pkg,
+ bool const AutoInst, unsigned long const Depth, bool const FromUser)
+{
+ if (FromUser == true) // as always: user is always right
+ return true;
+
+ // ignore packages with none-M-A:same candidates
+ VerIterator const CandVer = PkgState[Pkg->ID].CandidateVerIter(*this);
+ if (unlikely(CandVer.end() == true) || CandVer == Pkg.CurrentVer() ||
+ (CandVer->MultiArch & pkgCache::Version::Same) != pkgCache::Version::Same)
+ return true;
+
+ GrpIterator const Grp = Pkg.Group();
+ for (PkgIterator P = Grp.PackageList(); P.end() == false; P = Grp.NextPkg(P))
+ {
+ // not installed or version synced: fine by definition
+ // (simple string-compare as stuff like '1' == '0:1-0' can't happen here)
+ if (P->CurrentVer == 0 || strcmp(Pkg.CandVersion(), P.CandVersion()) == 0)
+ continue;
+ // packages losing M-A:same can be out-of-sync
+ VerIterator CV = PkgState[P->ID].CandidateVerIter(*this);
+ if (unlikely(CV.end() == true) ||
+ (CV->MultiArch & pkgCache::Version::Same) != pkgCache::Version::Same)
+ continue;
+
+ // not downloadable means the package is obsolete, so allow out-of-sync
+ if (CV.Downloadable() == false)
+ continue;
+
+ PkgState[Pkg->ID].iFlags |= AutoKept;
+ if (unlikely(DebugMarker == true))
+ std::clog << OutputInDepth(Depth) << "Ignore MarkInstall of " << Pkg
+ << " as its M-A:same siblings are not version-synced" << std::endl;
+ return false;
+ }
+
return true;
}
/*}}}*/
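An illustrative scenario for the new check ('libfoo' is a hypothetical M-A:same library whose amd64 candidate is newer than the still-unsynced i386 candidate): a resolver-driven install is now refused and flagged AutoKept, while an explicit user request still wins. A sketch, assuming the usual pkgDepCache accessors:

    #include <apt-pkg/depcache.h>

    static void demo(pkgDepCache &Cache)
    {
       pkgCache::PkgIterator const Pkg = Cache.GetCache().FindPkg("libfoo", "amd64");
       if (Pkg.end() == true)
          return;
       Cache.MarkInstall(Pkg, true, 0, /*FromUser*/ false);   // kept back, siblings out of sync
       Cache.MarkInstall(Pkg, true, 0, /*FromUser*/ true);    // "user is always right"
    }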
diff --git a/apt-pkg/depcache.h b/apt-pkg/depcache.h
index 7358048ed..d9c95349b 100644
--- a/apt-pkg/depcache.h
+++ b/apt-pkg/depcache.h
@@ -442,16 +442,15 @@ class pkgDepCache : protected pkgCache::Namespace
/** \return \b true if it's OK for MarkInstall to install
* the given package.
*
- * See the default implementation for a simple example how this
- * method can be used.
- * Overriding implementations should use the hold-state-flag to cache
- * results from previous checks of this package - also it should
- * be used if the default resolver implementation is also used to
- * ensure that these packages are handled like "normal" dpkg holds.
+ * The default implementation simply calls all IsInstallOk*
+ * methods mentioned below.
+ *
+ * Overriding implementations should use the hold-state-flag to
+ * cache results from previous checks of this package - if possible.
*
* The parameters are the same as in the calling MarkInstall:
* \param Pkg the package that MarkInstall wants to install.
- * \param AutoInst needs a previous MarkInstall this package?
+ * \param AutoInst install this and all its dependencies
* \param Depth recursion depth of this Marker call
* \param FromUser was the install requested by the user?
*/
@@ -461,12 +460,8 @@ class pkgDepCache : protected pkgCache::Namespace
/** \return \b true if it's OK for MarkDelete to remove
* the given package.
*
- * See the default implementation for a simple example how this
- * method can be used.
- * Overriding implementations should use the hold-state-flag to cache
- * results from previous checks of this package - also it should
- * be used if the default resolver implementation is also used to
- * ensure that these packages are handled like "normal" dpkg holds.
+ * The default implementation simply calls all IsDeleteOk*
+ * methods mentioned below, see also #IsInstallOk.
*
* The parameters are the same as in the calling MarkDelete:
* \param Pkg the package that MarkDelete wants to remove.
@@ -498,6 +493,15 @@ class pkgDepCache : protected pkgCache::Namespace
pkgDepCache(pkgCache *Cache,Policy *Plcy = 0);
virtual ~pkgDepCache();
+ protected:
+ // methods called by IsInstallOk
+ bool IsInstallOkMultiArchSameVersionSynced(PkgIterator const &Pkg,
+ bool const AutoInst, unsigned long const Depth, bool const FromUser);
+
+ // methods called by IsDeleteOk
+ bool IsDeleteOkProtectInstallRequests(PkgIterator const &Pkg,
+ bool const rPurge, unsigned long const Depth, bool const FromUser);
+
private:
bool IsModeChangeOk(ModeList const mode, PkgIterator const &Pkg,
unsigned long const Depth, bool const FromUser);
diff --git a/apt-pkg/indexcopy.cc b/apt-pkg/indexcopy.cc
index 0e36b3ded..1d61b974d 100644
--- a/apt-pkg/indexcopy.cc
+++ b/apt-pkg/indexcopy.cc
@@ -544,11 +544,9 @@ bool SigVerify::CopyMetaIndex(string CDROM, string CDName, /*{{{*/
FileFd Rel;
Target.Open(TargetF,FileFd::WriteAtomic);
Rel.Open(prefix + file,FileFd::ReadOnly);
- if (_error->PendingError() == true)
- return false;
if (CopyFile(Rel,Target) == false)
- return false;
-
+ return _error->Error("Copying of '%s' for '%s' from '%s' failed", file.c_str(), CDName.c_str(), prefix.c_str());
+
return true;
}
/*}}}*/
diff --git a/apt-pkg/orderlist.cc b/apt-pkg/orderlist.cc
index 80d8fd490..984ae1d10 100644
--- a/apt-pkg/orderlist.cc
+++ b/apt-pkg/orderlist.cc
@@ -301,9 +301,8 @@ bool pkgOrderList::OrderConfigure()
/* Higher scores order earlier */
int pkgOrderList::Score(PkgIterator Pkg)
{
- static int const ScoreDelete = _config->FindI("OrderList::Score::Delete", 500);
-
- // Removal is always done first
+ // Removals should be done after we dealt with essentials
+ static int const ScoreDelete = _config->FindI("OrderList::Score::Delete", 100);
if (Cache[Pkg].Delete() == true)
return ScoreDelete;
@@ -893,149 +892,136 @@ bool pkgOrderList::DepConfigure(DepIterator D)
/*}}}*/
// OrderList::DepRemove - Removal ordering /*{{{*/
// ---------------------------------------------------------------------
-/* Removal visits all reverse depends. It considers if the dependency
- of the Now state version to see if it is okay with removing this
- package. This check should always fail, but is provided for symetery
- with the other critical handlers.
-
- Loops are preprocessed and logged. Removal loops can also be
- detected in the critical handler. They are characterized by an
- old version of A depending on B but the new version of A conflicting
- with B, thus either A or B must break to install. */
-bool pkgOrderList::DepRemove(DepIterator D)
+/* Checks all given dependencies to see if they are broken by the removal of a
+ package and, if so, fixes them by visiting another provider or or-group
+ member so that the dependee keeps working. This is especially
+ important for Immediate packages, e.g. those depending on an
+ awk implementation. If the dependency can't be fixed with another
+ package, an upgrade of the package will solve the problem. */
+bool pkgOrderList::DepRemove(DepIterator Broken)
{
- if (D.Reverse() == false)
+ if (Broken.Reverse() == false)
return true;
- for (; D.end() == false; ++D)
- if (D->Type == pkgCache::Dep::Depends || D->Type == pkgCache::Dep::PreDepends)
+
+ for (; Broken.end() == false; ++Broken)
+ {
+ if (Broken->Type != pkgCache::Dep::Depends &&
+ Broken->Type != pkgCache::Dep::PreDepends)
+ continue;
+
+ PkgIterator BrokenPkg = Broken.ParentPkg();
+ // uninstalled packages can't break via a remove
+ if (BrokenPkg->CurrentVer == 0)
+ continue;
+
+ // if it's already added, we can't do anything useful
+ if (IsFlag(BrokenPkg, AddPending) == true || IsFlag(BrokenPkg, Added) == true)
+ continue;
+
+ // if the dependee is going to be removed, visit it now
+ if (Cache[BrokenPkg].Delete() == true)
+ return VisitNode(BrokenPkg, "Remove-Dependee");
+
+ // The package stays around, so find out how this is possible
+ for (DepIterator D = BrokenPkg.CurrentVer().DependsList(); D.end() == false;)
{
- // Duplication elimination, consider the current version only
- if (D.ParentPkg().CurrentVer() != D.ParentVer())
+ // only important or-groups need fixing
+ if (D->Type != pkgCache::Dep::Depends &&
+ D->Type != pkgCache::Dep::PreDepends)
+ {
+ ++D;
continue;
+ }
- /* We wish to see if the dep on the parent package is okay
- in the removed (install) state of the target pkg. */
- bool tryFixDeps = false;
- if (CheckDep(D) == true)
+ // Start is the beginning of the or-group, D is the first one after or
+ DepIterator Start = D;
+ bool foundBroken = false;
+ for (bool LastOR = true; D.end() == false && LastOR == true; ++D)
{
- // We want to catch loops with the code below.
- if (IsFlag(D.ParentPkg(),AddPending) == false)
- continue;
+ LastOR = (D->CompareOp & pkgCache::Dep::Or) == pkgCache::Dep::Or;
+ if (D == Broken)
+ foundBroken = true;
}
- else
- tryFixDeps = true;
- // This is the loop detection
- if (IsFlag(D.ParentPkg(),Added) == true ||
- IsFlag(D.ParentPkg(),AddPending) == true)
- {
- if (IsFlag(D.ParentPkg(),AddPending) == true)
- AddLoop(D);
+ // this or-group isn't the broken one: keep searching
+ if (foundBroken == false)
continue;
+
+ // iterate over all members of the or-group searching for a ready replacement
+ bool readyReplacement = false;
+ for (DepIterator OrMember = Start; OrMember != D && readyReplacement == false; ++OrMember)
+ {
+ Version ** Replacements = OrMember.AllTargets();
+ for (Version **R = Replacements; *R != 0; ++R)
+ {
+ VerIterator Ver(Cache,*R);
+ // only currently installed packages can be a replacement
+ PkgIterator RPkg = Ver.ParentPkg();
+ if (RPkg.CurrentVer() != Ver)
+ continue;
+
+ // packages going to be removed can't be a replacement
+ if (Cache[RPkg].Delete() == true)
+ continue;
+
+ readyReplacement = true;
+ break;
+ }
+ delete[] Replacements;
}
- if (tryFixDeps == true)
+ // something else is ready to take over, do nothing
+ if (readyReplacement == true)
+ continue;
+
+ // see if we can visit a replacement
+ bool visitReplacement = false;
+ for (DepIterator OrMember = Start; OrMember != D && visitReplacement == false; ++OrMember)
{
- for (pkgCache::DepIterator F = D.ParentPkg().CurrentVer().DependsList();
- F.end() == false; ++F)
+ Version ** Replacements = OrMember.AllTargets();
+ for (Version **R = Replacements; *R != 0; ++R)
{
- if (F->Type != pkgCache::Dep::Depends && F->Type != pkgCache::Dep::PreDepends)
+ VerIterator Ver(Cache,*R);
+ // consider only versions we plan to install
+ PkgIterator RPkg = Ver.ParentPkg();
+ if (Cache[RPkg].Install() == false || Cache[RPkg].InstallVer != Ver)
continue;
- // Check the Providers
- if (F.TargetPkg()->ProvidesList != 0)
- {
- pkgCache::PrvIterator Prov = F.TargetPkg().ProvidesList();
- for (; Prov.end() == false; ++Prov)
- {
- pkgCache::PkgIterator const P = Prov.OwnerPkg();
- if (IsFlag(P, InList) == true &&
- IsFlag(P, AddPending) == true &&
- IsFlag(P, Added) == false &&
- Cache[P].InstallVer == 0)
- break;
- }
- if (Prov.end() == false)
- for (pkgCache::PrvIterator Prv = F.TargetPkg().ProvidesList();
- Prv.end() == false; ++Prv)
- {
- pkgCache::PkgIterator const P = Prv.OwnerPkg();
- if (IsFlag(P, InList) == true &&
- IsFlag(P, AddPending) == false &&
- Cache[P].InstallVer != 0 &&
- VisitNode(P, "Remove-P") == true)
- {
- Flag(P, Immediate);
- tryFixDeps = false;
- break;
- }
- }
- if (tryFixDeps == false)
- break;
- }
- // Check for Or groups
- if ((F->CompareOp & pkgCache::Dep::Or) != pkgCache::Dep::Or)
+ // loops are not going to help us, so don't create them
+ if (IsFlag(RPkg, AddPending) == true)
continue;
- // Lets see if the package is part of the Or group
- pkgCache::DepIterator S = F;
- for (; S.end() == false; ++S)
+
+ if (IsMissing(RPkg) == true)
+ continue;
+
+ visitReplacement = true;
+ if (IsFlag(BrokenPkg, Immediate) == false)
{
- if (S.TargetPkg() == D.TargetPkg())
+ if (VisitNode(RPkg, "Remove-Rep") == true)
break;
- if ((S->CompareOp & pkgCache::Dep::Or) != pkgCache::Dep::Or ||
- CheckDep(S)) // Or group is satisfied by another package
- for (;S.end() == false; ++S);
}
- if (S.end() == true)
- continue;
- // skip to the end of the or group
- for (;S.end() == false && (S->CompareOp & pkgCache::Dep::Or) == pkgCache::Dep::Or; ++S);
- ++S;
- // The soon to be removed is part of the Or group
- // start again in the or group and find something which will serve as replacement
- for (; F.end() == false && F != S; ++F)
+ else
{
- if (IsFlag(F.TargetPkg(), InList) == true &&
- IsFlag(F.TargetPkg(), AddPending) == false &&
- Cache[F.TargetPkg()].InstallVer != 0 &&
- VisitNode(F.TargetPkg(), "Remove-Target") == true)
- {
- Flag(F.TargetPkg(), Immediate);
- tryFixDeps = false;
+ Flag(RPkg, Immediate);
+ if (VisitNode(RPkg, "Remove-ImmRep") == true)
break;
- }
- else if (F.TargetPkg()->ProvidesList != 0)
- {
- pkgCache::PrvIterator Prv = F.TargetPkg().ProvidesList();
- for (; Prv.end() == false; ++Prv)
- {
- if (IsFlag(Prv.OwnerPkg(), InList) == true &&
- IsFlag(Prv.OwnerPkg(), AddPending) == false &&
- Cache[Prv.OwnerPkg()].InstallVer != 0 &&
- VisitNode(Prv.OwnerPkg(), "Remove-Owner") == true)
- {
- Flag(Prv.OwnerPkg(), Immediate);
- tryFixDeps = false;
- break;
- }
- }
- if (Prv.end() == false)
- break;
- }
}
- if (tryFixDeps == false)
- break;
+ visitReplacement = false;
}
+ delete[] Replacements;
}
-
- // Skip over missing files
- if (IsMissing(D.ParentPkg()) == true)
+ if (visitReplacement == true)
continue;
-
- if (VisitNode(D.ParentPkg(), "Remove-Parent") == false)
+
+ // the broken package in current version can't be fixed, so install new version
+ if (IsMissing(BrokenPkg) == true)
+ break;
+
+ if (VisitNode(BrokenPkg, "Remove-Upgrade") == false)
return false;
}
-
+ }
+
return true;
}
/*}}}*/
diff --git a/apt-pkg/packagemanager.cc b/apt-pkg/packagemanager.cc
index e2d7dbf2a..8c0d2e855 100644
--- a/apt-pkg/packagemanager.cc
+++ b/apt-pkg/packagemanager.cc
@@ -338,8 +338,9 @@ bool pkgPackageManager::SmartConfigure(PkgIterator Pkg, int const Depth)
however if there is a loop (A depends on B, B depends on A) this will not
be the case, so check for dependencies before configuring. */
bool Bad = false, Changed = false;
- const unsigned int max_loops = _config->FindI("APT::pkgPackageManager::MaxLoopCount", 500);
+ const unsigned int max_loops = _config->FindI("APT::pkgPackageManager::MaxLoopCount", 5000);
unsigned int i=0;
+ std::list<DepIterator> needConfigure;
do
{
Changed = false;
@@ -353,7 +354,7 @@ bool pkgPackageManager::SmartConfigure(PkgIterator Pkg, int const Depth)
continue;
Bad = true;
- // Search for dependencies which are unpacked but aren't configured yet (maybe loops)
+ // Check for dependencies that have not been unpacked, probably due to loops.
for (DepIterator Cur = Start; true; ++Cur)
{
SPtrArray<Version *> VList = Cur.AllTargets();
@@ -373,51 +374,66 @@ bool pkgPackageManager::SmartConfigure(PkgIterator Pkg, int const Depth)
}
// Check if the version that is going to be installed will satisfy the dependency
- if (Cache[DepPkg].InstallVer != *I)
+ if (Cache[DepPkg].InstallVer != *I || List->IsNow(DepPkg) == false)
continue;
- if (List->IsFlag(DepPkg,pkgOrderList::UnPacked))
+ if (PkgLoop == true)
{
- if (List->IsFlag(DepPkg,pkgOrderList::Loop) && PkgLoop)
- {
- // This dependency has already been dealt with by another SmartConfigure on Pkg
- Bad = false;
- break;
- }
- /* Check for a loop to prevent one forming
- If A depends on B and B depends on A, SmartConfigure will
- just hop between them if this is not checked. Dont remove the
- loop flag after finishing however as loop is already set.
- This means that there is another SmartConfigure call for this
- package and it will remove the loop flag */
+ if (Debug)
+ std::clog << OutputInDepth(Depth) << "Package " << Pkg << " loops in SmartConfigure" << std::endl;
+ Bad = false;
+ break;
+ }
+ else
+ {
+ if (Debug)
+ clog << OutputInDepth(Depth) << "Unpacking " << DepPkg.FullName() << " to avoid loop " << Cur << endl;
if (PkgLoop == false)
List->Flag(Pkg,pkgOrderList::Loop);
- if (SmartConfigure(DepPkg, Depth + 1) == true)
+ if (SmartUnPack(DepPkg, true, Depth + 1) == true)
{
Bad = false;
if (List->IsFlag(DepPkg,pkgOrderList::Loop) == false)
- Changed = true;
+ Changed = true;
}
if (PkgLoop == false)
- List->RmFlag(Pkg,pkgOrderList::Loop);
- // If SmartConfigure was succesfull, Bad is false, so break
+ List->RmFlag(Pkg,pkgOrderList::Loop);
if (Bad == false)
break;
}
- else if (List->IsFlag(DepPkg,pkgOrderList::Configured))
- {
- Bad = false;
- break;
- }
}
- if (Cur == End)
+
+ if (Cur == End || Bad == false)
break;
- }
+ }
if (Bad == false)
continue;
- // Check for dependencies that have not been unpacked, probably due to loops.
+ needConfigure.push_back(Start);
+ }
+ if (i++ > max_loops)
+ return _error->Error("Internal error: MaxLoopCount reached in SmartUnPack (1) for %s, aborting", Pkg.FullName().c_str());
+ } while (Changed == true);
+
+ Bad = false, Changed = false, i = 0;
+ do
+ {
+ Changed = false;
+ for (std::list<DepIterator>::const_iterator D = needConfigure.begin(); D != needConfigure.end(); ++D)
+ {
+ // Compute a single dependency element (glob or) without modifying D
+ pkgCache::DepIterator Start, End;
+ {
+ pkgCache::DepIterator Discard = *D;
+ Discard.GlobOr(Start,End);
+ }
+
+ if (End->Type != pkgCache::Dep::Depends)
+ continue;
+ Bad = true;
+
+ // Search for dependencies which are unpacked but aren't configured yet (maybe loops)
for (DepIterator Cur = Start; true; ++Cur)
{
SPtrArray<Version *> VList = Cur.AllTargets();
@@ -428,44 +444,53 @@ bool pkgPackageManager::SmartConfigure(PkgIterator Pkg, int const Depth)
PkgIterator DepPkg = Ver.ParentPkg();
// Check if the version that is going to be installed will satisfy the dependency
- if (Cache[DepPkg].InstallVer != *I || List->IsNow(DepPkg) == false)
+ if (Cache[DepPkg].InstallVer != *I)
continue;
- if (PkgLoop == true)
- {
- if (Debug)
- std::clog << OutputInDepth(Depth) << "Package " << Pkg << " loops in SmartConfigure" << std::endl;
- Bad = false;
- break;
- }
- else
+ if (List->IsFlag(DepPkg,pkgOrderList::UnPacked))
{
- if (Debug)
- clog << OutputInDepth(Depth) << "Unpacking " << DepPkg.FullName() << " to avoid loop " << Cur << endl;
+ if (List->IsFlag(DepPkg,pkgOrderList::Loop) && PkgLoop)
+ {
+ // This dependency has already been dealt with by another SmartConfigure on Pkg
+ Bad = false;
+ break;
+ }
+ /* Check for a loop to prevent one forming
+ If A depends on B and B depends on A, SmartConfigure will
+ just hop between them if this is not checked. Don't remove the
+ loop flag after finishing however as loop is already set.
+ This means that there is another SmartConfigure call for this
+ package and it will remove the loop flag */
if (PkgLoop == false)
List->Flag(Pkg,pkgOrderList::Loop);
- if (SmartUnPack(DepPkg, true, Depth + 1) == true)
+ if (SmartConfigure(DepPkg, Depth + 1) == true)
{
Bad = false;
if (List->IsFlag(DepPkg,pkgOrderList::Loop) == false)
- Changed = true;
+ Changed = true;
}
if (PkgLoop == false)
- List->RmFlag(Pkg,pkgOrderList::Loop);
+ List->RmFlag(Pkg,pkgOrderList::Loop);
+ // If SmartConfigure was successful, Bad is false, so break
if (Bad == false)
break;
}
+ else if (List->IsFlag(DepPkg,pkgOrderList::Configured))
+ {
+ Bad = false;
+ break;
+ }
}
-
- if (Cur == End)
+ if (Cur == End || Bad == false)
break;
- }
+ }
+
if (Bad == true && Changed == false && Debug == true)
- std::clog << OutputInDepth(Depth) << "Could not satisfy " << Start << std::endl;
+ std::clog << OutputInDepth(Depth) << "Could not satisfy " << *D << std::endl;
}
if (i++ > max_loops)
- return _error->Error("Internal error: MaxLoopCount reached in SmartUnPack for %s, aborting", Pkg.FullName().c_str());
+ return _error->Error("Internal error: MaxLoopCount reached in SmartUnPack (2) for %s, aborting", Pkg.FullName().c_str());
} while (Changed == true);
if (Bad) {
@@ -603,7 +628,7 @@ bool pkgPackageManager::SmartUnPack(PkgIterator Pkg, bool const Immediate, int c
This will be either dealt with if the package is configured as a dependency of Pkg (if and when Pkg is configured),
or by the ConfigureAll call at the end of the for loop in OrderInstall. */
bool Changed = false;
- const unsigned int max_loops = _config->FindI("APT::pkgPackageManager::MaxLoopCount", 500);
+ const unsigned int max_loops = _config->FindI("APT::pkgPackageManager::MaxLoopCount", 5000);
unsigned int i = 0;
do
{
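The loop guard stays configurable; a hypothetical apt.conf snippet for chasing ordering problems:

    // apt.conf (values illustrative)
    APT::pkgPackageManager::MaxLoopCount "100";
    Debug::pkgPackageManager "true";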
diff --git a/apt-pkg/pkgcache.cc b/apt-pkg/pkgcache.cc
index d7725563b..52e814c0b 100644
--- a/apt-pkg/pkgcache.cc
+++ b/apt-pkg/pkgcache.cc
@@ -924,6 +924,18 @@ string pkgCache::VerIterator::RelStr() const
return Res;
}
/*}}}*/
+// VerIterator::MultiArchType - string representing MultiArch flag /*{{{*/
+const char * pkgCache::VerIterator::MultiArchType() const
+{
+ if ((S->MultiArch & pkgCache::Version::Same) == pkgCache::Version::Same)
+ return "same";
+ else if ((S->MultiArch & pkgCache::Version::Foreign) == pkgCache::Version::Foreign)
+ return "foreign";
+ else if ((S->MultiArch & pkgCache::Version::Allowed) == pkgCache::Version::Allowed)
+ return "allowed";
+ return "none";
+}
+ /*}}}*/
// PkgFileIterator::IsOk - Checks if the cache is in sync with the file /*{{{*/
// ---------------------------------------------------------------------
/* This stats the file and compares its stats with the ones that were
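A small usage sketch for the new accessor, mirroring how SendPkgsInfo prints it per action above (the helper name is hypothetical):

    #include <apt-pkg/pkgcache.h>
    #include <stdio.h>

    // print version, architecture and Multi-Arch type of every version of a package
    static void DumpVersions(pkgCache::PkgIterator const &Pkg)
    {
       for (pkgCache::VerIterator Ver = Pkg.VersionList(); Ver.end() == false; ++Ver)
          printf("%s %s %s\n", Ver.VerStr(), Ver.Arch(), Ver.MultiArchType());
    }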
diff --git a/apt-pkg/pkgcachegen.cc b/apt-pkg/pkgcachegen.cc
index 3f10b6c40..7ce7aba7b 100644
--- a/apt-pkg/pkgcachegen.cc
+++ b/apt-pkg/pkgcachegen.cc
@@ -296,6 +296,8 @@ bool pkgCacheGenerator::MergeListPackage(ListParser &List, pkgCache::PkgIterator
// Find the right version to write the description
MD5SumValue CurMd5 = List.Description_md5();
+ if (CurMd5.Value().empty() == true || List.Description().empty() == true)
+ return true;
std::string CurLang = List.DescriptionLanguage();
for (Ver = Pkg.VersionList(); Ver.end() == false; ++Ver)
@@ -480,7 +482,7 @@ bool pkgCacheGenerator::MergeListVersion(ListParser &List, pkgCache::PkgIterator
/* Record the Description (it is not translated) */
MD5SumValue CurMd5 = List.Description_md5();
- if (CurMd5.Value().empty() == true)
+ if (CurMd5.Value().empty() == true || List.Description().empty() == true)
return true;
std::string CurLang = List.DescriptionLanguage();