author    Michael Vogt <mvo@debian.org>    2013-08-15 09:26:00 +0200
committer Michael Vogt <mvo@debian.org>    2013-08-15 09:26:00 +0200
commit    d7a4635391d9ff36152603ab6faa6eafa206750a (patch)
tree      e16a562e3e9a195cae286433c5751a1ab9990635 /apt-pkg
parent    2a49601f69e08f06fb2727d869d420daacdd09d5 (diff)
parent    183116d1a64a2610b88fa6b50f2c5199b69d5841 (diff)
Merge branch 'debian/sid' into debian/experimental
Conflicts:
    apt-pkg/contrib/strutl.cc
    apt-pkg/deb/dpkgpm.cc
    configure.ac
    debian/changelog
    doc/po/apt-doc.pot
    po/apt-all.pot
    po/ar.po
    po/ast.po
    po/bg.po
    po/bs.po
    po/ca.po
    po/cs.po
    po/cy.po
    po/da.po
    po/de.po
    po/dz.po
    po/el.po
    po/es.po
    po/eu.po
    po/fi.po
    po/fr.po
    po/gl.po
    po/hu.po
    po/it.po
    po/ja.po
    po/km.po
    po/ko.po
    po/ku.po
    po/lt.po
    po/mr.po
    po/nb.po
    po/ne.po
    po/nl.po
    po/nn.po
    po/pl.po
    po/pt.po
    po/pt_BR.po
    po/ro.po
    po/ru.po
    po/sk.po
    po/sl.po
    po/sv.po
    po/th.po
    po/tl.po
    po/uk.po
    po/vi.po
    po/zh_CN.po
    po/zh_TW.po
    test/integration/framework
    test/integration/test-bug-602412-dequote-redirect
    test/integration/test-ubuntu-bug-346386-apt-get-update-paywall
    test/interactive-helper/aptwebserver.cc
    test/interactive-helper/makefile
Diffstat (limited to 'apt-pkg')
-rw-r--r--  apt-pkg/acquire-item.cc         |  61
-rw-r--r--  apt-pkg/acquire-item.h          |   2
-rw-r--r--  apt-pkg/acquire-worker.cc       |  13
-rw-r--r--  apt-pkg/algorithms.cc           |  23
-rw-r--r--  apt-pkg/algorithms.h            |   7
-rw-r--r--  apt-pkg/aptconfiguration.cc     |   4
-rw-r--r--  apt-pkg/cacheiterators.h        |   1
-rw-r--r--  apt-pkg/cacheset.h              |   1
-rw-r--r--  apt-pkg/cdrom.cc                |   8
-rw-r--r--  apt-pkg/cdrom.h                 |   2
-rw-r--r--  apt-pkg/contrib/cdromutl.cc     |   3
-rw-r--r--  apt-pkg/contrib/configuration.cc |  2
-rw-r--r--  apt-pkg/contrib/error.cc        |  10
-rw-r--r--  apt-pkg/contrib/error.h         |  41
-rw-r--r--  apt-pkg/contrib/fileutl.cc      | 220
-rw-r--r--  apt-pkg/contrib/fileutl.h       |   4
-rw-r--r--  apt-pkg/contrib/gpgv.cc         |   2
-rw-r--r--  apt-pkg/contrib/gpgv.h          |  15
-rw-r--r--  apt-pkg/contrib/sha2.h          |   4
-rw-r--r--  apt-pkg/contrib/sha2_internal.cc |  12
-rw-r--r--  apt-pkg/contrib/strutl.cc       |  19
-rw-r--r--  apt-pkg/deb/deblistparser.cc    | 110
-rw-r--r--  apt-pkg/deb/debmetaindex.cc     |  24
-rw-r--r--  apt-pkg/deb/dpkgpm.cc           | 107
-rw-r--r--  apt-pkg/deb/dpkgpm.h            |   4
-rw-r--r--  apt-pkg/depcache.cc             |  90
-rw-r--r--  apt-pkg/depcache.h              |  30
-rw-r--r--  apt-pkg/indexcopy.cc            |  23
-rw-r--r--  apt-pkg/indexcopy.h             |  10
-rw-r--r--  apt-pkg/indexrecords.cc         |  17
-rw-r--r--  apt-pkg/orderlist.cc            | 224
-rw-r--r--  apt-pkg/packagemanager.cc       | 121
-rw-r--r--  apt-pkg/pkgcache.cc             |  12
-rw-r--r--  apt-pkg/pkgcachegen.cc          |   4
-rw-r--r--  apt-pkg/policy.cc               |  25
-rw-r--r--  apt-pkg/tagfile.h               |   2
-rw-r--r--  apt-pkg/vendorlist.cc           |   2
37 files changed, 743 insertions, 516 deletions
diff --git a/apt-pkg/acquire-item.cc b/apt-pkg/acquire-item.cc
index 5eee8c709..12000a8c1 100644
--- a/apt-pkg/acquire-item.cc
+++ b/apt-pkg/acquire-item.cc
@@ -1067,8 +1067,7 @@ pkgAcqMetaSig::pkgAcqMetaSig(pkgAcquire *Owner, /*{{{*/
string Final = _config->FindDir("Dir::State::lists");
Final += URItoFileName(RealURI);
- struct stat Buf;
- if (stat(Final.c_str(),&Buf) == 0)
+ if (RealFileExists(Final) == true)
{
// File was already in place. It needs to be re-downloaded/verified
// because Release might have changed, we do give it a differnt
@@ -1082,6 +1081,19 @@ pkgAcqMetaSig::pkgAcqMetaSig(pkgAcquire *Owner, /*{{{*/
QueueURI(Desc);
}
/*}}}*/
+pkgAcqMetaSig::~pkgAcqMetaSig() /*{{{*/
+{
+ // if the file was never queued undo file-changes done in the constructor
+ if (QueueCounter == 1 && Status == StatIdle && FileSize == 0 && Complete == false &&
+ LastGoodSig.empty() == false)
+ {
+ string const Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI);
+ if (RealFileExists(Final) == false && RealFileExists(LastGoodSig) == true)
+ Rename(LastGoodSig, Final);
+ }
+
+}
+ /*}}}*/
// pkgAcqMetaSig::Custom600Headers - Insert custom request headers /*{{{*/
// ---------------------------------------------------------------------
/* The only header we use is the last-modified header. */
@@ -1300,7 +1312,14 @@ void pkgAcqMetaIndex::RetrievalDone(string Message) /*{{{*/
string FinalFile = _config->FindDir("Dir::State::lists");
FinalFile += URItoFileName(RealURI);
if (SigFile == DestFile)
+ {
SigFile = FinalFile;
+ // constructor of pkgAcqMetaClearSig moved it out of the way,
+ // now move it back in on IMS hit for the 'old' file
+ string const OldClearSig = DestFile + ".reverify";
+ if (RealFileExists(OldClearSig) == true)
+ Rename(OldClearSig, FinalFile);
+ }
DestFile = FinalFile;
}
Complete = true;
@@ -1373,9 +1392,20 @@ void pkgAcqMetaIndex::QueueIndexes(bool verify) /*{{{*/
{
HashString ExpectedIndexHash;
const indexRecords::checkSum *Record = MetaIndexParser->Lookup((*Target)->MetaKey);
+ bool compressedAvailable = false;
if (Record == NULL)
{
- if (verify == true && (*Target)->IsOptional() == false)
+ if ((*Target)->IsOptional() == true)
+ {
+ std::vector<std::string> types = APT::Configuration::getCompressionTypes();
+ for (std::vector<std::string>::const_iterator t = types.begin(); t != types.end(); ++t)
+ if (MetaIndexParser->Exists(string((*Target)->MetaKey).append(".").append(*t)) == true)
+ {
+ compressedAvailable = true;
+ break;
+ }
+ }
+ else if (verify == true)
{
Status = StatAuthError;
strprintf(ErrorText, _("Unable to find expected entry '%s' in Release file (Wrong sources.list entry or malformed file)"), (*Target)->MetaKey.c_str());
@@ -1404,7 +1434,7 @@ void pkgAcqMetaIndex::QueueIndexes(bool verify) /*{{{*/
if ((*Target)->IsSubIndex() == true)
new pkgAcqSubIndex(Owner, (*Target)->URI, (*Target)->Description,
(*Target)->ShortDesc, ExpectedIndexHash);
- else if (transInRelease == false || MetaIndexParser->Exists((*Target)->MetaKey) == true)
+ else if (transInRelease == false || Record != NULL || compressedAvailable == true)
{
if (_config->FindB("Acquire::PDiffs",true) == true && transInRelease == true &&
MetaIndexParser->Exists(string((*Target)->MetaKey).append(".diff/Index")) == true)
@@ -1526,7 +1556,7 @@ void pkgAcqMetaIndex::Failed(string Message,pkgAcquire::MethodConfig *Cnf)
VerifiedSigFile.append(".gpg");
Rename(LastGoodSigFile, VerifiedSigFile);
Status = StatTransientNetworkError;
- _error->Warning(_("A error occurred during the signature "
+ _error->Warning(_("An error occurred during the signature "
"verification. The repository is not updated "
"and the previous index files will be used. "
"GPG error: %s: %s\n"),
@@ -1588,14 +1618,25 @@ pkgAcqMetaClearSig::pkgAcqMetaClearSig(pkgAcquire *Owner, /*{{{*/
// keep the old InRelease around in case of transistent network errors
string const Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI);
- struct stat Buf;
- if (stat(Final.c_str(),&Buf) == 0)
+ if (RealFileExists(Final) == true)
{
string const LastGoodSig = DestFile + ".reverify";
Rename(Final,LastGoodSig);
}
}
/*}}}*/
+pkgAcqMetaClearSig::~pkgAcqMetaClearSig() /*{{{*/
+{
+ // if the file was never queued undo file-changes done in the constructor
+ if (QueueCounter == 1 && Status == StatIdle && FileSize == 0 && Complete == false)
+ {
+ string const Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI);
+ string const LastGoodSig = DestFile + ".reverify";
+ if (RealFileExists(Final) == false && RealFileExists(LastGoodSig) == true)
+ Rename(LastGoodSig, Final);
+ }
+}
+ /*}}}*/
// pkgAcqMetaClearSig::Custom600Headers - Insert custom request headers /*{{{*/
// ---------------------------------------------------------------------
// FIXME: this can go away once the InRelease file is used widely
@@ -1606,7 +1647,11 @@ string pkgAcqMetaClearSig::Custom600Headers()
struct stat Buf;
if (stat(Final.c_str(),&Buf) != 0)
- return "\nIndex-File: true\nFail-Ignore: true\n";
+ {
+ Final = DestFile + ".reverify";
+ if (stat(Final.c_str(),&Buf) != 0)
+ return "\nIndex-File: true\nFail-Ignore: true\n";
+ }
return "\nIndex-File: true\nFail-Ignore: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
}
diff --git a/apt-pkg/acquire-item.h b/apt-pkg/acquire-item.h
index 51d539450..10c855e63 100644
--- a/apt-pkg/acquire-item.h
+++ b/apt-pkg/acquire-item.h
@@ -774,6 +774,7 @@ class pkgAcqMetaSig : public pkgAcquire::Item
std::string MetaIndexURI, std::string MetaIndexURIDesc, std::string MetaIndexShortDesc,
const std::vector<struct IndexTarget*>* IndexTargets,
indexRecords* MetaIndexParser);
+ virtual ~pkgAcqMetaSig();
};
/*}}}*/
/** \brief An item that is responsible for downloading the meta-index {{{
@@ -904,6 +905,7 @@ public:
std::string const &MetaSigURI, std::string const &MetaSigURIDesc, std::string const &MetaSigShortDesc,
const std::vector<struct IndexTarget*>* IndexTargets,
indexRecords* MetaIndexParser);
+ virtual ~pkgAcqMetaClearSig();
};
/*}}}*/
/** \brief An item that is responsible for fetching a package file. {{{
diff --git a/apt-pkg/acquire-worker.cc b/apt-pkg/acquire-worker.cc
index 9d90b08bc..44a84216a 100644
--- a/apt-pkg/acquire-worker.cc
+++ b/apt-pkg/acquire-worker.cc
@@ -305,7 +305,15 @@ bool pkgAcquire::Worker::RunMessages()
OwnerQ->ItemDone(Itm);
unsigned long long const ServerSize = strtoull(LookupTag(Message,"Size","0").c_str(), NULL, 10);
- if (TotalSize != 0 && ServerSize != TotalSize)
+ bool isHit = StringToBool(LookupTag(Message,"IMS-Hit"),false) ||
+ StringToBool(LookupTag(Message,"Alt-IMS-Hit"),false);
+ // Using the https method the server might return 200, but the
+ // If-Modified-Since condition is not satsified, libcurl will
+ // discard the download. In this case, however, TotalSize will be
+ // set to the actual size of the file, while ServerSize will be set
+ // to 0. Therefore, if the item is marked as a hit and the
+ // downloaded size (ServerSize) is 0, we ignore TotalSize.
+ if (TotalSize != 0 && (!isHit || ServerSize != 0) && ServerSize != TotalSize)
_error->Warning("Size of file %s is not what the server reported %s %llu",
Owner->DestFile.c_str(), LookupTag(Message,"Size","0").c_str(),TotalSize);
@@ -332,8 +340,7 @@ bool pkgAcquire::Worker::RunMessages()
// Log that we are done
if (Log != 0)
{
- if (StringToBool(LookupTag(Message,"IMS-Hit"),false) == true ||
- StringToBool(LookupTag(Message,"Alt-IMS-Hit"),false) == true)
+ if (isHit)
{
/* Hide 'hits' for local only sources - we also manage to
hide gets */
diff --git a/apt-pkg/algorithms.cc b/apt-pkg/algorithms.cc
index b2a40add1..6296e8fe8 100644
--- a/apt-pkg/algorithms.cc
+++ b/apt-pkg/algorithms.cc
@@ -646,7 +646,10 @@ void pkgProblemResolver::MakeScores()
D->Type != pkgCache::Dep::Recommends))
continue;
- Scores[I->ID] += abs(OldScores[D.ParentPkg()->ID]);
+ // Do not propagate negative scores otherwise
+ // an extra (-2) package might score better than an optional (-1)
+ if (OldScores[D.ParentPkg()->ID] > 0)
+ Scores[I->ID] += OldScores[D.ParentPkg()->ID];
}
}
@@ -840,8 +843,10 @@ bool pkgProblemResolver::ResolveInternal(bool const BrokenFix)
}
while (Again == true);
- if (Debug == true)
- clog << "Starting" << endl;
+ if (Debug == true) {
+ clog << "Starting pkgProblemResolver with broken count: "
+ << Cache.BrokenCount() << endl;
+ }
MakeScores();
@@ -869,8 +874,10 @@ bool pkgProblemResolver::ResolveInternal(bool const BrokenFix)
}
}
- if (Debug == true)
- clog << "Starting 2" << endl;
+ if (Debug == true) {
+ clog << "Starting 2 pkgProblemResolver with broken count: "
+ << Cache.BrokenCount() << endl;
+ }
/* Now consider all broken packages. For each broken package we either
remove the package or fix it's problem. We do this once, it should
@@ -1435,9 +1442,11 @@ bool pkgProblemResolver::ResolveByKeepInternal()
return true;
}
/*}}}*/
-// ProblemResolver::InstallProtect - Install all protected packages /*{{{*/
+// ProblemResolver::InstallProtect - deprecated cpu-eating no-op /*{{{*/
// ---------------------------------------------------------------------
-/* This is used to make sure protected packages are installed */
+/* Actions issued with FromUser bit set are protected from further
+ modification (expect by other calls with FromUser set) nowadays , so we
+ don't need to reissue actions here, they are already set in stone. */
void pkgProblemResolver::InstallProtect()
{
pkgDepCache::ActionGroup group(Cache);
diff --git a/apt-pkg/algorithms.h b/apt-pkg/algorithms.h
index aff8a68f2..7f58c8eed 100644
--- a/apt-pkg/algorithms.h
+++ b/apt-pkg/algorithms.h
@@ -36,6 +36,8 @@
#include <iostream>
+#include <apt-pkg/macros.h>
+
#ifndef APT_8_CLEANER_HEADERS
#include <apt-pkg/acquire.h>
using std::ostream;
@@ -132,9 +134,8 @@ class pkgProblemResolver /*{{{*/
// Try to resolve problems only by using keep
bool ResolveByKeep();
- // Install all protected packages
- void InstallProtect();
-
+ __deprecated void InstallProtect();
+
pkgProblemResolver(pkgDepCache *Cache);
~pkgProblemResolver();
};
diff --git a/apt-pkg/aptconfiguration.cc b/apt-pkg/aptconfiguration.cc
index 37c846582..e32e553a4 100644
--- a/apt-pkg/aptconfiguration.cc
+++ b/apt-pkg/aptconfiguration.cc
@@ -388,12 +388,12 @@ std::vector<std::string> const Configuration::getArchitectures(bool const &Cache
if (dpkgMultiArch == 0) {
close(external[0]);
std::string const chrootDir = _config->FindDir("DPkg::Chroot-Directory");
- if (chrootDir != "/" && chroot(chrootDir.c_str()) != 0)
- _error->WarningE("getArchitecture", "Couldn't chroot into %s for dpkg --print-foreign-architectures", chrootDir.c_str());
int const nullfd = open("/dev/null", O_RDONLY);
dup2(nullfd, STDIN_FILENO);
dup2(external[1], STDOUT_FILENO);
dup2(nullfd, STDERR_FILENO);
+ if (chrootDir != "/" && chroot(chrootDir.c_str()) != 0)
+ _error->WarningE("getArchitecture", "Couldn't chroot into %s for dpkg --print-foreign-architectures", chrootDir.c_str());
execvp(Args[0], (char**) &Args[0]);
_error->WarningE("getArchitecture", "Can't detect foreign architectures supported by dpkg!");
_exit(100);
diff --git a/apt-pkg/cacheiterators.h b/apt-pkg/cacheiterators.h
index 179a0e963..886d84838 100644
--- a/apt-pkg/cacheiterators.h
+++ b/apt-pkg/cacheiterators.h
@@ -220,6 +220,7 @@ class pkgCache::VerIterator : public Iterator<Version, VerIterator> {
inline VerFileIterator FileList() const;
bool Downloadable() const;
inline const char *PriorityType() const {return Owner->Priority(S->Priority);};
+ const char *MultiArchType() const;
std::string RelStr() const;
bool Automatic() const;
diff --git a/apt-pkg/cacheset.h b/apt-pkg/cacheset.h
index 2a45910ba..d7328d705 100644
--- a/apt-pkg/cacheset.h
+++ b/apt-pkg/cacheset.h
@@ -11,7 +11,6 @@
// Include Files /*{{{*/
#include <iostream>
#include <fstream>
-#include <list>
#include <map>
#include <set>
#include <list>
diff --git a/apt-pkg/cdrom.cc b/apt-pkg/cdrom.cc
index 9a9a854bf..a5668a50a 100644
--- a/apt-pkg/cdrom.cc
+++ b/apt-pkg/cdrom.cc
@@ -829,6 +829,14 @@ bool pkgCdrom::Add(pkgCdromStatus *log) /*{{{*/
log->Update(msg.str());
log->Update(_("Copying package lists..."), STEP_COPY);
}
+
+ // check for existence and possibly create state directory for copying
+ string const listDir = _config->FindDir("Dir::State::lists");
+ string const partialListDir = listDir + "partial/";
+ if (CreateAPTDirectoryIfNeeded(_config->FindDir("Dir::State"), partialListDir) == false &&
+ CreateAPTDirectoryIfNeeded(listDir, partialListDir) == false)
+ return _error->Errno("cdrom", _("List directory %spartial is missing."), listDir.c_str());
+
// take care of the signatures and copy them if they are ok
// (we do this before PackageCopy as it modifies "List" and "SourceList")
SigVerify SignVerify;
diff --git a/apt-pkg/cdrom.h b/apt-pkg/cdrom.h
index 4fc3d3928..7d19eb813 100644
--- a/apt-pkg/cdrom.h
+++ b/apt-pkg/cdrom.h
@@ -18,7 +18,7 @@ class pkgCdromStatus /*{{{*/
int totalSteps;
public:
- pkgCdromStatus() {};
+ pkgCdromStatus() : totalSteps(0) {};
virtual ~pkgCdromStatus() {};
// total steps
diff --git a/apt-pkg/contrib/cdromutl.cc b/apt-pkg/contrib/cdromutl.cc
index 187f6bd59..afa01a562 100644
--- a/apt-pkg/contrib/cdromutl.cc
+++ b/apt-pkg/contrib/cdromutl.cc
@@ -122,8 +122,9 @@ bool MountCdrom(string Path, string DeviceName)
if (Child == 0)
{
// Make all the fds /dev/null
+ int null_fd = open("/dev/null",O_RDWR);
for (int I = 0; I != 3; I++)
- dup2(open("/dev/null",O_RDWR),I);
+ dup2(null_fd, I);
if (_config->Exists("Acquire::cdrom::"+Path+"::Mount") == true)
{
diff --git a/apt-pkg/contrib/configuration.cc b/apt-pkg/contrib/configuration.cc
index 31cd9f8ad..4ef4663c0 100644
--- a/apt-pkg/contrib/configuration.cc
+++ b/apt-pkg/contrib/configuration.cc
@@ -823,7 +823,7 @@ bool ReadConfigFile(Configuration &Conf,const string &FName,bool const &AsSectio
// Go down a level
if (TermChar == '{')
{
- if (StackPos <= 100)
+ if (StackPos < sizeof(Stack)/sizeof(std::string))
Stack[StackPos++] = ParentTag;
/* Make sectional tags incorperate the section into the
diff --git a/apt-pkg/contrib/error.cc b/apt-pkg/contrib/error.cc
index 122e2c809..d457781c3 100644
--- a/apt-pkg/contrib/error.cc
+++ b/apt-pkg/contrib/error.cc
@@ -67,9 +67,10 @@ bool GlobalError::NAME (const char *Function, const char *Description,...) { \
int const errsv = errno; \
while (true) { \
va_start(args,Description); \
- if (InsertErrno(TYPE, Function, Description, args, errsv, msgSize) == false) \
- break; \
+ bool const retry = InsertErrno(TYPE, Function, Description, args, errsv, msgSize); \
va_end(args); \
+ if (retry == false) \
+ break; \
} \
return false; \
}
@@ -88,9 +89,10 @@ bool GlobalError::InsertErrno(MsgType const &type, const char *Function,
int const errsv = errno;
while (true) {
va_start(args,Description);
- if (InsertErrno(type, Function, Description, args, errsv, msgSize) == false)
- break;
+ bool const retry = InsertErrno(type, Function, Description, args, errsv, msgSize);
va_end(args);
+ if (retry == false)
+ break;
}
return false;
}
diff --git a/apt-pkg/contrib/error.h b/apt-pkg/contrib/error.h
index 21c51c1be..7d09b2d4a 100644
--- a/apt-pkg/contrib/error.h
+++ b/apt-pkg/contrib/error.h
@@ -123,6 +123,25 @@ public: /*{{{*/
bool InsertErrno(MsgType const &type, const char* Function,
const char* Description,...) __like_printf(4) __cold;
+ /** \brief adds an errno message with the given type
+ *
+ * args needs to be initialized with va_start and terminated
+ * with va_end by the caller. msgSize is also an out-parameter
+ * in case the msgSize was not enough to store the complete message.
+ *
+ * \param type of the error message
+ * \param Function which failed
+ * \param Description is the format string for args
+ * \param args list from a printf-like function
+ * \param errsv is the errno the error is for
+ * \param msgSize is the size of the char[] used to store message
+ * \return true if the message was added, false if not - the caller
+ * should call this method again in that case
+ */
+ bool InsertErrno(MsgType type, const char* Function,
+ const char* Description, va_list &args,
+ int const errsv, size_t &msgSize);
+
/** \brief add an fatal error message to the list
*
* Most of the stuff we consider as "error" is also "fatal" for
@@ -185,6 +204,22 @@ public: /*{{{*/
*/
bool Insert(MsgType const &type, const char* Description,...) __like_printf(3) __cold;
+ /** \brief adds an error message with the given type
+ *
+ * args needs to be initialized with va_start and terminated
+ * with va_end by the caller. msgSize is also an out-parameter
+ * in case the msgSize was not enough to store the complete message.
+ *
+ * \param type of the error message
+ * \param Description is the format string for args
+ * \param args list from a printf-like function
+ * \param msgSize is the size of the char[] used to store message
+ * \return true if the message was added, false if not - the caller
+ * should call this method again in that case
+ */
+ bool Insert(MsgType type, const char* Description,
+ va_list &args, size_t &msgSize) __cold;
+
/** \brief is an error in the list?
*
* \return \b true if an error is included in the list, \b false otherwise
@@ -305,12 +340,6 @@ private: /*{{{*/
};
std::list<MsgStack> Stacks;
-
- bool InsertErrno(MsgType type, const char* Function,
- const char* Description, va_list &args,
- int const errsv, size_t &msgSize);
- bool Insert(MsgType type, const char* Description,
- va_list &args, size_t &msgSize);
/*}}}*/
};
/*}}}*/
diff --git a/apt-pkg/contrib/fileutl.cc b/apt-pkg/contrib/fileutl.cc
index 6e13b91d9..ac2879017 100644
--- a/apt-pkg/contrib/fileutl.cc
+++ b/apt-pkg/contrib/fileutl.cc
@@ -186,7 +186,8 @@ bool RunScripts(const char *Cnf)
/* The caller is expected to set things so that failure causes erasure */
bool CopyFile(FileFd &From,FileFd &To)
{
- if (From.IsOpen() == false || To.IsOpen() == false)
+ if (From.IsOpen() == false || To.IsOpen() == false ||
+ From.Failed() == true || To.Failed() == true)
return false;
// Buffered copy between fds
@@ -245,17 +246,20 @@ int GetLock(string File,bool Errors)
fl.l_len = 0;
if (fcntl(FD,F_SETLK,&fl) == -1)
{
+ // always close to not leak resources
+ int Tmp = errno;
+ close(FD);
+ errno = Tmp;
+
if (errno == ENOLCK)
{
_error->Warning(_("Not using locking for nfs mounted lock file %s"),File.c_str());
return dup(0); // Need something for the caller to close
- }
+ }
+
if (Errors == true)
_error->Errno("open",_("Could not get lock %s"),File.c_str());
- int Tmp = errno;
- close(FD);
- errno = Tmp;
return -1;
}
@@ -883,7 +887,7 @@ bool FileFd::Open(string FileName,unsigned int const Mode,CompressMode Compress,
return Open(FileName, ReadOnly, Gzip, Perms);
if (Compress == Auto && (Mode & WriteOnly) == WriteOnly)
- return _error->Error("Autodetection on %s only works in ReadOnly openmode!", FileName.c_str());
+ return FileFdError("Autodetection on %s only works in ReadOnly openmode!", FileName.c_str());
std::vector<APT::Configuration::Compressor> const compressors = APT::Configuration::getCompressors();
std::vector<APT::Configuration::Compressor>::const_iterator compressor = compressors.begin();
@@ -936,17 +940,17 @@ bool FileFd::Open(string FileName,unsigned int const Mode,CompressMode Compress,
case Auto:
case Extension:
// Unreachable
- return _error->Error("Opening File %s in None, Auto or Extension should be already handled?!?", FileName.c_str());
+ return FileFdError("Opening File %s in None, Auto or Extension should be already handled?!?", FileName.c_str());
}
for (; compressor != compressors.end(); ++compressor)
if (compressor->Name == name)
break;
if (compressor == compressors.end())
- return _error->Error("Can't find a configured compressor %s for file %s", name.c_str(), FileName.c_str());
+ return FileFdError("Can't find a configured compressor %s for file %s", name.c_str(), FileName.c_str());
}
if (compressor == compressors.end())
- return _error->Error("Can't find a match for specified compressor mode for file %s", FileName.c_str());
+ return FileFdError("Can't find a match for specified compressor mode for file %s", FileName.c_str());
return Open(FileName, Mode, *compressor, Perms);
}
bool FileFd::Open(string FileName,unsigned int const Mode,APT::Configuration::Compressor const &compressor, unsigned long const Perms)
@@ -955,9 +959,9 @@ bool FileFd::Open(string FileName,unsigned int const Mode,APT::Configuration::Co
Flags = AutoClose;
if ((Mode & WriteOnly) != WriteOnly && (Mode & (Atomic | Create | Empty | Exclusive)) != 0)
- return _error->Error("ReadOnly mode for %s doesn't accept additional flags!", FileName.c_str());
+ return FileFdError("ReadOnly mode for %s doesn't accept additional flags!", FileName.c_str());
if ((Mode & ReadWrite) == 0)
- return _error->Error("No openmode provided in FileFd::Open for %s", FileName.c_str());
+ return FileFdError("No openmode provided in FileFd::Open for %s", FileName.c_str());
if ((Mode & Atomic) == Atomic)
{
@@ -1003,7 +1007,7 @@ bool FileFd::Open(string FileName,unsigned int const Mode,APT::Configuration::Co
close (iFd);
iFd = -1;
}
- return _error->Errno("open",_("Could not open file %s"), FileName.c_str());
+ return FileFdErrno("open",_("Could not open file %s"), FileName.c_str());
}
SetCloseExec(iFd,true);
@@ -1032,14 +1036,19 @@ bool FileFd::OpenDescriptor(int Fd, unsigned int const Mode, CompressMode Compre
case Xz: name = "xz"; break;
case Auto:
case Extension:
- return _error->Error("Opening Fd %d in Auto or Extension compression mode is not supported", Fd);
+ if (AutoClose == true && Fd != -1)
+ close(Fd);
+ return FileFdError("Opening Fd %d in Auto or Extension compression mode is not supported", Fd);
}
for (; compressor != compressors.end(); ++compressor)
if (compressor->Name == name)
break;
if (compressor == compressors.end())
- return _error->Error("Can't find a configured compressor %s for file %s", name.c_str(), FileName.c_str());
-
+ {
+ if (AutoClose == true && Fd != -1)
+ close(Fd);
+ return FileFdError("Can't find a configured compressor %s for file %s", name.c_str(), FileName.c_str());
+ }
return OpenDescriptor(Fd, Mode, *compressor, AutoClose);
}
bool FileFd::OpenDescriptor(int Fd, unsigned int const Mode, APT::Configuration::Compressor const &compressor, bool AutoClose)
@@ -1061,11 +1070,21 @@ bool FileFd::OpenDescriptor(int Fd, unsigned int const Mode, APT::Configuration:
else
iFd = Fd;
this->FileName = "";
- if (OpenInternDescriptor(Mode, compressor) == false)
+ if (Fd == -1 || OpenInternDescriptor(Mode, compressor) == false)
{
- if (AutoClose)
+ if (iFd != -1 && (
+#ifdef HAVE_ZLIB
+ compressor.Name == "gzip" ||
+#endif
+#ifdef HAVE_BZ2
+ compressor.Name == "bzip2" ||
+#endif
+ AutoClose == true))
+ {
close (iFd);
- return _error->Errno("gzdopen",_("Could not open file descriptor %d"), Fd);
+ iFd = -1;
+ }
+ return FileFdError(_("Could not open file descriptor %d"), Fd);
}
return true;
}
@@ -1127,10 +1146,7 @@ bool FileFd::OpenInternDescriptor(unsigned int const Mode, APT::Configuration::C
ExecWait(d->compressor_pid, "FileFdCompressor", true);
if ((Mode & ReadWrite) == ReadWrite)
- {
- Flags |= Fail;
- return _error->Error("ReadWrite mode is not supported for file %s", FileName.c_str());
- }
+ return FileFdError("ReadWrite mode is not supported for file %s", FileName.c_str());
bool const Comp = (Mode & WriteOnly) == WriteOnly;
if (Comp == false)
@@ -1153,10 +1169,7 @@ bool FileFd::OpenInternDescriptor(unsigned int const Mode, APT::Configuration::C
// Create a data pipe
int Pipe[2] = {-1,-1};
if (pipe(Pipe) != 0)
- {
- Flags |= Fail;
- return _error->Errno("pipe",_("Failed to create subprocess IPC"));
- }
+ return FileFdErrno("pipe",_("Failed to create subprocess IPC"));
for (int J = 0; J != 2; J++)
SetCloseExec(Pipe[J],true);
@@ -1230,11 +1243,9 @@ FileFd::~FileFd()
{
Close();
if (d != NULL)
- {
d->CloseDown(FileName);
- delete d;
- d = NULL;
- }
+ delete d;
+ d = NULL;
}
/*}}}*/
// FileFd::Read - Read a bit of the file /*{{{*/
@@ -1266,14 +1277,13 @@ bool FileFd::Read(void *To,unsigned long long Size,unsigned long long *Actual)
{
if (errno == EINTR)
continue;
- Flags |= Fail;
#ifdef HAVE_ZLIB
if (d != NULL && d->gz != NULL)
{
int err;
char const * const errmsg = gzerror(d->gz, &err);
if (err != Z_ERRNO)
- return _error->Error("gzread: %s (%d: %s)", _("Read error"), err, errmsg);
+ return FileFdError("gzread: %s (%d: %s)", _("Read error"), err, errmsg);
}
#endif
#ifdef HAVE_BZ2
@@ -1282,10 +1292,10 @@ bool FileFd::Read(void *To,unsigned long long Size,unsigned long long *Actual)
int err;
char const * const errmsg = BZ2_bzerror(d->bz2, &err);
if (err != BZ_IO_ERROR)
- return _error->Error("BZ2_bzread: %s (%d: %s)", _("Read error"), err, errmsg);
+ return FileFdError("BZ2_bzread: %s (%d: %s)", _("Read error"), err, errmsg);
}
#endif
- return _error->Errno("read",_("Read error"));
+ return FileFdErrno("read",_("Read error"));
}
To = (char *)To + Res;
@@ -1306,9 +1316,8 @@ bool FileFd::Read(void *To,unsigned long long Size,unsigned long long *Actual)
Flags |= HitEof;
return true;
}
-
- Flags |= Fail;
- return _error->Error(_("read, still have %llu to read but none left"), Size);
+
+ return FileFdError(_("read, still have %llu to read but none left"), Size);
}
/*}}}*/
// FileFd::ReadLine - Read a complete line from the file /*{{{*/
@@ -1364,14 +1373,13 @@ bool FileFd::Write(const void *From,unsigned long long Size)
continue;
if (Res < 0)
{
- Flags |= Fail;
#ifdef HAVE_ZLIB
if (d != NULL && d->gz != NULL)
{
int err;
char const * const errmsg = gzerror(d->gz, &err);
if (err != Z_ERRNO)
- return _error->Error("gzwrite: %s (%d: %s)", _("Write error"), err, errmsg);
+ return FileFdError("gzwrite: %s (%d: %s)", _("Write error"), err, errmsg);
}
#endif
#ifdef HAVE_BZ2
@@ -1380,10 +1388,10 @@ bool FileFd::Write(const void *From,unsigned long long Size)
int err;
char const * const errmsg = BZ2_bzerror(d->bz2, &err);
if (err != BZ_IO_ERROR)
- return _error->Error("BZ2_bzwrite: %s (%d: %s)", _("Write error"), err, errmsg);
+ return FileFdError("BZ2_bzwrite: %s (%d: %s)", _("Write error"), err, errmsg);
}
#endif
- return _error->Errno("write",_("Write error"));
+ return FileFdErrno("write",_("Write error"));
}
From = (char *)From + Res;
@@ -1395,9 +1403,8 @@ bool FileFd::Write(const void *From,unsigned long long Size)
if (Size == 0)
return true;
-
- Flags |= Fail;
- return _error->Error(_("write, still have %llu to write but couldn't"), Size);
+
+ return FileFdError(_("write, still have %llu to write but couldn't"), Size);
}
bool FileFd::Write(int Fd, const void *From, unsigned long long Size)
{
@@ -1441,13 +1448,13 @@ bool FileFd::Seek(unsigned long long To)
return Skip(To - seekpos);
if ((d->openmode & ReadOnly) != ReadOnly)
- {
- Flags |= Fail;
- return _error->Error("Reopen is only implemented for read-only files!");
- }
+ return FileFdError("Reopen is only implemented for read-only files!");
#ifdef HAVE_BZ2
- if (d->bz2 != NULL)
- BZ2_bzclose(d->bz2);
+ if (d->bz2 != NULL)
+ {
+ BZ2_bzclose(d->bz2);
+ d->bz2 = NULL;
+ }
#endif
if (iFd != -1)
close(iFd);
@@ -1462,17 +1469,11 @@ bool FileFd::Seek(unsigned long long To)
if (lseek(d->compressed_fd, 0, SEEK_SET) != 0)
iFd = d->compressed_fd;
if (iFd < 0)
- {
- Flags |= Fail;
- return _error->Error("Reopen is not implemented for pipes opened with FileFd::OpenDescriptor()!");
- }
+ return FileFdError("Reopen is not implemented for pipes opened with FileFd::OpenDescriptor()!");
}
if (OpenInternDescriptor(d->openmode, d->compressor) == false)
- {
- Flags |= Fail;
- return _error->Error("Seek on file %s because it couldn't be reopened", FileName.c_str());
- }
+ return FileFdError("Seek on file %s because it couldn't be reopened", FileName.c_str());
if (To != 0)
return Skip(To);
@@ -1488,10 +1489,7 @@ bool FileFd::Seek(unsigned long long To)
#endif
res = lseek(iFd,To,SEEK_SET);
if (res != (signed)To)
- {
- Flags |= Fail;
- return _error->Error("Unable to seek to %llu", To);
- }
+ return FileFdError("Unable to seek to %llu", To);
if (d != NULL)
d->seekpos = To;
@@ -1515,10 +1513,7 @@ bool FileFd::Skip(unsigned long long Over)
{
unsigned long long toread = std::min((unsigned long long) sizeof(buffer), Over);
if (Read(buffer, toread) == false)
- {
- Flags |= Fail;
- return _error->Error("Unable to seek ahead %llu",Over);
- }
+ return FileFdError("Unable to seek ahead %llu",Over);
Over -= toread;
}
return true;
@@ -1532,10 +1527,7 @@ bool FileFd::Skip(unsigned long long Over)
#endif
res = lseek(iFd,Over,SEEK_CUR);
if (res < 0)
- {
- Flags |= Fail;
- return _error->Error("Unable to seek ahead %llu",Over);
- }
+ return FileFdError("Unable to seek ahead %llu",Over);
if (d != NULL)
d->seekpos = res;
@@ -1549,17 +1541,11 @@ bool FileFd::Truncate(unsigned long long To)
{
#if defined HAVE_ZLIB || defined HAVE_BZ2
if (d != NULL && (d->gz != NULL || d->bz2 != NULL))
- {
- Flags |= Fail;
- return _error->Error("Truncating compressed files is not implemented (%s)", FileName.c_str());
- }
+ return FileFdError("Truncating compressed files is not implemented (%s)", FileName.c_str());
#endif
if (ftruncate(iFd,To) != 0)
- {
- Flags |= Fail;
- return _error->Error("Unable to truncate to %llu",To);
- }
-
+ return FileFdError("Unable to truncate to %llu",To);
+
return true;
}
/*}}}*/
@@ -1587,10 +1573,7 @@ unsigned long long FileFd::Tell()
#endif
Res = lseek(iFd,0,SEEK_CUR);
if (Res == (off_t)-1)
- {
- Flags |= Fail;
- _error->Errno("lseek","Failed to determine the current file position");
- }
+ FileFdErrno("lseek","Failed to determine the current file position");
if (d != NULL)
d->seekpos = Res;
return Res;
@@ -1603,10 +1586,7 @@ unsigned long long FileFd::FileSize()
{
struct stat Buf;
if ((d == NULL || d->pipe == false) && fstat(iFd,&Buf) != 0)
- {
- Flags |= Fail;
- return _error->Errno("fstat","Unable to determine the file size");
- }
+ return FileFdErrno("fstat","Unable to determine the file size");
// for compressor pipes st_size is undefined and at 'best' zero
if ((d != NULL && d->pipe == true) || S_ISFIFO(Buf.st_mode))
@@ -1616,10 +1596,7 @@ unsigned long long FileFd::FileSize()
if (d != NULL)
d->pipe = true;
if (stat(FileName.c_str(), &Buf) != 0)
- {
- Flags |= Fail;
- return _error->Errno("stat","Unable to determine the file size");
- }
+ return FileFdErrno("stat","Unable to determine the file size");
}
return Buf.st_size;
@@ -1644,7 +1621,11 @@ unsigned long long FileFd::Size()
char ignore[1000];
unsigned long long read = 0;
do {
- Read(ignore, sizeof(ignore), &read);
+ if (Read(ignore, sizeof(ignore), &read) == false)
+ {
+ Seek(oldSeek);
+ return 0;
+ }
} while(read != 0);
size = Tell();
Seek(oldSeek);
@@ -1662,14 +1643,14 @@ unsigned long long FileFd::Size()
// FIXME: Size for gz-files is limited by 32bit… no largefile support
if (lseek(iFd, -4, SEEK_END) < 0)
{
- Flags |= Fail;
- return _error->Errno("lseek","Unable to seek to end of gzipped file");
+ FileFdErrno("lseek","Unable to seek to end of gzipped file");
+ return 0;
}
- size = 0L;
+ size = 0;
if (read(iFd, &size, 4) != 4)
{
- Flags |= Fail;
- return _error->Errno("read","Unable to read original size of gzipped file");
+ FileFdErrno("read","Unable to read original size of gzipped file");
+ return 0;
}
#ifdef WORDS_BIGENDIAN
@@ -1681,8 +1662,8 @@ unsigned long long FileFd::Size()
if (lseek(iFd, oldPos, SEEK_SET) < 0)
{
- Flags |= Fail;
- return _error->Errno("lseek","Unable to seek in gzipped file");
+ FileFdErrno("lseek","Unable to seek in gzipped file");
+ return 0;
}
return size;
@@ -1700,8 +1681,7 @@ time_t FileFd::ModificationTime()
struct stat Buf;
if ((d == NULL || d->pipe == false) && fstat(iFd,&Buf) != 0)
{
- Flags |= Fail;
- _error->Errno("fstat","Unable to determine the modification time of file %s", FileName.c_str());
+ FileFdErrno("fstat","Unable to determine the modification time of file %s", FileName.c_str());
return 0;
}
@@ -1714,8 +1694,7 @@ time_t FileFd::ModificationTime()
d->pipe = true;
if (stat(FileName.c_str(), &Buf) != 0)
{
- Flags |= Fail;
- _error->Errno("fstat","Unable to determine the modification time of file %s", FileName.c_str());
+ FileFdErrno("fstat","Unable to determine the modification time of file %s", FileName.c_str());
return 0;
}
}
@@ -1771,11 +1750,40 @@ bool FileFd::Close()
bool FileFd::Sync()
{
if (fsync(iFd) != 0)
+ return FileFdErrno("sync",_("Problem syncing the file"));
+ return true;
+}
+ /*}}}*/
+// FileFd::FileFdErrno - set Fail and call _error->Errno *{{{*/
+bool FileFd::FileFdErrno(const char *Function, const char *Description,...)
+{
+ Flags |= Fail;
+ va_list args;
+ size_t msgSize = 400;
+ int const errsv = errno;
+ while (true)
{
- Flags |= Fail;
- return _error->Errno("sync",_("Problem syncing the file"));
+ va_start(args,Description);
+ if (_error->InsertErrno(GlobalError::ERROR, Function, Description, args, errsv, msgSize) == false)
+ break;
+ va_end(args);
}
- return true;
+ return false;
+}
+ /*}}}*/
+// FileFd::FileFdError - set Fail and call _error->Error *{{{*/
+bool FileFd::FileFdError(const char *Description,...) {
+ Flags |= Fail;
+ va_list args;
+ size_t msgSize = 400;
+ while (true)
+ {
+ va_start(args,Description);
+ if (_error->Insert(GlobalError::ERROR, Description, args, msgSize) == false)
+ break;
+ va_end(args);
+ }
+ return false;
}
/*}}}*/
diff --git a/apt-pkg/contrib/fileutl.h b/apt-pkg/contrib/fileutl.h
index 4d933a307..9402c8f75 100644
--- a/apt-pkg/contrib/fileutl.h
+++ b/apt-pkg/contrib/fileutl.h
@@ -149,6 +149,10 @@ class FileFd
private:
FileFdPrivate* d;
bool OpenInternDescriptor(unsigned int const Mode, APT::Configuration::Compressor const &compressor);
+
+ // private helpers to set Fail flag and call _error->Error
+ bool FileFdErrno(const char* Function, const char* Description,...) __like_printf(3) __cold;
+ bool FileFdError(const char* Description,...) __like_printf(2) __cold;
};
bool RunScripts(const char *Cnf);
diff --git a/apt-pkg/contrib/gpgv.cc b/apt-pkg/contrib/gpgv.cc
index 31db7d5fe..f47e7ea48 100644
--- a/apt-pkg/contrib/gpgv.cc
+++ b/apt-pkg/contrib/gpgv.cc
@@ -154,7 +154,7 @@ void ExecGPGV(std::string const &File, std::string const &FileGPG,
if (sigFd != -1)
unlink(data);
ioprintf(std::cerr, "Splitting up %s into data and signature failed", File.c_str());
- exit(EINTERNAL);
+ exit(112);
}
Args.push_back(sig);
Args.push_back(data);
diff --git a/apt-pkg/contrib/gpgv.h b/apt-pkg/contrib/gpgv.h
index 08b10a97a..45f069058 100644
--- a/apt-pkg/contrib/gpgv.h
+++ b/apt-pkg/contrib/gpgv.h
@@ -23,9 +23,18 @@
/** \brief generates and run the command to verify a file with gpgv
*
* If File and FileSig specify the same file it is assumed that we
- * deal with a clear-signed message. In that case the file will be
- * rewritten to be in a good-known format without uneeded whitespaces
- * and additional messages (unsigned or signed).
+ * deal with a clear-signed message. Note that the method will accept
+ * and validate files which include additional (unsigned) messages
+ * without complaining. Do NOT open files accepted by this method
+ * for reading. Use #OpenMaybeClearSignedFile to access the message
+ * instead to ensure you are only reading signed data.
+ *
+ * The method does not return, but has some noteable exit-codes:
+ * 111 signals an internal error like the inability to execute gpgv,
+ * 112 indicates a clear-signed file which doesn't include a message,
+ * which can happen if APT is run while on a network requiring
+ * authentication before usage (e.g. in hotels)
+ * All other exit-codes are passed-through from gpgv.
*
* @param File is the message (unsigned or clear-signed)
* @param FileSig is the signature (detached or clear-signed)
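Note: ExecGPGV() replaces the current process with gpgv, so a caller that wants to continue afterwards has to fork first and inspect the child's exit status. A minimal sketch of such a caller based on the exit codes documented above; the helper name and the omitted optional parameters are assumptions for illustration, not part of this commit:

    #include <apt-pkg/gpgv.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>
    #include <iostream>
    #include <string>

    static bool VerifyClearSigned(std::string const &File)
    {
       pid_t const child = fork();
       if (child < 0)
          return false;
       if (child == 0)
          ExecGPGV(File, File);   // data and signature in the same file: clear-signed; never returns
       int status = 0;
       if (waitpid(child, &status, 0) != child || !WIFEXITED(status))
          return false;
       switch (WEXITSTATUS(status))
       {
          case 0:   return true;                                            // gpgv verified the signature
          case 112: std::cerr << "clear-signed file carries no message\n";  return false;
          case 111: std::cerr << "internal error while running gpgv\n";     return false;
          default:  return false;                                           // exit code passed through from gpgv
       }
    }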
diff --git a/apt-pkg/contrib/sha2.h b/apt-pkg/contrib/sha2.h
index 51c921dbd..8e0c99a1b 100644
--- a/apt-pkg/contrib/sha2.h
+++ b/apt-pkg/contrib/sha2.h
@@ -60,10 +60,11 @@ class SHA256Summation : public SHA2SummationBase
res.Set(Sum);
return res;
};
- SHA256Summation()
+ SHA256Summation()
{
SHA256_Init(&ctx);
Done = false;
+ memset(&Sum, 0, sizeof(Sum));
};
};
@@ -96,6 +97,7 @@ class SHA512Summation : public SHA2SummationBase
{
SHA512_Init(&ctx);
Done = false;
+ memset(&Sum, 0, sizeof(Sum));
};
};
diff --git a/apt-pkg/contrib/sha2_internal.cc b/apt-pkg/contrib/sha2_internal.cc
index 83b5a98d3..f84fb761c 100644
--- a/apt-pkg/contrib/sha2_internal.cc
+++ b/apt-pkg/contrib/sha2_internal.cc
@@ -632,7 +632,7 @@ void SHA256_Final(sha2_byte digest[], SHA256_CTX* context) {
}
/* Clean up state data: */
- MEMSET_BZERO(context, sizeof(context));
+ MEMSET_BZERO(context, sizeof(*context));
usedspace = 0;
}
@@ -653,7 +653,7 @@ char *SHA256_End(SHA256_CTX* context, char buffer[]) {
}
*buffer = (char)0;
} else {
- MEMSET_BZERO(context, sizeof(context));
+ MEMSET_BZERO(context, sizeof(*context));
}
MEMSET_BZERO(digest, SHA256_DIGEST_LENGTH);
return buffer;
@@ -969,7 +969,7 @@ void SHA512_Final(sha2_byte digest[], SHA512_CTX* context) {
}
/* Zero out state data */
- MEMSET_BZERO(context, sizeof(context));
+ MEMSET_BZERO(context, sizeof(*context));
}
char *SHA512_End(SHA512_CTX* context, char buffer[]) {
@@ -989,7 +989,7 @@ char *SHA512_End(SHA512_CTX* context, char buffer[]) {
}
*buffer = (char)0;
} else {
- MEMSET_BZERO(context, sizeof(context));
+ MEMSET_BZERO(context, sizeof(*context));
}
MEMSET_BZERO(digest, SHA512_DIGEST_LENGTH);
return buffer;
@@ -1044,7 +1044,7 @@ void SHA384_Final(sha2_byte digest[], SHA384_CTX* context) {
}
/* Zero out state data */
- MEMSET_BZERO(context, sizeof(context));
+ MEMSET_BZERO(context, sizeof(*context));
}
char *SHA384_End(SHA384_CTX* context, char buffer[]) {
@@ -1064,7 +1064,7 @@ char *SHA384_End(SHA384_CTX* context, char buffer[]) {
}
*buffer = (char)0;
} else {
- MEMSET_BZERO(context, sizeof(context));
+ MEMSET_BZERO(context, sizeof(*context));
}
MEMSET_BZERO(digest, SHA384_DIGEST_LENGTH);
return buffer;
diff --git a/apt-pkg/contrib/strutl.cc b/apt-pkg/contrib/strutl.cc
index 64731b482..df02c3499 100644
--- a/apt-pkg/contrib/strutl.cc
+++ b/apt-pkg/contrib/strutl.cc
@@ -759,7 +759,7 @@ bool ReadMessages(int Fd, vector<string> &List)
for (char *I = Buffer; I + 1 < End; I++)
{
if (I[1] != '\n' ||
- (strncmp(I, "\n\n", 2) != 0 && strncmp(I, "\r\n\r\n", 4) != 0))
+ (I[0] != '\n' && strncmp(I, "\r\n\r\n", 4) != 0))
continue;
// Pull the message out
@@ -767,7 +767,7 @@ bool ReadMessages(int Fd, vector<string> &List)
PartialMessage += Message;
// Fix up the buffer
- for (; I < End && (*I == '\r' || *I == '\n'); ++I);
+ for (; I < End && (*I == '\n' || *I == '\r'); ++I);
End -= I-Buffer;
memmove(Buffer,I,End-Buffer);
I = Buffer;
@@ -1233,12 +1233,12 @@ char *safe_snprintf(char *Buffer,char *End,const char *Format,...)
va_list args;
int Did;
- va_start(args,Format);
-
if (End <= Buffer)
return End;
-
+ va_start(args,Format);
Did = vsnprintf(Buffer,End - Buffer,Format,args);
+ va_end(args);
+
if (Did < 0 || Buffer + Did > End)
return End;
return Buffer + Did;
@@ -1484,9 +1484,12 @@ URI::operator string()
if (User.empty() == false)
{
- Res += User;
+ // FIXME: Technically userinfo is permitted even less
+ // characters than these, but this is not conveniently
+ // expressed with a blacklist.
+ Res += QuoteString(User, ":/?#[]@");
if (Password.empty() == false)
- Res += ":" + Password;
+ Res += ":" + QuoteString(Password, ":/?#[]@");
Res += "@";
}
@@ -1525,7 +1528,6 @@ string URI::SiteOnly(const string &URI)
U.User.clear();
U.Password.clear();
U.Path.clear();
- U.Port = 0;
return U;
}
/*}}}*/
@@ -1537,7 +1539,6 @@ string URI::NoUserPassword(const string &URI)
::URI U(URI);
U.User.clear();
U.Password.clear();
- U.Port = 0;
return U;
}
/*}}}*/
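Note: the QuoteString() change above affects how a URI is re-assembled into a string, and SiteOnly()/NoUserPassword() now keep the port. A small sketch (hypothetical host and credentials; the exact percent-encoding produced is not spelled out here):

    #include <apt-pkg/strutl.h>
    #include <iostream>
    #include <string>

    int main()
    {
       URI U(std::string("http://user:pa:ss@example.org/debian/"));
       std::string const roundtrip = U;   // operator string() quotes the userinfo part, so the ':'
                                          // inside the password is emitted percent-encoded
                                          // instead of verbatim
       std::cout << roundtrip << std::endl;
       // the port now survives stripping of user and password:
       std::cout << URI::NoUserPassword("http://user:pass@example.org:8080/debian/") << std::endl;
       return 0;
    }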
diff --git a/apt-pkg/deb/deblistparser.cc b/apt-pkg/deb/deblistparser.cc
index db86bd698..c2707d0a5 100644
--- a/apt-pkg/deb/deblistparser.cc
+++ b/apt-pkg/deb/deblistparser.cc
@@ -219,8 +219,12 @@ MD5SumValue debListParser::Description_md5()
string const value = Section.FindS("Description-md5");
if (value.empty() == true)
{
+ std::string const desc = Description() + "\n";
+ if (desc == "\n")
+ return MD5SumValue();
+
MD5Summation md5;
- md5.Add((Description() + "\n").c_str());
+ md5.Add(desc.c_str());
return md5.Result();
}
else if (likely(value.size() == 32))
@@ -801,94 +805,28 @@ bool debListParser::LoadReleaseInfo(pkgCache::PkgFileIterator &FileI,
map_ptrloc const storage = WriteUniqString(component);
FileI->Component = storage;
- // FIXME: should use FileFd and TagSection
- FILE* release = fdopen(dup(File.Fd()), "r");
- if (release == NULL)
+ pkgTagFile TagFile(&File);
+ pkgTagSection Section;
+ if (_error->PendingError() == true || TagFile.Step(Section) == false)
return false;
- char buffer[101];
- while (fgets(buffer, sizeof(buffer), release) != NULL)
- {
- size_t len = 0;
-
- // Skip empty lines
- for (; buffer[len] == '\r' && buffer[len] == '\n'; ++len)
- /* nothing */
- ;
- if (buffer[len] == '\0')
- continue;
-
- // seperate the tag from the data
- const char* dataStart = strchr(buffer + len, ':');
- if (dataStart == NULL)
- continue;
- len = dataStart - buffer;
- for (++dataStart; *dataStart == ' '; ++dataStart)
- /* nothing */
- ;
- const char* dataEnd = (const char*)rawmemchr(dataStart, '\0');
- // The last char should be a newline, but we can never be sure: #633350
- const char* lineEnd = dataEnd;
- for (--lineEnd; *lineEnd == '\r' || *lineEnd == '\n'; --lineEnd)
- /* nothing */
- ;
- ++lineEnd;
-
- // which datastorage need to be updated
- enum { Suite, Component, Version, Origin, Codename, Label, None } writeTo = None;
- if (buffer[0] == ' ')
- ;
- #define APT_PARSER_WRITETO(X) else if (strncmp(#X, buffer, len) == 0) writeTo = X;
- APT_PARSER_WRITETO(Suite)
- APT_PARSER_WRITETO(Component)
- APT_PARSER_WRITETO(Version)
- APT_PARSER_WRITETO(Origin)
- APT_PARSER_WRITETO(Codename)
- APT_PARSER_WRITETO(Label)
- #undef APT_PARSER_WRITETO
- #define APT_PARSER_FLAGIT(X) else if (strncmp(#X, buffer, len) == 0) \
- pkgTagSection::FindFlag(FileI->Flags, pkgCache::Flag:: X, dataStart, lineEnd);
- APT_PARSER_FLAGIT(NotAutomatic)
- APT_PARSER_FLAGIT(ButAutomaticUpgrades)
- #undef APT_PARSER_FLAGIT
-
- // load all data from the line and save it
- string data;
- if (writeTo != None)
- data.append(dataStart, dataEnd);
- if (sizeof(buffer) - 1 == (dataEnd - buffer))
- {
- while (fgets(buffer, sizeof(buffer), release) != NULL)
- {
- if (writeTo != None)
- data.append(buffer);
- if (strlen(buffer) != sizeof(buffer) - 1)
- break;
- }
- }
- if (writeTo != None)
- {
- // remove spaces and stuff from the end of the data line
- for (std::string::reverse_iterator s = data.rbegin();
- s != data.rend(); ++s)
- {
- if (*s != '\r' && *s != '\n' && *s != ' ')
- break;
- *s = '\0';
- }
- map_ptrloc const storage = WriteUniqString(data);
- switch (writeTo) {
- case Suite: FileI->Archive = storage; break;
- case Component: FileI->Component = storage; break;
- case Version: FileI->Version = storage; break;
- case Origin: FileI->Origin = storage; break;
- case Codename: FileI->Codename = storage; break;
- case Label: FileI->Label = storage; break;
- case None: break;
- }
- }
+ std::string data;
+ #define APT_INRELEASE(TAG, STORE) \
+ data = Section.FindS(TAG); \
+ if (data.empty() == false) \
+ { \
+ map_ptrloc const storage = WriteUniqString(data); \
+ STORE = storage; \
}
- fclose(release);
+ APT_INRELEASE("Suite", FileI->Archive)
+ APT_INRELEASE("Component", FileI->Component)
+ APT_INRELEASE("Version", FileI->Version)
+ APT_INRELEASE("Origin", FileI->Origin)
+ APT_INRELEASE("Codename", FileI->Codename)
+ APT_INRELEASE("Label", FileI->Label)
+ #undef APT_INRELEASE
+ Section.FindFlag("NotAutomatic", FileI->Flags, pkgCache::Flag::NotAutomatic);
+ Section.FindFlag("ButAutomaticUpgrades", FileI->Flags, pkgCache::Flag::ButAutomaticUpgrades);
return !_error->PendingError();
}
diff --git a/apt-pkg/deb/debmetaindex.cc b/apt-pkg/deb/debmetaindex.cc
index 7a88d71e3..b597b6f3c 100644
--- a/apt-pkg/deb/debmetaindex.cc
+++ b/apt-pkg/deb/debmetaindex.cc
@@ -238,6 +238,7 @@ bool debReleaseIndex::GetIndexes(pkgAcquire *Owner, bool const &GetAll) const
new pkgAcqIndex(Owner, (*Target)->URI, (*Target)->Description,
(*Target)->ShortDesc, HashString());
}
+ delete targets;
// this is normally created in pkgAcqMetaSig, but if we run
// in --print-uris mode, we add it here
@@ -373,10 +374,29 @@ class debSLTypeDebian : public pkgSourceList::Type
string const &Dist, string const &Section,
bool const &IsSrc, map<string, string> const &Options) const
{
- map<string, string>::const_iterator const arch = Options.find("arch");
- vector<string> const Archs =
+ // parse arch=, arch+= and arch-= settings
+ map<string, string>::const_iterator arch = Options.find("arch");
+ vector<string> Archs =
(arch != Options.end()) ? VectorizeString(arch->second, ',') :
APT::Configuration::getArchitectures();
+ if ((arch = Options.find("arch+")) != Options.end())
+ {
+ std::vector<std::string> const plusArch = VectorizeString(arch->second, ',');
+ for (std::vector<std::string>::const_iterator plus = plusArch.begin(); plus != plusArch.end(); ++plus)
+ if (std::find(Archs.begin(), Archs.end(), *plus) == Archs.end())
+ Archs.push_back(*plus);
+ }
+ if ((arch = Options.find("arch-")) != Options.end())
+ {
+ std::vector<std::string> const minusArch = VectorizeString(arch->second, ',');
+ for (std::vector<std::string>::const_iterator minus = minusArch.begin(); minus != minusArch.end(); ++minus)
+ {
+ std::vector<std::string>::iterator kill = std::find(Archs.begin(), Archs.end(), *minus);
+ if (kill != Archs.end())
+ Archs.erase(kill);
+ }
+ }
+
map<string, string>::const_iterator const trusted = Options.find("trusted");
for (vector<metaIndex *>::const_iterator I = List.begin();
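Note: the hunk above adds support for arch+= and arch-= alongside arch= in sources.list options. A hypothetical entry using all three (mirror URL invented for illustration):

    deb [arch=amd64,i386 arch+=armhf arch-=i386] http://archive.example.org/debian sid main

With the parsing code above this resolves to the architecture list amd64, armhf: arch= replaces the configured default list, arch+= appends architectures that are not yet listed, and arch-= removes them again.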
diff --git a/apt-pkg/deb/dpkgpm.cc b/apt-pkg/deb/dpkgpm.cc
index cb137729d..03986d0b9 100644
--- a/apt-pkg/deb/dpkgpm.cc
+++ b/apt-pkg/deb/dpkgpm.cc
@@ -134,6 +134,8 @@ static void dpkgChrootDirectory()
std::cerr << "Chrooting into " << chrootDir << std::endl;
if (chroot(chrootDir.c_str()) != 0)
_exit(100);
+ if (chdir("/") != 0)
+ _exit(100);
}
/*}}}*/
@@ -238,15 +240,23 @@ bool pkgDPkgPM::Remove(PkgIterator Pkg,bool Purge)
return true;
}
/*}}}*/
-// DPkgPM::SendV2Pkgs - Send version 2 package info /*{{{*/
+// DPkgPM::SendPkgInfo - Send info for install-pkgs hook /*{{{*/
// ---------------------------------------------------------------------
/* This is part of the helper script communication interface, it sends
very complete information down to the other end of the pipe.*/
bool pkgDPkgPM::SendV2Pkgs(FILE *F)
{
- fprintf(F,"VERSION 2\n");
-
- /* Write out all of the configuration directives by walking the
+ return SendPkgsInfo(F, 2);
+}
+bool pkgDPkgPM::SendPkgsInfo(FILE * const F, unsigned int const &Version)
+{
+ // This version of APT supports only v3, so don't sent higher versions
+ if (Version <= 3)
+ fprintf(F,"VERSION %u\n", Version);
+ else
+ fprintf(F,"VERSION 3\n");
+
+ /* Write out all of the configuration directives by walking the
configuration tree */
const Configuration::Item *Top = _config->Tree(0);
for (; Top != 0;)
@@ -280,30 +290,51 @@ bool pkgDPkgPM::SendV2Pkgs(FILE *F)
pkgDepCache::StateCache &S = Cache[I->Pkg];
fprintf(F,"%s ",I->Pkg.Name());
- // Current version
- if (I->Pkg->CurrentVer == 0)
- fprintf(F,"- ");
+
+ // Current version which we are going to replace
+ pkgCache::VerIterator CurVer = I->Pkg.CurrentVer();
+ if (CurVer.end() == true && (I->Op == Item::Remove || I->Op == Item::Purge))
+ CurVer = FindNowVersion(I->Pkg);
+
+ if (CurVer.end() == true)
+ {
+ if (Version <= 2)
+ fprintf(F, "- ");
+ else
+ fprintf(F, "- - none ");
+ }
else
- fprintf(F,"%s ",I->Pkg.CurrentVer().VerStr());
-
- // Show the compare operator
- // Target version
+ {
+ fprintf(F, "%s ", CurVer.VerStr());
+ if (Version >= 3)
+ fprintf(F, "%s %s ", CurVer.Arch(), CurVer.MultiArchType());
+ }
+
+ // Show the compare operator between current and install version
if (S.InstallVer != 0)
{
+ pkgCache::VerIterator const InstVer = S.InstVerIter(Cache);
int Comp = 2;
- if (I->Pkg->CurrentVer != 0)
- Comp = S.InstVerIter(Cache).CompareVer(I->Pkg.CurrentVer());
+ if (CurVer.end() == false)
+ Comp = InstVer.CompareVer(CurVer);
if (Comp < 0)
fprintf(F,"> ");
- if (Comp == 0)
+ else if (Comp == 0)
fprintf(F,"= ");
- if (Comp > 0)
+ else if (Comp > 0)
fprintf(F,"< ");
- fprintf(F,"%s ",S.InstVerIter(Cache).VerStr());
+ fprintf(F, "%s ", InstVer.VerStr());
+ if (Version >= 3)
+ fprintf(F, "%s %s ", InstVer.Arch(), InstVer.MultiArchType());
}
else
- fprintf(F,"> - ");
-
+ {
+ if (Version <= 2)
+ fprintf(F, "> - ");
+ else
+ fprintf(F, "> - - none ");
+ }
+
// Show the filename/operation
if (I->Op == Item::Install)
{
@@ -313,9 +344,9 @@ bool pkgDPkgPM::SendV2Pkgs(FILE *F)
else
fprintf(F,"%s\n",I->File.c_str());
}
- if (I->Op == Item::Configure)
+ else if (I->Op == Item::Configure)
fprintf(F,"**CONFIGURE**\n");
- if (I->Op == Item::Remove ||
+ else if (I->Op == Item::Remove ||
I->Op == Item::Purge)
fprintf(F,"**REMOVE**\n");
@@ -404,7 +435,7 @@ bool pkgDPkgPM::RunScriptsWithPkgs(const char *Cnf)
}
}
else
- SendV2Pkgs(F);
+ SendPkgsInfo(F, Version);
fclose(F);
@@ -720,13 +751,15 @@ bool pkgDPkgPM::OpenLog()
return _error->WarningE("OpenLog", _("Could not open file '%s'"), logfile_name.c_str());
setvbuf(d->term_out, NULL, _IONBF, 0);
SetCloseExec(fileno(d->term_out), true);
- struct passwd *pw;
- struct group *gr;
- pw = getpwnam("root");
- gr = getgrnam("adm");
- if (pw != NULL && gr != NULL)
- chown(logfile_name.c_str(), pw->pw_uid, gr->gr_gid);
- chmod(logfile_name.c_str(), 0640);
+ if (getuid() == 0) // if we aren't root, we can't chown a file, so don't try it
+ {
+ struct passwd *pw = getpwnam("root");
+ struct group *gr = getgrnam("adm");
+ if (pw != NULL && gr != NULL && chown(logfile_name.c_str(), pw->pw_uid, gr->gr_gid) != 0)
+ _error->WarningE("OpenLog", "chown to root:adm of file %s failed", logfile_name.c_str());
+ }
+ if (chmod(logfile_name.c_str(), 0640) != 0)
+ _error->WarningE("OpenLog", "chmod 0640 of file %s failed", logfile_name.c_str());
fprintf(d->term_out, "\nLog started: %s\n", timestr);
}
@@ -1205,16 +1238,13 @@ bool pkgDPkgPM::Go(int OutStatusFd)
// if tcgetattr does not return zero there was a error
// and we do not do any pty magic
+ _error->PushToStack();
if (tcgetattr(STDOUT_FILENO, &tt) == 0)
{
ioctl(0, TIOCGWINSZ, (char *)&win);
- if (openpty(&master, &slave, NULL, &tt, &win) < 0)
+ if (openpty(&master, &slave, NULL, &tt, &win) < 0)
{
- const char *s = _("Can not write log, openpty() "
- "failed (/dev/pts not mounted?)\n");
- fprintf(stderr, "%s",s);
- if(d->term_out)
- fprintf(d->term_out, "%s",s);
+ _error->Errno("openpty", _("Can not write log (%s)"), _("Is /dev/pts mounted?"));
master = slave = -1;
} else {
struct termios rtt;
@@ -1237,6 +1267,15 @@ bool pkgDPkgPM::Go(int OutStatusFd)
if(d->term_out)
fprintf(d->term_out, "%s",s);
}
+ // complain only if stdout is either a terminal (but still failed) or is an invalid
+ // descriptor otherwise we would complain about redirection to e.g. /dev/null as well.
+ else if (isatty(STDOUT_FILENO) == 1 || errno == EBADF)
+ _error->Errno("tcgetattr", _("Can not write log (%s)"), _("Is stdout a terminal?"));
+
+ if (_error->PendingError() == true)
+ _error->DumpErrors(std::cerr);
+ _error->RevertToStack();
+
// Fork dpkg
pid_t Child;
_config->Set("APT::Keep-Fds::",fd[1]);
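Note: SendPkgsInfo() above extends the install-pkgs hook protocol (DPkg::Pre-Install-Pkgs) to version 3 by appending architecture and multi-arch type to each version field. Hypothetical lines a version-3 hook could receive (package names and paths invented; the configuration directives that precede the package list are omitted):

    VERSION 3
    libfoo 1.0-1 amd64 same < 1.1-1 amd64 same /var/cache/apt/archives/libfoo_1.1-1_amd64.deb
    bar - - none < 2.0-1 amd64 none /var/cache/apt/archives/bar_2.0-1_amd64.deb
    baz 0.5-2 amd64 none > - - none **REMOVE**

Each line carries the package name, the current version/arch/multi-arch type (or "- - none"), a compare operator relating the two versions, the version/arch/multi-arch type to be installed (or "- - none"), and finally the .deb filename, **CONFIGURE** or **REMOVE**.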
diff --git a/apt-pkg/deb/dpkgpm.h b/apt-pkg/deb/dpkgpm.h
index aab39f633..c31d56f8e 100644
--- a/apt-pkg/deb/dpkgpm.h
+++ b/apt-pkg/deb/dpkgpm.h
@@ -14,6 +14,7 @@
#include <vector>
#include <map>
#include <stdio.h>
+#include <apt-pkg/macros.h>
#ifndef APT_8_CLEANER_HEADERS
using std::vector;
@@ -79,7 +80,8 @@ class pkgDPkgPM : public pkgPackageManager
// Helpers
bool RunScriptsWithPkgs(const char *Cnf);
- bool SendV2Pkgs(FILE *F);
+ __deprecated bool SendV2Pkgs(FILE *F);
+ bool SendPkgsInfo(FILE * const F, unsigned int const &Version);
void WriteHistoryTag(std::string const &tag, std::string value);
// apport integration
diff --git a/apt-pkg/depcache.cc b/apt-pkg/depcache.cc
index 6a3e9bfc4..978a893f7 100644
--- a/apt-pkg/depcache.cc
+++ b/apt-pkg/depcache.cc
@@ -865,6 +865,11 @@ bool pkgDepCache::MarkDelete(PkgIterator const &Pkg, bool rPurge,
bool pkgDepCache::IsDeleteOk(PkgIterator const &Pkg,bool rPurge,
unsigned long Depth, bool FromUser)
{
+ return IsDeleteOkProtectInstallRequests(Pkg, rPurge, Depth, FromUser);
+}
+bool pkgDepCache::IsDeleteOkProtectInstallRequests(PkgIterator const &Pkg,
+ bool const rPurge, unsigned long const Depth, bool const FromUser)
+{
if (FromUser == false && Pkg->CurrentVer == 0)
{
StateCache &P = PkgState[Pkg->ID];
@@ -1002,9 +1007,6 @@ struct CompareProviders {
else if ((B->Flags & pkgCache::Flag::Important) == pkgCache::Flag::Important)
return true;
}
- // higher priority seems like a good idea
- if (AV->Priority != BV->Priority)
- return AV->Priority < BV->Priority;
// prefer native architecture
if (strcmp(A.Arch(), B.Arch()) != 0)
{
@@ -1019,6 +1021,9 @@ struct CompareProviders {
else if (*a == B.Arch())
return true;
}
+ // higher priority seems like a good idea
+ if (AV->Priority != BV->Priority)
+ return AV->Priority > BV->Priority;
// unable to decide…
return A->ID < B->ID;
}
@@ -1047,9 +1052,10 @@ bool pkgDepCache::MarkInstall(PkgIterator const &Pkg,bool AutoInst,
return true;
}
- // check if we are allowed to install the package
- if (IsInstallOk(Pkg,AutoInst,Depth,FromUser) == false)
- return false;
+ // check if we are allowed to install the package (if we haven't already)
+ if (P.Mode != ModeInstall || P.InstallVer != P.CandidateVer)
+ if (IsInstallOk(Pkg,AutoInst,Depth,FromUser) == false)
+ return false;
ActionGroup group(*this);
P.iFlags &= ~AutoKept;
@@ -1200,16 +1206,23 @@ bool pkgDepCache::MarkInstall(PkgIterator const &Pkg,bool AutoInst,
verlist.insert(Cand);
}
CompareProviders comp(Start);
- APT::VersionList::iterator InstVer = std::max_element(verlist.begin(), verlist.end(), comp);
- if (InstVer != verlist.end())
- {
+ do {
+ APT::VersionList::iterator InstVer = std::max_element(verlist.begin(), verlist.end(), comp);
+
+ if (InstVer == verlist.end())
+ break;
+
pkgCache::PkgIterator InstPkg = InstVer.ParentPkg();
if(DebugAutoInstall == true)
std::clog << OutputInDepth(Depth) << "Installing " << InstPkg.Name()
<< " as " << Start.DepType() << " of " << Pkg.Name()
<< std::endl;
- MarkInstall(InstPkg, true, Depth + 1, false, ForceImportantDeps);
+ if (MarkInstall(InstPkg, true, Depth + 1, false, ForceImportantDeps) == false)
+ {
+ verlist.erase(InstVer);
+ continue;
+ }
// now check if we should consider it a automatic dependency or not
if(InstPkg->CurrentVer == 0 && Pkg->Section != 0 && ConfigValueInSubTree("APT::Never-MarkAuto-Sections", Pkg.Section()))
{
@@ -1218,7 +1231,8 @@ bool pkgDepCache::MarkInstall(PkgIterator const &Pkg,bool AutoInst,
<< Start.DepType() << " of pkg in APT::Never-MarkAuto-Sections)" << std::endl;
MarkAuto(InstPkg, false);
}
- }
+ break;
+ } while(true);
continue;
}
/* Negative dependencies have no or-group
@@ -1243,9 +1257,16 @@ bool pkgDepCache::MarkInstall(PkgIterator const &Pkg,bool AutoInst,
PkgState[Pkg->ID].CandidateVer != *I &&
MarkInstall(Pkg,true,Depth + 1, false, ForceImportantDeps) == true)
continue;
- else if ((Start->Type == pkgCache::Dep::Conflicts || Start->Type == pkgCache::Dep::DpkgBreaks) &&
- MarkDelete(Pkg,false,Depth + 1, false) == false)
- break;
+ else if (Start->Type == pkgCache::Dep::Conflicts ||
+ Start->Type == pkgCache::Dep::DpkgBreaks)
+ {
+ if(DebugAutoInstall == true)
+ std::clog << OutputInDepth(Depth)
+ << " Removing: " << Pkg.Name()
+ << std::endl;
+ if (MarkDelete(Pkg,false,Depth + 1, false) == false)
+ break;
+ }
}
continue;
}
@@ -1256,11 +1277,50 @@ bool pkgDepCache::MarkInstall(PkgIterator const &Pkg,bool AutoInst,
/*}}}*/
// DepCache::IsInstallOk - check if it is ok to install this package /*{{{*/
// ---------------------------------------------------------------------
-/* The default implementation does nothing.
+/* The default implementation checks if the installation of an M-A:same
+ package would lead us into a version-screw and if so forbids it.
dpkg holds are enforced by the private IsModeChangeOk */
bool pkgDepCache::IsInstallOk(PkgIterator const &Pkg,bool AutoInst,
unsigned long Depth, bool FromUser)
{
+ return IsInstallOkMultiArchSameVersionSynced(Pkg,AutoInst, Depth, FromUser);
+}
+bool pkgDepCache::IsInstallOkMultiArchSameVersionSynced(PkgIterator const &Pkg,
+ bool const AutoInst, unsigned long const Depth, bool const FromUser)
+{
+ if (FromUser == true) // as always: user is always right
+ return true;
+
+   // ignore packages with non-M-A:same candidates
+ VerIterator const CandVer = PkgState[Pkg->ID].CandidateVerIter(*this);
+ if (unlikely(CandVer.end() == true) || CandVer == Pkg.CurrentVer() ||
+ (CandVer->MultiArch & pkgCache::Version::Same) != pkgCache::Version::Same)
+ return true;
+
+ GrpIterator const Grp = Pkg.Group();
+ for (PkgIterator P = Grp.PackageList(); P.end() == false; P = Grp.NextPkg(P))
+ {
+ // not installed or version synced: fine by definition
+ // (simple string-compare as stuff like '1' == '0:1-0' can't happen here)
+ if (P->CurrentVer == 0 || strcmp(Pkg.CandVersion(), P.CandVersion()) == 0)
+ continue;
+      // packages losing M-A:same can be out-of-sync
+ VerIterator CV = PkgState[P->ID].CandidateVerIter(*this);
+ if (unlikely(CV.end() == true) ||
+ (CV->MultiArch & pkgCache::Version::Same) != pkgCache::Version::Same)
+ continue;
+
+ // not downloadable means the package is obsolete, so allow out-of-sync
+ if (CV.Downloadable() == false)
+ continue;
+
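+      // record that this package is kept back automatically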
+ PkgState[Pkg->ID].iFlags |= AutoKept;
+ if (unlikely(DebugMarker == true))
+ std::clog << OutputInDepth(Depth) << "Ignore MarkInstall of " << Pkg
+ << " as its M-A:same siblings are not version-synced" << std::endl;
+ return false;
+ }
+
return true;
}
/*}}}*/
diff --git a/apt-pkg/depcache.h b/apt-pkg/depcache.h
index 7358048ed..d9c95349b 100644
--- a/apt-pkg/depcache.h
+++ b/apt-pkg/depcache.h
@@ -442,16 +442,15 @@ class pkgDepCache : protected pkgCache::Namespace
/** \return \b true if it's OK for MarkInstall to install
* the given package.
*
- * See the default implementation for a simple example how this
- * method can be used.
- * Overriding implementations should use the hold-state-flag to cache
- * results from previous checks of this package - also it should
- * be used if the default resolver implementation is also used to
- * ensure that these packages are handled like "normal" dpkg holds.
+ * The default implementation simply calls all IsInstallOk*
+    * methods mentioned below.
+ *
+ * Overriding implementations should use the hold-state-flag to
+ * cache results from previous checks of this package - if possible.
*
* The parameters are the same as in the calling MarkInstall:
* \param Pkg the package that MarkInstall wants to install.
- * \param AutoInst needs a previous MarkInstall this package?
+ * \param AutoInst install this and all its dependencies
    * \param Depth recursion depth of this Marker call
* \param FromUser was the install requested by the user?
*/
@@ -461,12 +460,8 @@ class pkgDepCache : protected pkgCache::Namespace
/** \return \b true if it's OK for MarkDelete to remove
* the given package.
*
- * See the default implementation for a simple example how this
- * method can be used.
- * Overriding implementations should use the hold-state-flag to cache
- * results from previous checks of this package - also it should
- * be used if the default resolver implementation is also used to
- * ensure that these packages are handled like "normal" dpkg holds.
+ * The default implementation simply calls all IsDeleteOk*
+    * methods mentioned below; see also #IsInstallOk.
*
* The parameters are the same as in the calling MarkDelete:
* \param Pkg the package that MarkDelete wants to remove.
@@ -498,6 +493,15 @@ class pkgDepCache : protected pkgCache::Namespace
pkgDepCache(pkgCache *Cache,Policy *Plcy = 0);
virtual ~pkgDepCache();
+ protected:
+   // methods called by IsInstallOk
+ bool IsInstallOkMultiArchSameVersionSynced(PkgIterator const &Pkg,
+ bool const AutoInst, unsigned long const Depth, bool const FromUser);
+
+   // methods called by IsDeleteOk
+ bool IsDeleteOkProtectInstallRequests(PkgIterator const &Pkg,
+ bool const rPurge, unsigned long const Depth, bool const FromUser);
+
private:
bool IsModeChangeOk(ModeList const mode, PkgIterator const &Pkg,
unsigned long const Depth, bool const FromUser);
diff --git a/apt-pkg/indexcopy.cc b/apt-pkg/indexcopy.cc
index a262ef789..7694cb1dd 100644
--- a/apt-pkg/indexcopy.cc
+++ b/apt-pkg/indexcopy.cc
@@ -106,9 +106,9 @@ bool IndexCopy::CopyPackages(string CDROM,string Name,vector<string> &List,
} else {
Target.Open(TargetF,FileFd::WriteAtomic);
}
- FILE *TargetFl = fdopen(dup(Target.Fd()),"w");
if (_error->PendingError() == true)
return false;
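+      // check for errors before duplicating the fd so nothing is leaked on an early return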
+ FILE *TargetFl = fdopen(dup(Target.Fd()),"w");
if (TargetFl == 0)
return _error->Errno("fdopen","Failed to reopen fd");
@@ -544,11 +544,9 @@ bool SigVerify::CopyMetaIndex(string CDROM, string CDName, /*{{{*/
FileFd Rel;
Target.Open(TargetF,FileFd::WriteAtomic);
Rel.Open(prefix + file,FileFd::ReadOnly);
- if (_error->PendingError() == true)
- return false;
if (CopyFile(Rel,Target) == false)
- return false;
-
+ return _error->Error("Copying of '%s' for '%s' from '%s' failed", file.c_str(), CDName.c_str(), prefix.c_str());
+
return true;
}
/*}}}*/
@@ -603,6 +601,7 @@ bool SigVerify::CopyAndVerify(string CDROM,string Name,vector<string> &SigList,
(useInRelease ? inrelease.c_str() : releasegpg.c_str()));
// something went wrong, don't copy the Release.gpg
// FIXME: delete any existing gpg file?
+ delete MetaIndex;
continue;
}
@@ -642,6 +641,18 @@ bool SigVerify::CopyAndVerify(string CDROM,string Name,vector<string> &SigList,
return true;
}
/*}}}*/
+// SigVerify::RunGPGV - deprecated wrapper calling ExecGPGV /*{{{*/
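+// (the inline bodies were moved here from indexcopy.h; only the deprecated declarations stay in the header)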
+bool SigVerify::RunGPGV(std::string const &File, std::string const &FileOut,
+ int const &statusfd, int fd[2]) {
+ ExecGPGV(File, FileOut, statusfd, fd);
+ return false;
+};
+bool SigVerify::RunGPGV(std::string const &File, std::string const &FileOut,
+ int const &statusfd) {
+ ExecGPGV(File, FileOut, statusfd);
+ return false;
+};
+ /*}}}*/
bool TranslationsCopy::CopyTranslations(string CDROM,string Name, /*{{{*/
vector<string> &List, pkgCdromStatus *log)
{
@@ -704,9 +715,9 @@ bool TranslationsCopy::CopyTranslations(string CDROM,string Name, /*{{{*/
} else {
Target.Open(TargetF,FileFd::WriteAtomic);
}
- FILE *TargetFl = fdopen(dup(Target.Fd()),"w");
if (_error->PendingError() == true)
return false;
+ FILE *TargetFl = fdopen(dup(Target.Fd()),"w");
if (TargetFl == 0)
return _error->Errno("fdopen","Failed to reopen fd");
diff --git a/apt-pkg/indexcopy.h b/apt-pkg/indexcopy.h
index aa221158e..e6a07a887 100644
--- a/apt-pkg/indexcopy.h
+++ b/apt-pkg/indexcopy.h
@@ -100,15 +100,9 @@ class SigVerify /*{{{*/
std::vector<std::string> PkgList,std::vector<std::string> SrcList);
__deprecated static bool RunGPGV(std::string const &File, std::string const &FileOut,
- int const &statusfd, int fd[2]) {
- ExecGPGV(File, FileOut, statusfd, fd);
- return false;
- };
+ int const &statusfd, int fd[2]);
__deprecated static bool RunGPGV(std::string const &File, std::string const &FileOut,
- int const &statusfd = -1) {
- ExecGPGV(File, FileOut, statusfd);
- return false;
- };
+ int const &statusfd = -1);
};
/*}}}*/
diff --git a/apt-pkg/indexrecords.cc b/apt-pkg/indexrecords.cc
index e37a78cfb..6d89949a0 100644
--- a/apt-pkg/indexrecords.cc
+++ b/apt-pkg/indexrecords.cc
@@ -62,7 +62,7 @@ bool indexRecords::Load(const string Filename) /*{{{*/
if (OpenMaybeClearSignedFile(Filename, Fd) == false)
return false;
- pkgTagFile TagFile(&Fd, Fd.Size() + 256); // XXX
+ pkgTagFile TagFile(&Fd);
if (_error->PendingError() == true)
{
strprintf(ErrorText, _("Unable to parse Release file %s"),Filename.c_str());
@@ -71,16 +71,11 @@ bool indexRecords::Load(const string Filename) /*{{{*/
pkgTagSection Section;
const char *Start, *End;
- // Skip over sections beginning with ----- as this is an idicator for clearsigns
- do {
- if (TagFile.Step(Section) == false)
- {
- strprintf(ErrorText, _("No sections in Release file %s"), Filename.c_str());
- return false;
- }
-
- Section.Get (Start, End, 0);
- } while (End - Start > 5 && strncmp(Start, "-----", 5) == 0);
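+   // OpenMaybeClearSignedFile above already strips any signature armor, so the first section is the Release data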
+ if (TagFile.Step(Section) == false)
+ {
+ strprintf(ErrorText, _("No sections in Release file %s"), Filename.c_str());
+ return false;
+ }
Suite = Section.FindS("Suite");
Dist = Section.FindS("Codename");
diff --git a/apt-pkg/orderlist.cc b/apt-pkg/orderlist.cc
index 80d8fd490..984ae1d10 100644
--- a/apt-pkg/orderlist.cc
+++ b/apt-pkg/orderlist.cc
@@ -301,9 +301,8 @@ bool pkgOrderList::OrderConfigure()
/* Higher scores order earlier */
int pkgOrderList::Score(PkgIterator Pkg)
{
- static int const ScoreDelete = _config->FindI("OrderList::Score::Delete", 500);
-
- // Removal is always done first
+ // Removals should be done after we dealt with essentials
+ static int const ScoreDelete = _config->FindI("OrderList::Score::Delete", 100);
if (Cache[Pkg].Delete() == true)
return ScoreDelete;
@@ -893,149 +892,136 @@ bool pkgOrderList::DepConfigure(DepIterator D)
/*}}}*/
// OrderList::DepRemove - Removal ordering /*{{{*/
// ---------------------------------------------------------------------
-/* Removal visits all reverse depends. It considers if the dependency
- of the Now state version to see if it is okay with removing this
- package. This check should always fail, but is provided for symetery
- with the other critical handlers.
-
- Loops are preprocessed and logged. Removal loops can also be
- detected in the critical handler. They are characterized by an
- old version of A depending on B but the new version of A conflicting
- with B, thus either A or B must break to install. */
-bool pkgOrderList::DepRemove(DepIterator D)
+/* Checks all given (reverse) dependencies to see if they are broken by the
+   removal of a package and, if so, fixes the breakage by visiting another
+   provider or or-group member so that the depending package keeps working.
+   This is especially important for Immediate packages, e.g. those depending
+   on an awk implementation. If the dependency can't be fixed with another
+   package, an upgrade of the package will solve the problem. */
+bool pkgOrderList::DepRemove(DepIterator Broken)
{
- if (D.Reverse() == false)
+ if (Broken.Reverse() == false)
return true;
- for (; D.end() == false; ++D)
- if (D->Type == pkgCache::Dep::Depends || D->Type == pkgCache::Dep::PreDepends)
+
+ for (; Broken.end() == false; ++Broken)
+ {
+ if (Broken->Type != pkgCache::Dep::Depends &&
+ Broken->Type != pkgCache::Dep::PreDepends)
+ continue;
+
+ PkgIterator BrokenPkg = Broken.ParentPkg();
+ // uninstalled packages can't break via a remove
+ if (BrokenPkg->CurrentVer == 0)
+ continue;
+
+      // if it's already added, we can't do anything useful
+ if (IsFlag(BrokenPkg, AddPending) == true || IsFlag(BrokenPkg, Added) == true)
+ continue;
+
+ // if the dependee is going to be removed, visit it now
+ if (Cache[BrokenPkg].Delete() == true)
+ return VisitNode(BrokenPkg, "Remove-Dependee");
+
+ // The package stays around, so find out how this is possible
+ for (DepIterator D = BrokenPkg.CurrentVer().DependsList(); D.end() == false;)
{
- // Duplication elimination, consider the current version only
- if (D.ParentPkg().CurrentVer() != D.ParentVer())
+ // only important or-groups need fixing
+ if (D->Type != pkgCache::Dep::Depends &&
+ D->Type != pkgCache::Dep::PreDepends)
+ {
+ ++D;
continue;
+ }
- /* We wish to see if the dep on the parent package is okay
- in the removed (install) state of the target pkg. */
- bool tryFixDeps = false;
- if (CheckDep(D) == true)
+ // Start is the beginning of the or-group, D is the first one after or
+ DepIterator Start = D;
+ bool foundBroken = false;
+ for (bool LastOR = true; D.end() == false && LastOR == true; ++D)
{
- // We want to catch loops with the code below.
- if (IsFlag(D.ParentPkg(),AddPending) == false)
- continue;
+ LastOR = (D->CompareOp & pkgCache::Dep::Or) == pkgCache::Dep::Or;
+ if (D == Broken)
+ foundBroken = true;
}
- else
- tryFixDeps = true;
- // This is the loop detection
- if (IsFlag(D.ParentPkg(),Added) == true ||
- IsFlag(D.ParentPkg(),AddPending) == true)
- {
- if (IsFlag(D.ParentPkg(),AddPending) == true)
- AddLoop(D);
+ // this or-group isn't the broken one: keep searching
+ if (foundBroken == false)
continue;
+
+ // iterate over all members of the or-group searching for a ready replacement
+ bool readyReplacement = false;
+ for (DepIterator OrMember = Start; OrMember != D && readyReplacement == false; ++OrMember)
+ {
+ Version ** Replacements = OrMember.AllTargets();
+ for (Version **R = Replacements; *R != 0; ++R)
+ {
+ VerIterator Ver(Cache,*R);
+ // only currently installed packages can be a replacement
+ PkgIterator RPkg = Ver.ParentPkg();
+ if (RPkg.CurrentVer() != Ver)
+ continue;
+
+ // packages going to be removed can't be a replacement
+ if (Cache[RPkg].Delete() == true)
+ continue;
+
+ readyReplacement = true;
+ break;
+ }
+ delete[] Replacements;
}
- if (tryFixDeps == true)
+ // something else is ready to take over, do nothing
+ if (readyReplacement == true)
+ continue;
+
+ // see if we can visit a replacement
+ bool visitReplacement = false;
+ for (DepIterator OrMember = Start; OrMember != D && visitReplacement == false; ++OrMember)
{
- for (pkgCache::DepIterator F = D.ParentPkg().CurrentVer().DependsList();
- F.end() == false; ++F)
+ Version ** Replacements = OrMember.AllTargets();
+ for (Version **R = Replacements; *R != 0; ++R)
{
- if (F->Type != pkgCache::Dep::Depends && F->Type != pkgCache::Dep::PreDepends)
+ VerIterator Ver(Cache,*R);
+ // consider only versions we plan to install
+ PkgIterator RPkg = Ver.ParentPkg();
+ if (Cache[RPkg].Install() == false || Cache[RPkg].InstallVer != Ver)
continue;
- // Check the Providers
- if (F.TargetPkg()->ProvidesList != 0)
- {
- pkgCache::PrvIterator Prov = F.TargetPkg().ProvidesList();
- for (; Prov.end() == false; ++Prov)
- {
- pkgCache::PkgIterator const P = Prov.OwnerPkg();
- if (IsFlag(P, InList) == true &&
- IsFlag(P, AddPending) == true &&
- IsFlag(P, Added) == false &&
- Cache[P].InstallVer == 0)
- break;
- }
- if (Prov.end() == false)
- for (pkgCache::PrvIterator Prv = F.TargetPkg().ProvidesList();
- Prv.end() == false; ++Prv)
- {
- pkgCache::PkgIterator const P = Prv.OwnerPkg();
- if (IsFlag(P, InList) == true &&
- IsFlag(P, AddPending) == false &&
- Cache[P].InstallVer != 0 &&
- VisitNode(P, "Remove-P") == true)
- {
- Flag(P, Immediate);
- tryFixDeps = false;
- break;
- }
- }
- if (tryFixDeps == false)
- break;
- }
- // Check for Or groups
- if ((F->CompareOp & pkgCache::Dep::Or) != pkgCache::Dep::Or)
+ // loops are not going to help us, so don't create them
+ if (IsFlag(RPkg, AddPending) == true)
continue;
- // Lets see if the package is part of the Or group
- pkgCache::DepIterator S = F;
- for (; S.end() == false; ++S)
+
+ if (IsMissing(RPkg) == true)
+ continue;
+
+ visitReplacement = true;
+ if (IsFlag(BrokenPkg, Immediate) == false)
{
- if (S.TargetPkg() == D.TargetPkg())
+ if (VisitNode(RPkg, "Remove-Rep") == true)
break;
- if ((S->CompareOp & pkgCache::Dep::Or) != pkgCache::Dep::Or ||
- CheckDep(S)) // Or group is satisfied by another package
- for (;S.end() == false; ++S);
}
- if (S.end() == true)
- continue;
- // skip to the end of the or group
- for (;S.end() == false && (S->CompareOp & pkgCache::Dep::Or) == pkgCache::Dep::Or; ++S);
- ++S;
- // The soon to be removed is part of the Or group
- // start again in the or group and find something which will serve as replacement
- for (; F.end() == false && F != S; ++F)
+ else
{
- if (IsFlag(F.TargetPkg(), InList) == true &&
- IsFlag(F.TargetPkg(), AddPending) == false &&
- Cache[F.TargetPkg()].InstallVer != 0 &&
- VisitNode(F.TargetPkg(), "Remove-Target") == true)
- {
- Flag(F.TargetPkg(), Immediate);
- tryFixDeps = false;
+ Flag(RPkg, Immediate);
+ if (VisitNode(RPkg, "Remove-ImmRep") == true)
break;
- }
- else if (F.TargetPkg()->ProvidesList != 0)
- {
- pkgCache::PrvIterator Prv = F.TargetPkg().ProvidesList();
- for (; Prv.end() == false; ++Prv)
- {
- if (IsFlag(Prv.OwnerPkg(), InList) == true &&
- IsFlag(Prv.OwnerPkg(), AddPending) == false &&
- Cache[Prv.OwnerPkg()].InstallVer != 0 &&
- VisitNode(Prv.OwnerPkg(), "Remove-Owner") == true)
- {
- Flag(Prv.OwnerPkg(), Immediate);
- tryFixDeps = false;
- break;
- }
- }
- if (Prv.end() == false)
- break;
- }
}
- if (tryFixDeps == false)
- break;
+ visitReplacement = false;
}
+ delete[] Replacements;
}
-
- // Skip over missing files
- if (IsMissing(D.ParentPkg()) == true)
+ if (visitReplacement == true)
continue;
-
- if (VisitNode(D.ParentPkg(), "Remove-Parent") == false)
+
+ // the broken package in current version can't be fixed, so install new version
+ if (IsMissing(BrokenPkg) == true)
+ break;
+
+ if (VisitNode(BrokenPkg, "Remove-Upgrade") == false)
return false;
}
-
+ }
+
return true;
}
/*}}}*/
diff --git a/apt-pkg/packagemanager.cc b/apt-pkg/packagemanager.cc
index e2d7dbf2a..8c0d2e855 100644
--- a/apt-pkg/packagemanager.cc
+++ b/apt-pkg/packagemanager.cc
@@ -338,8 +338,9 @@ bool pkgPackageManager::SmartConfigure(PkgIterator Pkg, int const Depth)
however if there is a loop (A depends on B, B depends on A) this will not
be the case, so check for dependencies before configuring. */
bool Bad = false, Changed = false;
- const unsigned int max_loops = _config->FindI("APT::pkgPackageManager::MaxLoopCount", 500);
+ const unsigned int max_loops = _config->FindI("APT::pkgPackageManager::MaxLoopCount", 5000);
unsigned int i=0;
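+   // dependencies that still need configuring are collected here by the first pass and handled in a second pass below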
+ std::list<DepIterator> needConfigure;
do
{
Changed = false;
@@ -353,7 +354,7 @@ bool pkgPackageManager::SmartConfigure(PkgIterator Pkg, int const Depth)
continue;
Bad = true;
- // Search for dependencies which are unpacked but aren't configured yet (maybe loops)
+ // Check for dependencies that have not been unpacked, probably due to loops.
for (DepIterator Cur = Start; true; ++Cur)
{
SPtrArray<Version *> VList = Cur.AllTargets();
@@ -373,51 +374,66 @@ bool pkgPackageManager::SmartConfigure(PkgIterator Pkg, int const Depth)
}
// Check if the version that is going to be installed will satisfy the dependency
- if (Cache[DepPkg].InstallVer != *I)
+ if (Cache[DepPkg].InstallVer != *I || List->IsNow(DepPkg) == false)
continue;
- if (List->IsFlag(DepPkg,pkgOrderList::UnPacked))
+ if (PkgLoop == true)
{
- if (List->IsFlag(DepPkg,pkgOrderList::Loop) && PkgLoop)
- {
- // This dependency has already been dealt with by another SmartConfigure on Pkg
- Bad = false;
- break;
- }
- /* Check for a loop to prevent one forming
- If A depends on B and B depends on A, SmartConfigure will
- just hop between them if this is not checked. Dont remove the
- loop flag after finishing however as loop is already set.
- This means that there is another SmartConfigure call for this
- package and it will remove the loop flag */
+ if (Debug)
+ std::clog << OutputInDepth(Depth) << "Package " << Pkg << " loops in SmartConfigure" << std::endl;
+ Bad = false;
+ break;
+ }
+ else
+ {
+ if (Debug)
+ clog << OutputInDepth(Depth) << "Unpacking " << DepPkg.FullName() << " to avoid loop " << Cur << endl;
if (PkgLoop == false)
List->Flag(Pkg,pkgOrderList::Loop);
- if (SmartConfigure(DepPkg, Depth + 1) == true)
+ if (SmartUnPack(DepPkg, true, Depth + 1) == true)
{
Bad = false;
if (List->IsFlag(DepPkg,pkgOrderList::Loop) == false)
- Changed = true;
+ Changed = true;
}
if (PkgLoop == false)
- List->RmFlag(Pkg,pkgOrderList::Loop);
- // If SmartConfigure was succesfull, Bad is false, so break
+ List->RmFlag(Pkg,pkgOrderList::Loop);
if (Bad == false)
break;
}
- else if (List->IsFlag(DepPkg,pkgOrderList::Configured))
- {
- Bad = false;
- break;
- }
}
- if (Cur == End)
+
+ if (Cur == End || Bad == false)
break;
- }
+ }
if (Bad == false)
continue;
- // Check for dependencies that have not been unpacked, probably due to loops.
+ needConfigure.push_back(Start);
+ }
+ if (i++ > max_loops)
+ return _error->Error("Internal error: MaxLoopCount reached in SmartUnPack (1) for %s, aborting", Pkg.FullName().c_str());
+ } while (Changed == true);
+
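+   // second pass: now try to configure the dependencies collected above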
+ Bad = false, Changed = false, i = 0;
+ do
+ {
+ Changed = false;
+ for (std::list<DepIterator>::const_iterator D = needConfigure.begin(); D != needConfigure.end(); ++D)
+ {
+ // Compute a single dependency element (glob or) without modifying D
+ pkgCache::DepIterator Start, End;
+ {
+ pkgCache::DepIterator Discard = *D;
+ Discard.GlobOr(Start,End);
+ }
+
+ if (End->Type != pkgCache::Dep::Depends)
+ continue;
+ Bad = true;
+
+ // Search for dependencies which are unpacked but aren't configured yet (maybe loops)
for (DepIterator Cur = Start; true; ++Cur)
{
SPtrArray<Version *> VList = Cur.AllTargets();
@@ -428,44 +444,53 @@ bool pkgPackageManager::SmartConfigure(PkgIterator Pkg, int const Depth)
PkgIterator DepPkg = Ver.ParentPkg();
// Check if the version that is going to be installed will satisfy the dependency
- if (Cache[DepPkg].InstallVer != *I || List->IsNow(DepPkg) == false)
+ if (Cache[DepPkg].InstallVer != *I)
continue;
- if (PkgLoop == true)
- {
- if (Debug)
- std::clog << OutputInDepth(Depth) << "Package " << Pkg << " loops in SmartConfigure" << std::endl;
- Bad = false;
- break;
- }
- else
+ if (List->IsFlag(DepPkg,pkgOrderList::UnPacked))
{
- if (Debug)
- clog << OutputInDepth(Depth) << "Unpacking " << DepPkg.FullName() << " to avoid loop " << Cur << endl;
+ if (List->IsFlag(DepPkg,pkgOrderList::Loop) && PkgLoop)
+ {
+ // This dependency has already been dealt with by another SmartConfigure on Pkg
+ Bad = false;
+ break;
+ }
+ /* Check for a loop to prevent one forming
+ If A depends on B and B depends on A, SmartConfigure will
+		    just hop between them if this is not checked. Don't remove the
+ loop flag after finishing however as loop is already set.
+ This means that there is another SmartConfigure call for this
+ package and it will remove the loop flag */
if (PkgLoop == false)
List->Flag(Pkg,pkgOrderList::Loop);
- if (SmartUnPack(DepPkg, true, Depth + 1) == true)
+ if (SmartConfigure(DepPkg, Depth + 1) == true)
{
Bad = false;
if (List->IsFlag(DepPkg,pkgOrderList::Loop) == false)
- Changed = true;
+ Changed = true;
}
if (PkgLoop == false)
- List->RmFlag(Pkg,pkgOrderList::Loop);
+ List->RmFlag(Pkg,pkgOrderList::Loop);
+	       // If SmartConfigure was successful, Bad is false, so break
if (Bad == false)
break;
}
+ else if (List->IsFlag(DepPkg,pkgOrderList::Configured))
+ {
+ Bad = false;
+ break;
+ }
}
-
- if (Cur == End)
+ if (Cur == End || Bad == false)
break;
- }
+ }
+
if (Bad == true && Changed == false && Debug == true)
- std::clog << OutputInDepth(Depth) << "Could not satisfy " << Start << std::endl;
+ std::clog << OutputInDepth(Depth) << "Could not satisfy " << *D << std::endl;
}
if (i++ > max_loops)
- return _error->Error("Internal error: MaxLoopCount reached in SmartUnPack for %s, aborting", Pkg.FullName().c_str());
+ return _error->Error("Internal error: MaxLoopCount reached in SmartUnPack (2) for %s, aborting", Pkg.FullName().c_str());
} while (Changed == true);
if (Bad) {
@@ -603,7 +628,7 @@ bool pkgPackageManager::SmartUnPack(PkgIterator Pkg, bool const Immediate, int c
This will be either dealt with if the package is configured as a dependency of Pkg (if and when Pkg is configured),
or by the ConfigureAll call at the end of the for loop in OrderInstall. */
bool Changed = false;
- const unsigned int max_loops = _config->FindI("APT::pkgPackageManager::MaxLoopCount", 500);
+ const unsigned int max_loops = _config->FindI("APT::pkgPackageManager::MaxLoopCount", 5000);
unsigned int i = 0;
do
{
diff --git a/apt-pkg/pkgcache.cc b/apt-pkg/pkgcache.cc
index bc6616839..0b8b6fe77 100644
--- a/apt-pkg/pkgcache.cc
+++ b/apt-pkg/pkgcache.cc
@@ -924,6 +924,18 @@ string pkgCache::VerIterator::RelStr() const
return Res;
}
/*}}}*/
+// VerIterator::MultiArchType - string representing MultiArch flag /*{{{*/
+const char * pkgCache::VerIterator::MultiArchType() const
+{
+ if ((S->MultiArch & pkgCache::Version::Same) == pkgCache::Version::Same)
+ return "same";
+ else if ((S->MultiArch & pkgCache::Version::Foreign) == pkgCache::Version::Foreign)
+ return "foreign";
+ else if ((S->MultiArch & pkgCache::Version::Allowed) == pkgCache::Version::Allowed)
+ return "allowed";
+ return "none";
+}
+ /*}}}*/
// PkgFileIterator::IsOk - Checks if the cache is in sync with the file /*{{{*/
// ---------------------------------------------------------------------
/* This stats the file and compares its stats with the ones that were
diff --git a/apt-pkg/pkgcachegen.cc b/apt-pkg/pkgcachegen.cc
index 3f10b6c40..7ce7aba7b 100644
--- a/apt-pkg/pkgcachegen.cc
+++ b/apt-pkg/pkgcachegen.cc
@@ -296,6 +296,8 @@ bool pkgCacheGenerator::MergeListPackage(ListParser &List, pkgCache::PkgIterator
// Find the right version to write the description
MD5SumValue CurMd5 = List.Description_md5();
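+   // nothing to record if the stanza carries no description at all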
+ if (CurMd5.Value().empty() == true || List.Description().empty() == true)
+ return true;
std::string CurLang = List.DescriptionLanguage();
for (Ver = Pkg.VersionList(); Ver.end() == false; ++Ver)
@@ -480,7 +482,7 @@ bool pkgCacheGenerator::MergeListVersion(ListParser &List, pkgCache::PkgIterator
/* Record the Description (it is not translated) */
MD5SumValue CurMd5 = List.Description_md5();
- if (CurMd5.Value().empty() == true)
+ if (CurMd5.Value().empty() == true || List.Description().empty() == true)
return true;
std::string CurLang = List.DescriptionLanguage();
diff --git a/apt-pkg/policy.cc b/apt-pkg/policy.cc
index 4ae3b5f87..0a06cc6e3 100644
--- a/apt-pkg/policy.cc
+++ b/apt-pkg/policy.cc
@@ -166,11 +166,15 @@ pkgCache::VerIterator pkgPolicy::GetCandidateVer(pkgCache::PkgIterator const &Pk
tracks the default when the default is taken away, and a permanent
pin that stays at that setting.
*/
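+   // remember whether the preferred version was seen while walking the version list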
+ bool PrefSeen = false;
for (pkgCache::VerIterator Ver = Pkg.VersionList(); Ver.end() == false; ++Ver)
{
/* Lets see if this version is the installed version */
bool instVer = (Pkg.CurrentVer() == Ver);
+ if (Pref == Ver)
+ PrefSeen = true;
+
for (pkgCache::VerFileIterator VF = Ver.FileList(); VF.end() == false; ++VF)
{
/* If this is the status file, and the current version is not the
@@ -187,26 +191,33 @@ pkgCache::VerIterator pkgPolicy::GetCandidateVer(pkgCache::PkgIterator const &Pk
{
Pref = Ver;
Max = Prio;
+ PrefSeen = true;
}
if (Prio > MaxAlt)
{
PrefAlt = Ver;
MaxAlt = Prio;
- }
- }
-
+ }
+ }
+
if (instVer == true && Max < 1000)
{
+ /* Not having seen the Pref yet means we have a specific pin below 1000
+ on a version below the current installed one, so ignore the specific pin
+ as this would be a downgrade otherwise */
+ if (PrefSeen == false || Pref.end() == true)
+ {
+ Pref = Ver;
+ PrefSeen = true;
+ }
/* Elevate our current selection (or the status file itself)
to the Pseudo-status priority. */
- if (Pref.end() == true)
- Pref = Ver;
Max = 1000;
-
+
// Fast path optimize.
if (StatusOverride == false)
break;
- }
+ }
}
// If we do not find our candidate, use the one with the highest pin.
// This means that if there is a version available with pin > 0; there
diff --git a/apt-pkg/tagfile.h b/apt-pkg/tagfile.h
index 4718f5101..fedd72701 100644
--- a/apt-pkg/tagfile.h
+++ b/apt-pkg/tagfile.h
@@ -84,7 +84,7 @@ class pkgTagSection
Stop = this->Stop;
};
- pkgTagSection() : Section(0), TagCount(0), Stop(0) {};
+ pkgTagSection() : Section(0), TagCount(0), d(NULL), Stop(0) {};
virtual ~pkgTagSection() {};
};
diff --git a/apt-pkg/vendorlist.cc b/apt-pkg/vendorlist.cc
index ecfc7db87..602425624 100644
--- a/apt-pkg/vendorlist.cc
+++ b/apt-pkg/vendorlist.cc
@@ -66,7 +66,7 @@ bool pkgVendorList::CreateList(Configuration& Cnf) /*{{{*/
Configuration Block(Top);
string VendorID = Top->Tag;
vector <struct Vendor::Fingerprint *> *Fingerprints = new vector<Vendor::Fingerprint *>;
- struct Vendor::Fingerprint *Fingerprint = new struct Vendor::Fingerprint;
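+      // the () requests value-initialization of the new struct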
+ struct Vendor::Fingerprint *Fingerprint = new struct Vendor::Fingerprint();
string Origin = Block.Find("Origin");
Fingerprint->Print = Block.Find("Fingerprint");