-rw-r--r--  apt-inst/deb/debfile.h       |   4
-rw-r--r--  apt-pkg/acquire-worker.cc    |  13
-rw-r--r--  apt-pkg/algorithms.cc        |  37
-rw-r--r--  apt-pkg/algorithms.h         |   4
-rw-r--r--  apt-pkg/aptconfiguration.cc  |   6
-rw-r--r--  apt-pkg/contrib/fileutl.cc   | 172
-rw-r--r--  apt-pkg/deb/debindexfile.cc  |   6
-rw-r--r--  debian/changelog             |  24
-rw-r--r--  methods/rred.cc              |   1
-rwxr-xr-x  prepare-release              |  29
-rw-r--r--  test/integration/framework   |   4
11 files changed, 222 insertions(+), 78 deletions(-)
diff --git a/apt-inst/deb/debfile.h b/apt-inst/deb/debfile.h
index d94b74446..38211fb0f 100644
--- a/apt-inst/deb/debfile.h
+++ b/apt-inst/deb/debfile.h
@@ -29,6 +29,10 @@
#include <apt-pkg/tagfile.h>
#include <apt-pkg/pkgcache.h>
+#ifndef APT_8_CLEANER_HEADERS
+#include <apt-pkg/md5.h>
+#endif
+
class FileFd;
class debDebFile
diff --git a/apt-pkg/acquire-worker.cc b/apt-pkg/acquire-worker.cc
index 8bc043c58..d6db8bc02 100644
--- a/apt-pkg/acquire-worker.cc
+++ b/apt-pkg/acquire-worker.cc
@@ -511,10 +511,17 @@ bool pkgAcquire::Worker::QueueItem(pkgAcquire::Queue::QItem *Item)
/* */
bool pkgAcquire::Worker::OutFdReady()
{
- if (FileFd::Write(OutFd,OutQueue.c_str(),OutQueue.length()) == false)
+ int Res;
+ do
+ {
+ Res = write(OutFd,OutQueue.c_str(),OutQueue.length());
+ }
+ while (Res < 0 && errno == EINTR);
+
+ if (Res <= 0)
return MethodFailure();
-
- OutQueue.clear();
+
+ OutQueue.erase(0,Res);
if (OutQueue.empty() == true)
OutReady = false;
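
The hunk above replaces FileFd::Write() with a plain write(2) loop so that EINTR is retried silently and a partial write only drops the bytes that actually went out; per the changelog entry further down, the point is to avoid error reports for EAGAIN here because the worker simply retries once the fd is ready again. A minimal standalone sketch of the same pattern, outside apt's classes (names hypothetical):

// Sketch of the retry-on-EINTR / partial-write pattern used above.
// Returns false if nothing could be written; on success 'queue' keeps
// whatever the kernel did not accept yet, ready for the next attempt.
#include <cerrno>
#include <string>
#include <unistd.h>

static bool FlushSome(int fd, std::string &queue)
{
   ssize_t res;
   do
   {
      res = write(fd, queue.c_str(), queue.length());
   }
   while (res < 0 && errno == EINTR);

   if (res <= 0)
      return false;

   // erase only what was written; the remainder is retried later
   queue.erase(0, static_cast<size_t>(res));
   return true;
}
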
diff --git a/apt-pkg/algorithms.cc b/apt-pkg/algorithms.cc
index 8beb2d51c..e7b359981 100644
--- a/apt-pkg/algorithms.cc
+++ b/apt-pkg/algorithms.cc
@@ -1453,7 +1453,7 @@ void pkgPrioSortList(pkgCache &Cache,pkgCache::Version **List)
qsort(List,Count,sizeof(*List),PrioComp);
}
/*}}}*/
-// ListUpdate - update the cache files /*{{{*/
+// ListUpdate - construct Fetcher and update the cache files /*{{{*/
// ---------------------------------------------------------------------
/* This is a simple wrapper to update the cache. it will fetch stuff
* from the network (or any other sources defined in sources.list)
@@ -1462,7 +1462,6 @@ bool ListUpdate(pkgAcquireStatus &Stat,
pkgSourceList &List,
int PulseInterval)
{
- pkgAcquire::RunResult res;
pkgAcquire Fetcher;
if (Fetcher.Setup(&Stat, _config->FindDir("Dir::State::Lists")) == false)
return false;
@@ -1471,11 +1470,24 @@ bool ListUpdate(pkgAcquireStatus &Stat,
if (List.GetIndexes(&Fetcher) == false)
return false;
+ return AcquireUpdate(Fetcher, PulseInterval, true);
+}
+ /*}}}*/
+// AcquireUpdate - take Fetcher and update the cache files /*{{{*/
+// ---------------------------------------------------------------------
+/* This is a simple wrapper to update the cache with a provided acquire
+ * If you only need control over Status and the used SourcesList use
+ * ListUpdate method instead.
+ */
+bool AcquireUpdate(pkgAcquire &Fetcher, int const PulseInterval,
+ bool const RunUpdateScripts, bool const ListCleanup)
+{
// Run scripts
- RunScripts("APT::Update::Pre-Invoke");
-
- // check arguments
- if(PulseInterval>0)
+ if (RunUpdateScripts == true)
+ RunScripts("APT::Update::Pre-Invoke");
+
+ pkgAcquire::RunResult res;
+ if(PulseInterval > 0)
res = Fetcher.Run(PulseInterval);
else
res = Fetcher.Run();
@@ -1512,7 +1524,7 @@ bool ListUpdate(pkgAcquireStatus &Stat,
// Clean out any old list files
// Keep "APT::Get::List-Cleanup" name for compatibility, but
// this is really a global option for the APT library now
- if (!TransientNetworkFailure && !Failed &&
+ if (!TransientNetworkFailure && !Failed && ListCleanup == true &&
(_config->FindB("APT::Get::List-Cleanup",true) == true &&
_config->FindB("APT::List-Cleanup",true) == true))
{
@@ -1529,11 +1541,14 @@ bool ListUpdate(pkgAcquireStatus &Stat,
// Run the success scripts if all was fine
- if(!TransientNetworkFailure && !Failed)
- RunScripts("APT::Update::Post-Invoke-Success");
+ if (RunUpdateScripts == true)
+ {
+ if(!TransientNetworkFailure && !Failed)
+ RunScripts("APT::Update::Post-Invoke-Success");
- // Run the other scripts
- RunScripts("APT::Update::Post-Invoke");
+ // Run the other scripts
+ RunScripts("APT::Update::Post-Invoke");
+ }
return true;
}
/*}}}*/
diff --git a/apt-pkg/algorithms.h b/apt-pkg/algorithms.h
index 37eacf1f8..076542c20 100644
--- a/apt-pkg/algorithms.h
+++ b/apt-pkg/algorithms.h
@@ -147,5 +147,7 @@ bool pkgMinimizeUpgrade(pkgDepCache &Cache);
void pkgPrioSortList(pkgCache &Cache,pkgCache::Version **List);
bool ListUpdate(pkgAcquireStatus &progress, pkgSourceList &List, int PulseInterval=0);
-
+bool AcquireUpdate(pkgAcquire &Fetcher, int const PulseInterval = 0,
+ bool const RunUpdateScripts = true, bool const ListCleanup = true);
+
#endif
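
The new AcquireUpdate() declared above factors the fetch-and-cleanup logic out of ListUpdate() so a caller can drive it with its own pkgAcquire. A hedged sketch of such a caller, assuming the usual libapt-pkg headers (function name hypothetical):

// Drive the factored-out AcquireUpdate() with a caller-owned fetcher,
// mirroring what ListUpdate() itself now does internally.
#include <apt-pkg/acquire.h>
#include <apt-pkg/algorithms.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/sourcelist.h>

bool UpdateWithOwnFetcher(pkgAcquireStatus &Stat, pkgSourceList &List)
{
   pkgAcquire Fetcher;
   if (Fetcher.Setup(&Stat, _config->FindDir("Dir::State::Lists")) == false)
      return false;

   // Populate the fetcher; a caller could also queue extra items here.
   if (List.GetIndexes(&Fetcher) == false)
      return false;

   // PulseInterval = 0, run the Pre/Post-Invoke scripts, clean old lists.
   return AcquireUpdate(Fetcher, 0, true, true);
}
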
diff --git a/apt-pkg/aptconfiguration.cc b/apt-pkg/aptconfiguration.cc
index e1225b3e6..0fd470ed5 100644
--- a/apt-pkg/aptconfiguration.cc
+++ b/apt-pkg/aptconfiguration.cc
@@ -477,18 +477,18 @@ const Configuration::getCompressors(bool const Cached) {
setDefaultConfigurationForCompressors();
- compressors.push_back(Compressor(".", "", "", "", "", 1));
+ compressors.push_back(Compressor(".", "", "", NULL, NULL, 1));
if (_config->Exists("Dir::Bin::gzip") == false || FileExists(_config->FindFile("Dir::Bin::gzip")) == true)
compressors.push_back(Compressor("gzip",".gz","gzip","-9n","-d",2));
#ifdef HAVE_ZLIB
else
- compressors.push_back(Compressor("gzip",".gz","false", "", "", 2));
+ compressors.push_back(Compressor("gzip",".gz","false", NULL, NULL, 2));
#endif
if (_config->Exists("Dir::Bin::bzip2") == false || FileExists(_config->FindFile("Dir::Bin::bzip2")) == true)
compressors.push_back(Compressor("bzip2",".bz2","bzip2","-9","-d",3));
#ifdef HAVE_BZ2
else
- compressors.push_back(Compressor("bzip2",".bz2","false", "", "", 3));
+ compressors.push_back(Compressor("bzip2",".bz2","false", NULL, NULL, 3));
#endif
if (_config->Exists("Dir::Bin::xz") == false || FileExists(_config->FindFile("Dir::Bin::xz")) == true)
compressors.push_back(Compressor("xz",".xz","xz","-6","-d",4));
diff --git a/apt-pkg/contrib/fileutl.cc b/apt-pkg/contrib/fileutl.cc
index e9d1ba1ce..c8e685a5a 100644
--- a/apt-pkg/contrib/fileutl.cc
+++ b/apt-pkg/contrib/fileutl.cc
@@ -905,8 +905,6 @@ bool FileFd::Open(string FileName,unsigned int const Mode,CompressMode Compress,
bool FileFd::Open(string FileName,unsigned int const Mode,APT::Configuration::Compressor const &compressor, unsigned long const Perms)
{
Close();
- d = new FileFdPrivate;
- d->openmode = Mode;
Flags = AutoClose;
if ((Mode & WriteOnly) != WriteOnly && (Mode & (Atomic | Create | Empty | Exclusive)) != 0)
@@ -1000,8 +998,6 @@ bool FileFd::OpenDescriptor(int Fd, unsigned int const Mode, CompressMode Compre
bool FileFd::OpenDescriptor(int Fd, unsigned int const Mode, APT::Configuration::Compressor const &compressor, bool AutoClose)
{
Close();
- d = new FileFdPrivate;
- d->openmode = Mode;
Flags = (AutoClose) ? FileFd::AutoClose : 0;
iFd = Fd;
this->FileName = "";
@@ -1015,12 +1011,24 @@ bool FileFd::OpenDescriptor(int Fd, unsigned int const Mode, APT::Configuration:
}
bool FileFd::OpenInternDescriptor(unsigned int const Mode, APT::Configuration::Compressor const &compressor)
{
- d->compressor = compressor;
if (compressor.Name == "." || compressor.Binary.empty() == true)
return true;
+
+ if (d == NULL)
+ {
+ d = new FileFdPrivate();
+ d->openmode = Mode;
+ d->compressor = compressor;
+ }
+
#ifdef HAVE_ZLIB
- else if (compressor.Name == "gzip")
+ if (compressor.Name == "gzip")
{
+ if (d->gz != NULL)
+ {
+ gzclose(d->gz);
+ d->gz = NULL;
+ }
if ((Mode & ReadWrite) == ReadWrite)
d->gz = gzdopen(iFd, "r+");
else if ((Mode & WriteOnly) == WriteOnly)
@@ -1034,8 +1042,13 @@ bool FileFd::OpenInternDescriptor(unsigned int const Mode, APT::Configuration::C
}
#endif
#ifdef HAVE_BZ2
- else if (compressor.Name == "bzip2")
+ if (compressor.Name == "bzip2")
{
+ if (d->bz2 != NULL)
+ {
+ BZ2_bzclose(d->bz2);
+ d->bz2 = NULL;
+ }
if ((Mode & ReadWrite) == ReadWrite)
d->bz2 = BZ2_bzdopen(iFd, "r+");
else if ((Mode & WriteOnly) == WriteOnly)
@@ -1049,14 +1062,20 @@ bool FileFd::OpenInternDescriptor(unsigned int const Mode, APT::Configuration::C
}
#endif
+ // collect zombies here in case we reopen
+ if (d->compressor_pid > 0)
+ ExecWait(d->compressor_pid, "FileFdCompressor", true);
if ((Mode & ReadWrite) == ReadWrite)
+ {
+ Flags |= Fail;
return _error->Error("ReadWrite mode is not supported for file %s", FileName.c_str());
+ }
bool const Comp = (Mode & WriteOnly) == WriteOnly;
- // Handle 'decompression' of empty files
if (Comp == false)
{
+ // Handle 'decompression' of empty files
struct stat Buf;
fstat(iFd, &Buf);
if (Buf.st_size == 0 && S_ISFIFO(Buf.st_mode) == false)
@@ -1065,13 +1084,19 @@ bool FileFd::OpenInternDescriptor(unsigned int const Mode, APT::Configuration::C
// We don't need the file open - instead let the compressor open it
// as he properly knows better how to efficiently read from 'his' file
if (FileName.empty() == false)
+ {
close(iFd);
+ iFd = -1;
+ }
}
// Create a data pipe
int Pipe[2] = {-1,-1};
if (pipe(Pipe) != 0)
+ {
+ Flags |= Fail;
return _error->Errno("pipe",_("Failed to create subprocess IPC"));
+ }
for (int J = 0; J != 2; J++)
SetCloseExec(Pipe[J],true);
@@ -1133,7 +1158,7 @@ bool FileFd::OpenInternDescriptor(unsigned int const Mode, APT::Configuration::C
close(Pipe[0]);
else
close(Pipe[1]);
- if (Comp == true || FileName.empty() == true)
+ if ((Comp == true || FileName.empty() == true) && d->compressed_fd != -1)
close(d->compressed_fd);
return true;
@@ -1162,12 +1187,12 @@ bool FileFd::Read(void *To,unsigned long long Size,unsigned long long *Actual)
do
{
#ifdef HAVE_ZLIB
- if (d->gz != NULL)
+ if (d != NULL && d->gz != NULL)
Res = gzread(d->gz,To,Size);
else
#endif
#ifdef HAVE_BZ2
- if (d->bz2 != NULL)
+ if (d != NULL && d->bz2 != NULL)
Res = BZ2_bzread(d->bz2,To,Size);
else
#endif
@@ -1179,7 +1204,7 @@ bool FileFd::Read(void *To,unsigned long long Size,unsigned long long *Actual)
continue;
Flags |= Fail;
#ifdef HAVE_ZLIB
- if (d->gz != NULL)
+ if (d != NULL && d->gz != NULL)
{
int err;
char const * const errmsg = gzerror(d->gz, &err);
@@ -1188,7 +1213,7 @@ bool FileFd::Read(void *To,unsigned long long Size,unsigned long long *Actual)
}
#endif
#ifdef HAVE_BZ2
- if (d->bz2 != NULL)
+ if (d != NULL && d->bz2 != NULL)
{
int err;
char const * const errmsg = BZ2_bzerror(d->bz2, &err);
@@ -1201,7 +1226,8 @@ bool FileFd::Read(void *To,unsigned long long Size,unsigned long long *Actual)
To = (char *)To + Res;
Size -= Res;
- d->seekpos += Res;
+ if (d != NULL)
+ d->seekpos += Res;
if (Actual != 0)
*Actual += Res;
}
@@ -1229,7 +1255,7 @@ char* FileFd::ReadLine(char *To, unsigned long long const Size)
{
*To = '\0';
#ifdef HAVE_ZLIB
- if (d->gz != NULL)
+ if (d != NULL && d->gz != NULL)
return gzgets(d->gz, To, Size);
#endif
@@ -1260,12 +1286,12 @@ bool FileFd::Write(const void *From,unsigned long long Size)
do
{
#ifdef HAVE_ZLIB
- if (d->gz != NULL)
+ if (d != NULL && d->gz != NULL)
Res = gzwrite(d->gz,From,Size);
else
#endif
#ifdef HAVE_BZ2
- if (d->bz2 != NULL)
+ if (d != NULL && d->bz2 != NULL)
Res = BZ2_bzwrite(d->bz2,(void*)From,Size);
else
#endif
@@ -1276,7 +1302,7 @@ bool FileFd::Write(const void *From,unsigned long long Size)
{
Flags |= Fail;
#ifdef HAVE_ZLIB
- if (d->gz != NULL)
+ if (d != NULL && d->gz != NULL)
{
int err;
char const * const errmsg = gzerror(d->gz, &err);
@@ -1285,7 +1311,7 @@ bool FileFd::Write(const void *From,unsigned long long Size)
}
#endif
#ifdef HAVE_BZ2
- if (d->bz2 != NULL)
+ if (d != NULL && d->bz2 != NULL)
{
int err;
char const * const errmsg = BZ2_bzerror(d->bz2, &err);
@@ -1298,7 +1324,8 @@ bool FileFd::Write(const void *From,unsigned long long Size)
From = (char *)From + Res;
Size -= Res;
- d->seekpos += Res;
+ if (d != NULL)
+ d->seekpos += Res;
}
while (Res > 0 && Size > 0);
@@ -1336,11 +1363,11 @@ bool FileFd::Write(int Fd, const void *From, unsigned long long Size)
/* */
bool FileFd::Seek(unsigned long long To)
{
- if (d->pipe == true
+ if (d != NULL && (d->pipe == true
#ifdef HAVE_BZ2
- || d->bz2 != NULL
+ || d->bz2 != NULL
#endif
- )
+ ))
{
// Our poor man seeking in pipes is costly, so try to avoid it
unsigned long long seekpos = Tell();
@@ -1350,13 +1377,17 @@ bool FileFd::Seek(unsigned long long To)
return Skip(To - seekpos);
if ((d->openmode & ReadOnly) != ReadOnly)
+ {
+ Flags |= Fail;
return _error->Error("Reopen is only implemented for read-only files!");
+ }
#ifdef HAVE_BZ2
if (d->bz2 != NULL)
BZ2_bzclose(d->bz2);
#endif
- close(iFd);
- iFd = 0;
+ if (iFd != -1)
+ close(iFd);
+ iFd = -1;
if (TemporaryFileName.empty() == false)
iFd = open(TemporaryFileName.c_str(), O_RDONLY);
else if (FileName.empty() == false)
@@ -1367,11 +1398,17 @@ bool FileFd::Seek(unsigned long long To)
if (lseek(d->compressed_fd, 0, SEEK_SET) != 0)
iFd = d->compressed_fd;
if (iFd <= 0)
+ {
+ Flags |= Fail;
return _error->Error("Reopen is not implemented for pipes opened with FileFd::OpenDescriptor()!");
+ }
}
if (OpenInternDescriptor(d->openmode, d->compressor) == false)
+ {
+ Flags |= Fail;
return _error->Error("Seek on file %s because it couldn't be reopened", FileName.c_str());
+ }
if (To != 0)
return Skip(To);
@@ -1381,7 +1418,7 @@ bool FileFd::Seek(unsigned long long To)
}
int res;
#ifdef HAVE_ZLIB
- if (d->gz)
+ if (d != NULL && d->gz)
res = gzseek(d->gz,To,SEEK_SET);
else
#endif
@@ -1392,7 +1429,8 @@ bool FileFd::Seek(unsigned long long To)
return _error->Error("Unable to seek to %llu", To);
}
- d->seekpos = To;
+ if (d != NULL)
+ d->seekpos = To;
return true;
}
/*}}}*/
@@ -1401,11 +1439,11 @@ bool FileFd::Seek(unsigned long long To)
/* */
bool FileFd::Skip(unsigned long long Over)
{
- if (d->pipe == true
+ if (d != NULL && (d->pipe == true
#ifdef HAVE_BZ2
- || d->bz2 != NULL
+ || d->bz2 != NULL
#endif
- )
+ ))
{
d->seekpos += Over;
char buffer[1024];
@@ -1413,7 +1451,10 @@ bool FileFd::Skip(unsigned long long Over)
{
unsigned long long toread = std::min((unsigned long long) sizeof(buffer), Over);
if (Read(buffer, toread) == false)
+ {
+ Flags |= Fail;
return _error->Error("Unable to seek ahead %llu",Over);
+ }
Over -= toread;
}
return true;
@@ -1421,7 +1462,7 @@ bool FileFd::Skip(unsigned long long Over)
int res;
#ifdef HAVE_ZLIB
- if (d->gz != NULL)
+ if (d != NULL && d->gz != NULL)
res = gzseek(d->gz,Over,SEEK_CUR);
else
#endif
@@ -1431,7 +1472,8 @@ bool FileFd::Skip(unsigned long long Over)
Flags |= Fail;
return _error->Error("Unable to seek ahead %llu",Over);
}
- d->seekpos = res;
+ if (d != NULL)
+ d->seekpos = res;
return true;
}
@@ -1442,7 +1484,7 @@ bool FileFd::Skip(unsigned long long Over)
bool FileFd::Truncate(unsigned long long To)
{
#if defined HAVE_ZLIB || defined HAVE_BZ2
- if (d->gz != NULL || d->bz2 != NULL)
+ if (d != NULL && (d->gz != NULL || d->bz2 != NULL))
{
Flags |= Fail;
return _error->Error("Truncating compressed files is not implemented (%s)", FileName.c_str());
@@ -1466,23 +1508,27 @@ unsigned long long FileFd::Tell()
// seeking around, but not all users of FileFd use always Seek() and co
// so d->seekpos isn't always true and we can just use it as a hint if
// we have nothing else, but not always as an authority…
- if (d->pipe == true
+ if (d != NULL && (d->pipe == true
#ifdef HAVE_BZ2
- || d->bz2 != NULL
+ || d->bz2 != NULL
#endif
- )
+ ))
return d->seekpos;
off_t Res;
#ifdef HAVE_ZLIB
- if (d->gz != NULL)
+ if (d != NULL && d->gz != NULL)
Res = gztell(d->gz);
else
#endif
Res = lseek(iFd,0,SEEK_CUR);
if (Res == (off_t)-1)
+ {
+ Flags |= Fail;
_error->Errno("lseek","Failed to determine the current file position");
- d->seekpos = Res;
+ }
+ if (d != NULL)
+ d->seekpos = Res;
return Res;
}
/*}}}*/
@@ -1492,17 +1538,24 @@ unsigned long long FileFd::Tell()
unsigned long long FileFd::FileSize()
{
struct stat Buf;
- if (d->pipe == false && fstat(iFd,&Buf) != 0)
+ if ((d == NULL || d->pipe == false) && fstat(iFd,&Buf) != 0)
+ {
+ Flags |= Fail;
return _error->Errno("fstat","Unable to determine the file size");
+ }
// for compressor pipes st_size is undefined and at 'best' zero
- if (d->pipe == true || S_ISFIFO(Buf.st_mode))
+ if ((d != NULL && d->pipe == true) || S_ISFIFO(Buf.st_mode))
{
// we set it here, too, as we get the info here for free
// in theory the Open-methods should take care of it already
- d->pipe = true;
+ if (d != NULL)
+ d->pipe = true;
if (stat(FileName.c_str(), &Buf) != 0)
+ {
+ Flags |= Fail;
return _error->Errno("stat","Unable to determine the file size");
+ }
}
return Buf.st_size;
@@ -1517,11 +1570,11 @@ unsigned long long FileFd::Size()
// for compressor pipes st_size is undefined and at 'best' zero,
// so we 'read' the content and 'seek' back - see there
- if (d->pipe == true
+ if (d != NULL && (d->pipe == true
#ifdef HAVE_BZ2
- || (d->bz2 && size > 0)
+ || (d->bz2 && size > 0)
#endif
- )
+ ))
{
unsigned long long const oldSeek = Tell();
char ignore[1000];
@@ -1536,7 +1589,7 @@ unsigned long long FileFd::Size()
// only check gzsize if we are actually a gzip file, just checking for
// "gz" is not sufficient as uncompressed files could be opened with
// gzopen in "direct" mode as well
- else if (d->gz && !gzdirect(d->gz) && size > 0)
+ else if (d != NULL && d->gz && !gzdirect(d->gz) && size > 0)
{
off_t const oldPos = lseek(iFd,0,SEEK_CUR);
/* unfortunately zlib.h doesn't provide a gzsize(), so we have to do
@@ -1544,10 +1597,16 @@ unsigned long long FileFd::Size()
* bits of the file */
// FIXME: Size for gz-files is limited by 32bit… no largefile support
if (lseek(iFd, -4, SEEK_END) < 0)
- return _error->Errno("lseek","Unable to seek to end of gzipped file");
+ {
+ Flags |= Fail;
+ return _error->Errno("lseek","Unable to seek to end of gzipped file");
+ }
size = 0L;
if (read(iFd, &size, 4) != 4)
- return _error->Errno("read","Unable to read original size of gzipped file");
+ {
+ Flags |= Fail;
+ return _error->Errno("read","Unable to read original size of gzipped file");
+ }
#ifdef WORDS_BIGENDIAN
uint32_t tmp_size = size;
@@ -1557,7 +1616,10 @@ unsigned long long FileFd::Size()
#endif
if (lseek(iFd, oldPos, SEEK_SET) < 0)
- return _error->Errno("lseek","Unable to seek in gzipped file");
+ {
+ Flags |= Fail;
+ return _error->Errno("lseek","Unable to seek in gzipped file");
+ }
return size;
}
@@ -1572,20 +1634,23 @@ unsigned long long FileFd::Size()
time_t FileFd::ModificationTime()
{
struct stat Buf;
- if (d->pipe == false && fstat(iFd,&Buf) != 0)
+ if ((d == NULL || d->pipe == false) && fstat(iFd,&Buf) != 0)
{
+ Flags |= Fail;
_error->Errno("fstat","Unable to determine the modification time of file %s", FileName.c_str());
return 0;
}
// for compressor pipes st_size is undefined and at 'best' zero
- if (d->pipe == true || S_ISFIFO(Buf.st_mode))
+ if ((d != NULL && d->pipe == true) || S_ISFIFO(Buf.st_mode))
{
// we set it here, too, as we get the info here for free
// in theory the Open-methods should take care of it already
- d->pipe = true;
+ if (d != NULL)
+ d->pipe = true;
if (stat(FileName.c_str(), &Buf) != 0)
{
+ Flags |= Fail;
_error->Errno("fstat","Unable to determine the modification time of file %s", FileName.c_str());
return 0;
}
@@ -1645,6 +1710,8 @@ bool FileFd::Close()
d = NULL;
}
+ if (Res == false)
+ Flags |= Fail;
return Res;
}
/*}}}*/
@@ -1655,7 +1722,10 @@ bool FileFd::Sync()
{
#ifdef _POSIX_SYNCHRONIZED_IO
if (fsync(iFd) != 0)
+ {
+ Flags |= Fail;
return _error->Errno("sync",_("Problem syncing the file"));
+ }
#endif
return true;
}
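
Taken together, the fileutl.cc changes above allocate FileFdPrivate lazily (d stays NULL for plain, uncompressed descriptors), guard every access to d, and set the Fail flag on the touched error paths. A hedged caller-side sketch, with a hypothetical helper and placeholder path, showing how Failed() now also reflects those low-level errors:

// Read a possibly compressed file and rely on Failed(), which after this
// commit is also raised by the low-level error paths patched above.
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>

bool SumFile(char const *Path)
{
   FileFd Fd(Path, FileFd::ReadOnly, FileFd::Extension);
   if (Fd.Failed() == true)
      return _error->Error("Could not open %s", Path);

   char Buffer[4096];
   unsigned long long Actual = 0;
   unsigned long long Total = 0;
   // Read(…, &Actual) stores how much was really read and tolerates EOF.
   while (Fd.Read(Buffer, sizeof(Buffer), &Actual) == true && Actual != 0)
      Total += Actual;

   if (Fd.Failed() == true)
      return _error->Error("Reading %s failed after %llu bytes", Path, Total);
   return true;
}
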
diff --git a/apt-pkg/deb/debindexfile.cc b/apt-pkg/deb/debindexfile.cc
index 5dc2a2ac2..76c740341 100644
--- a/apt-pkg/deb/debindexfile.cc
+++ b/apt-pkg/deb/debindexfile.cc
@@ -161,7 +161,7 @@ unsigned long debSourcesIndex::Size() const
/* we need to ignore errors here; if the lists are absent, just return 0 */
_error->PushToStack();
- FileFd f = FileFd (IndexFile("Sources"), FileFd::ReadOnly, FileFd::Extension);
+ FileFd f(IndexFile("Sources"), FileFd::ReadOnly, FileFd::Extension);
if (!f.Failed())
size = f.Size();
@@ -290,7 +290,7 @@ unsigned long debPackagesIndex::Size() const
/* we need to ignore errors here; if the lists are absent, just return 0 */
_error->PushToStack();
- FileFd f = FileFd (IndexFile("Packages"), FileFd::ReadOnly, FileFd::Extension);
+ FileFd f(IndexFile("Packages"), FileFd::ReadOnly, FileFd::Extension);
if (!f.Failed())
size = f.Size();
@@ -488,7 +488,7 @@ unsigned long debTranslationsIndex::Size() const
/* we need to ignore errors here; if the lists are absent, just return 0 */
_error->PushToStack();
- FileFd f = FileFd (IndexFile(Language), FileFd::ReadOnly, FileFd::Extension);
+ FileFd f(IndexFile(Language), FileFd::ReadOnly, FileFd::Extension);
if (!f.Failed())
size = f.Size();
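
The Size() methods above deliberately ignore errors while probing index files that may be absent, by scoping them with the global error stack around a directly initialized FileFd. A hedged sketch of that pattern (helper name hypothetical; the matching RevertToStack() call is assumed from the surrounding apt code):

// Probe an optional index file: errors raised inside the scope are pushed
// to a private error stack and reverted, so they never reach the caller.
#include <string>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>

unsigned long long ProbeIndexSize(std::string const &Path)
{
   unsigned long long Size = 0;
   _error->PushToStack();               // open a fresh, private error scope
   FileFd File(Path, FileFd::ReadOnly, FileFd::Extension);
   if (File.Failed() == false)
      Size = File.Size();
   _error->RevertToStack();             // drop whatever was reported in the scope
   return Size;
}
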
diff --git a/debian/changelog b/debian/changelog
index 8eec902d0..3e36f0fa8 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,4 +1,4 @@
-apt (0.9.3) unstable; urgency=low
+apt (0.9.3) UNRELEASED; urgency=low
[ David Kalnischkies ]
* apt-pkg/contrib/strutl.cc:
@@ -34,8 +34,28 @@ apt (0.9.3) unstable; urgency=low
* apt-pkg/deb/deblistparser.cc:
- check length and containing chars for a given description md5sum
* ensure that apti18n.h is included last as advertised (Closes: #671623)
+ * apt-pkg/acquire-worker.cc:
+ - revert the use of FileFd::Write in OutFdReady as we don't want error
+ reports about EAGAIN here as we retry later. Thanks to YOSHINO Yoshihito
+ for the report. (Closes: #671721)
+ * apt-pkg/contrib/fileutl.cc:
+ - check that the fd which are closed are valid
+ - ensure that we do init d only once and especially not with its own
+ content as this causes some "interesting" hickups resulting in segfaults
+ as it seems (Closes: #554387, #670979)
+ - collect zombie (de)compressor processes on reopen
+ - ensure that in error conditions the Fail flag is set
+ - ensure that d is set before accessing it
+ * apt-pkg/aptconfiguration.cc:
+ - use NULL instead of "" for no (un)compress parameters
+ * apt-pkg/algorithms.cc:
+ - factor out of ListUpdate a AcquireUpdate to be able to provide your
+ own pkgAcquire fetcher to the wrapper
+ * apt-inst/deb/debfile.h:
+ - readd 'md5.h' to the uncleaned header includes to make qapt build
+ against us again unchanged to unblock transition (Closes: #669163)
- -- David Kalnischkies <kalnischkies@gmail.com> Sat, 05 May 2012 15:35:16 +0200
+ -- David Kalnischkies <kalnischkies@gmail.com> Mon, 07 May 2012 21:12:23 +0200
apt (0.9.2) unstable; urgency=low
diff --git a/methods/rred.cc b/methods/rred.cc
index 78d1595d4..7c65f8f92 100644
--- a/methods/rred.cc
+++ b/methods/rred.cc
@@ -15,7 +15,6 @@
#include <utime.h>
#include <stdio.h>
#include <errno.h>
-#include <zlib.h>
#include <apti18n.h>
/*}}}*/
/** \brief RredMethod - ed-style incremential patch method {{{
diff --git a/prepare-release b/prepare-release
index fd98c489f..821726ae2 100755
--- a/prepare-release
+++ b/prepare-release
@@ -3,6 +3,9 @@
VERSION=$(dpkg-parsechangelog | sed -n -e '/^Version:/s/^Version: //p')
DISTRIBUTION=$(dpkg-parsechangelog | sed -n -e '/^Distribution:/s/^Distribution: //p')
+LIBAPTPKGVERSION="$(awk -v ORS='.' '/^\#define APT_PKG_M/ {print $3}' apt-pkg/init.h | sed 's/\.$//')"
+LIBAPTINSTVERSION="$(egrep '^MAJOR=' apt-inst/makefile |cut -d '=' -f 2)"
+
if [ "$1" = 'pre-export' ]; then
libraryversioncheck() {
LIBRARY="$1"
@@ -17,8 +20,8 @@ if [ "$1" = 'pre-export' ]; then
fi
}
- libraryversioncheck 'libapt-pkg' "$(awk -v ORS='.' '/^\#define APT_PKG_M/ {print $3}' apt-pkg/init.h | sed 's/\.$//')"
- libraryversioncheck 'libapt-inst' "$(egrep '^MAJOR=' apt-inst/makefile |cut -d '=' -f 2)"
+ libraryversioncheck 'libapt-pkg' "$LIBAPTPKGVERSION"
+ libraryversioncheck 'libapt-inst' "$LIBAPTINSTVERSION"
if [ "$DISTRIBUTION" = 'sid' ]; then
@@ -43,12 +46,32 @@ elif [ "$1" = 'post-build' ]; then
else
echo >&2 'REMEMBER: Change to a valid distribution before release'
fi
+elif [ "$1" = 'library' ]; then
+ librarysymbols() {
+ echo "Checking $1 in version $2"
+ tmpfile=$(mktemp)
+ dpkg-gensymbols -p${1}${2} -ebuild/bin/${1}.so.${2} -Idebian/${1}${2}.symbols -O/dev/null 2> /dev/null > $tmpfile
+ echo '=== Missing symbols:'
+ grep '^+#MISSING' $tmpfile
+ echo '=== New symbols:'
+ grep '^+ ' $tmpfile | cut -d' ' -f 2 | cut -d'@' -f 1 | c++filt | while read line; do
+ echo " (c++)\"${line}@Base\" $VERSION"
+ done | sort -u
+ rm $tmpfile
+ }
+ librarysymbols 'libapt-pkg' "${LIBAPTPKGVERSION}"
+ echo
+ librarysymbols 'libapt-inst' "${LIBAPTINSTVERSION}"
else
echo >&1 "Usage:\t$0 pre-export
\t$0 post-build
+\t$0 library
If you use »bzr builddeb« you can leave this script alone as it will
be run at the right places auto-magically. Otherwise you should use
»pre-export« to update po and pot files as well as version numbering.
-»post-build« can be used to run some more or less useful checks later on."
+»post-build« can be used to run some more or less useful checks later on.
+
+»library« isn't run automatically but can be useful for maintaining the
+(more or less experimental) symbols files we provide"
fi
diff --git a/test/integration/framework b/test/integration/framework
index b80b02922..5a0e1070f 100644
--- a/test/integration/framework
+++ b/test/integration/framework
@@ -107,6 +107,10 @@ aptitude() {
LD_LIBRARY_PATH=${BUILDDIRECTORY} $(which aptitude) $*
fi
}
+gdb() {
+ echo "gdb: run »$*«"
+ APT_CONFIG=aptconfig.conf LD_LIBRARY_PATH=${BUILDDIRECTORY} $(which gdb) ${BUILDDIRECTORY}/$1
+}
addtrap() {
CURRENTTRAP="$CURRENTTRAP $1"