author     Michael Vogt <mvo@debian.org>  2010-07-30 12:45:00 +0200
committer  Michael Vogt <mvo@debian.org>  2010-07-30 12:45:00 +0200
commit     b5cabd30cc1be1c6c1ca675ceb359bab577c1905 (patch)
tree       2872bb0149a8123c9abb0c80462f9bb151d11d91 /methods
parent     3000cb123abe00418ccc2e388b6f93b76fe5514d (diff)
parent     e3326595301fc7bd1ee025a9dbb09ca51a08f5fa (diff)
* merge of the debian-expermental-ma branch
* debian/control:
  - add dependency on zlib-dev for libapt-pkg-dev
* apt-pkg/cacheset.cc:
  - [ABI BREAK] add an ErrorType option to CacheSetHelper
* cmdline/apt-cache.cc:
  - use Notice instead of Error in the CacheSetHelper messages
    for compat reasons. Otherwise tools like sbuild blow up
  - return success in show if a virtual package was given
* debian/control:
  - remove libcurl3-gnutls-dev alternative as the package is gone
  - increase needed version of libcurl4-gnutls-dev to >= 7.19.0
    as we use CURLOPT_{ISSUERCERT,CRLFILE} (Closes: #589642)
* apt-pkg/deb/dpkgpm.cc:
  - Write architecture information to the history file.
  - Add to history whether a change was automatic or not.
* apt-pkg/contrib/fileutl.cc:
  - Add FileFd::OpenDescriptor() (needed for python-apt's #383617).
* cmdline/apt-get.cc:
  - Support large filesystems by using statvfs64() instead of statvfs()
    and statfs64() instead of statfs() (Closes: #590513).
* apt-pkg/cdrom.cc:
  - Use link() instead of rename() for creating the CD database backup;
    otherwise there would be a short time without any database.
* apt-pkg/depcache.cc:
  - handle "circular" conflicts for "all" packages correctly
* cmdline/apt-cache.cc:
  - be able to omit dependency types in (r)depends (Closes: #319006)
  - show in (r)depends the candidate by default instead of the newest
  - share the (r)depends code instead of a code copy
* apt-pkg/cacheset.cc:
  - move them back to the library as they look stable now
  - add a 'newest' pseudo target release as in pkg/newest
* apt-pkg/pkgcache.cc:
  - prefer non-virtual packages in FindPreferredPkg (Closes: #590041)
* test/integration/*:
  - add with bug #590041 testcase a small test "framework"
* apt-pkg/orderlist.cc:
  - try to install another or-group member in DepRemove before
    breaking the or group (Closes: #590438)
  - configure also the replacement before remove by adding the
    Immediate flag
* apt-pkg/contrib/error.{cc,h}:
  - docstring cleanup
  - add inline DumpError() to avoid a subtle API break
* apt-pkg/contrib/error.{cc,h}:
  - remove constness of va_list parameter to fix build on amd64 and co.
    Thanks Eric Valette! (Closes: #588610)
* apt-pkg/deb/debmetaindex.cc:
  - do not query each architecture for flat file archives
  - fix typo preventing display of architecture in Info()
* methods/bzip2.cc:
  - add a copycat of the old gzip.cc as we need it for bzip2 and lzma
* debian/rules:
  - Make DEB_BUILD_OPTIONS=noopt actually work by passing the right
    CXXFLAGS.
* apt-pkg/contrib/fileutl.{h,cc}:
  - Add support for reading of gzipped files with the new "ReadOnlyGzip"
    OpenMode. (Closes: #188407)
  - Link against zlib (in apt-pkg/makefile) and add zlib build dependency.
  - [ABI BREAK] This adds a new private member to FileFd, but its
    initialization is in the public header file.
* configure.in:
  - Check for zlib library and headers.
* apt-pkg/acquire-item.cc, apt-pkg/deb/debindexfile.cc,
  apt-pkg/deb/debrecords.cc, apt-pkg/deb/debsrcrecords.h,
  cmdline/apt-cache.cc:
  - Open Packages, Sources, and Translations indexes in "ReadOnlyGzip"
    mode.
* apt-pkg/deb/debindexfile.cc:
  - If we do not find uncompressed package/source/translation indexes,
    look for gzip compressed ones.
* apt-pkg/acquire-item.cc:
  - If the Acquire::GzipIndexes option is true and we download a gzipped
    index file, keep it as it is (and rename to .gz) instead of
    uncompressing it.
* doc/apt.conf.5.xml:
  - Document the new Acquire::GzipIndexes option (example below).
* doc/po/apt-doc.pot, doc/po/de.po:
  - German translation of the new Acquire::GzipIndexes option.
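The Acquire::GzipIndexes option documented above is a plain boolean in
apt.conf; a minimal sketch of enabling it (the option name comes from the
changelog, the value is illustrative):

    // keep downloaded index files gzip-compressed on disk
    Acquire::GzipIndexes "true";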
* Add test/test-indexes.sh:
  - Test behaviour of index retrieval and usage, in particular with
    uncompressed and gzip compressed indexes.
* methods/gzip.cc:
  - With FileFd now being able to read gzipped files, there is no need
    for the gzip method any more to spawn an external gzip process.
    Rewrite it to use FileFd directly, which makes the code a lot
    simpler and also uses less memory and overhead (sketch after the
    gzip.cc hunk below).
* doc/apt.conf.5.xml:
  - add and document APT::Cache-{Start,Grow,Limit} options for mmap
    control (example below)
* apt-pkg/contrib/fileutl.cc:
  - do not fail on double close()
* cmdline/cacheset.cc:
  - don't include it in the library for now as it is too volatile
  - get the candidate either from an already built depcache or use the
    policy, which is a bit faster than depcache generation
  - get packages by task^ with FromTask()
  - only print errors if all tries to get a package by string failed
  - factor out code to get a single package FromName()
  - check in Grouped* first without modifier interpretation
* cmdline/apt-get.cc:
  - use the cachesets in the install commands
  - make the specified order of packages irrelevant (Closes: #196021)
* apt-pkg/orderlist.cc:
  - untouched packages are never missing
* apt-pkg/packagemanager.cc:
  - packages that are not touched don't need to be unpacked
* debian/control:
  - remove intltool's dependency as it is an ubuntu artefact
* apt-pkg/depcache.cc:
  - SetCandidateVer for all pseudo packages
  - SetReInstall for the "all" package of a pseudo package
  - use the new MatchAgainstConfig for the DefaultRootSetFunc
  - always mark the all package if a pseudo package is marked for install
* apt-pkg/contrib/error.{cc,h}:
  - complete rewrite but use the same API
  - add NOTICE and DEBUG as new types of a message
  - add a simple stack handling to be able to delay error handling
* apt-pkg/aptconfiguration.cc:
  - show a deprecation notice for APT::Acquire::Translation
* apt-pkg/contrib/configuration.{cc,h}:
  - add a wrapper to match strings against configurable regex patterns
* apt-pkg/contrib/fileutl.cc:
  - show a notice about an ignored file instead of being always silent
  - add a Dir::Ignore-Files-Silently list option to control the notice
* apt-pkg/policy.h:
  - add another round of const& madness as the previous round
    accidentally did NOT override the virtual GetCandidateVer() method
    (Closes: #587725)
* apt-pkg/pkgcachegen.{cc,h}:
  - make the used MMap moveable (and therefore dynamically resizeable)
    by applying (some) mad pointer magic (Closes: #195018)
* apt-pkg/deb/dpkgpm.cc:
  - make the apt/term.log output unbuffered (thanks to Matt Zimmerman)
* methods/ftp.h:
  - Handle different logins on the same server (Closes: #586904)
    (example after the ftp.h hunk below).
* apt-pkg/deb/deblistparser.cc:
  - Handle architecture wildcards (Closes: #547724).
* apt-pkg/versionmatch.cc:
  - Support matching pins by regular expressions or glob() like
    patterns; regular expressions have to be put between two slashes,
    for example /.*/.
* apt-pkg/contrib/fileutl.cc:
  - Make FileFd replace files atomically in WriteTemp mode (for cache,
    etc).
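The APT::Cache-{Start,Grow,Limit} knobs mentioned above control the
dynamically resizing cache mmap; a hedged apt.conf sketch (the byte
values are illustrative, not recommendations):

    APT::Cache-Start "25165824";  // initial size of the cache mmap
    APT::Cache-Grow  "1048576";   // step by which a full cache grows
    APT::Cache-Limit "0";         // assumed here: 0 disables the upper bound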
* debian/control:
  - Set Standards-Version to 3.9.0
* apt-pkg/cachefile.h:
  - make pkgPolicy public again, libapt-pkg-perl (and probably others)
    get unhappy without that
* merge the remaining Ubuntu changes:
  - on gpg verification failure warn and restore the last known good state
  - on failure display the IP of the server (useful for servers that
    use round robin DNS)
  - support Original-Maintainer in RewritePackageOrder
  - enable cdrom autodetection via libudev by default
  - show a message about the Vcs in use when apt-get source is run for
    packages maintained in a Vcs
  - better support transitional packages with mark auto-installed:
    when the transitional package is in "oldlibs" the new package is
    not marked auto-installed (same is true for section metapackages)
  - provide the new "deb mirror://archive.foo/mirrors.list sid main"
    method, which expects a list of mirrors (generated on the server,
    e.g. via geoip) and will use that, including cycling on failure
    (example below)
  - write an apport crash file on package failure (disabled by default
    on debian until apport is available)
  - support mirror failure reporting (disabled by default on debian)
* apt-pkg/deb/dpkgpm.cc:
  - write Disappeared also to the history.log
  - forward manual-installed bit on package disappearance
* apt-pkg/deb/debsystem.cc:
  - add a better config item for the extended_states file
* apt-pkg/pkgcache.h:
  - switch {,Install-}Size to unsigned long long
* apt-pkg/depcache.cc:
  - do the autoremove mark process also for required packages to handle
    these illegally depending on lower priority packages (Closes: #583517)
  - try harder to find the other pseudo versions for autoremove multiarch
  - correct "Dangerous iterator usage" pointed out by cppcheck
  - deal with long long, not with int, to remove the 2GB limit (LP: #250909)
  - deprecate AddSize with Multiplier as it is unused and switch to a
    boolean instead to handle the sizes more gracefully
  - switch i{Download,Usr}Size from double to (un)signed long long
* apt-pkg/aptconfiguration.cc:
  - remove duplicate architectures in getArchitectures()
* apt-pkg/indexrecords.{cc,h}:
  - backport the forgotten Valid-Until patch from the obsolete
    experimental branch to prevent replay attacks better, thanks to
    Thomas Viehmann for the initial patch! (Closes: #499897)
  - add a constant Exists check for MetaKeys
* apt-pkg/acquire-item.cc:
  - do not try PDiff if it is not listed in the Meta file
  - send the Last-Modified header also for Translation files
* apt-pkg/cacheiterator.h:
  - let pkgCache::Iterator inherit from std::iterator
* ftparchive/writer.h:
  - add a virtual destructor to the FTWScanner class (for cppcheck)
* apt-pkg/cacheset.{cc,h}:
  - add a simple wrapper around std::set for cache structures
  - move regex magic from apt-get to the new FromRegEx method
  - move cmdline parsing from apt-cache to the new FromCommandLine method
  - support the special release-modifiers 'installed' and 'candidate'
* apt-pkg/contrib/cmdline.cc:
  - fix segfault in SaveInConfig caused by writing over char[] sizes
* apt-pkg/pkgcache.cc:
  - get the best matching arch package from a group with FindPreferredPkg
* cmdline/apt-cache.cc:
  - make the search multiarch compatible by using GrpIterator instead
  - use pkgCacheFile and the new CacheSets all over the place
  - add a --target-release option (Closes: #115520)
  - accept pkg/release and pkg=version in show and co.
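The mirror method entry quoted above goes straight into sources.list; a
sketch with a hypothetical host:

    deb mirror://mirrors.example.org/debian/mirrors.list sid main

where mirrors.list is the server-generated list of mirror URLs that the
method downloads and then cycles through on failure.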
    (Closes: #236270)
  - accept package versions in the unmet command
* cmdline/apt-get.cc:
  - use unsigned long long instead of double to store the values it gets
* apt-pkg/cachefile.{cc,h}:
  - split Open() into submethods to be able to build only parts
  - make the OpProgress optional in the Cache build process
  - store also the SourceList we use internally for export
* doc/apt.conf.5.xml:
  - document the new Valid-Until related options
* apt-pkg/contrib/strutl.cc:
  - split StrToTime() into HTTP/1.1 and FTP date parser methods and use
    strptime() instead of some self-made scanf mangling (example below)
  - use the portable timegm shown in its manpage instead of a strange
    looking code copy from wget
* ftparchive/writer.cc:
  - add a ValidTime option to generate a Valid-Until header in the
    Release file
* apt-pkg/policy.cc:
  - get the candidate right for a not-installed pseudo package if its
    non-pseudo friend is installed
* apt-pkg/indexcopy.cc:
  - move the gpg code copy to a new method and use it also in
    methods/gpgv.cc
* cmdline/apt-get.cc:
  - rerun dpkg-source in source if --fix-broken is given (Closes: #576752)
  - don't suggest held packages as they are installed (Closes: #578135)
  - handle multiple --{tar,diff,dsc}-only options correctly
  - show at the end of the install process a list of disappeared packages
* cmdline/apt-cache.cc:
  - use GroupCount for package names in stats and add a package struct
    line
* methods/rred.cc:
  - use the patchfile modification time instead of the one from the
    "old" file - thanks to Philipp Weis for noticing! (Closes: #571541)
* debian/rules:
  - remove targets referring to CVS or arch as they are useless
  - use $(CURDIR) instead of $(pwd)
  - use dpkg-buildflags if available for CXXFLAGS
* README.arch:
  - remove the file completely as it has no use nowadays
* apt-pkg/depcache.cc:
  - be doubly sure that the killer query is empty before starting
    reinstall
* methods/gpgv.cc:
  - remove the keyrings count limit by using vector magic
* contrib/mmap.cc:
  - clarify the "MMap reached size limit" error message, thanks Ivan
    Masár!
* doc/apt.ent:
  - add entities for the current oldstable/stable/testing codenames
* doc/sources.list.5.xml:
  - use stable-codename instead of stable in the examples (Closes: #531492)
* doc/apt_preferences.5.xml:
  - adapt some examples here to use current codenames as well
  - add "NotAutomatic: yes" handling, thanks Osamu Aoki (Closes: #490347)
* debian/libapt-pkg-doc.doc-base.cache:
  - remove yet another reference to the removed cache.sgml
* doc/apt-get.8.xml:
  - do not spell out explicit target_release_{name,version,codename};
    it should be clear by itself and 'man' can break lines again
    (Closes: #566166)
  - remove the gnome-apt reference as it is removed from unstable
* apt-pkg/deb/dpkgpm.cc:
  - add 'disappear' to the known processing states, thanks Jonathan
    Nieder
* apt-pkg/packagemanager.h:
  - export info about disappeared packages with GetDisappearedPackages()
* methods/http.{cc,h}:
  - code cleanup, use enums instead of magic ints
* debian/rules:
  - spell out some less known options to reduce the manpage
    consultation-rate
  - Use POSIX command substitution: $(<command sequence>)
  - Remove EOL whitespace (Closes: #577804)
* apt-pkg/acquire-item.cc:
  - Fix pkgAcqFile::Custom600Headers() to always return something.
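The strptime()-based date parsing mentioned above can be sketched in a
few lines; this illustrates the approach, not the exact libapt function
signatures:

    #include <time.h>
    // parse an HTTP/1.1 date such as "Sun, 06 Nov 1994 08:49:37 GMT"
    struct tm Tm = {};
    time_t Time = -1;
    if (strptime("Sun, 06 Nov 1994 08:49:37 GMT",
                 "%a, %d %b %Y %H:%M:%S GMT", &Tm) != NULL)
       Time = timegm(&Tm); // or the portable timegm replacement noted above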
* apt-pkg/depcache.cc:
  - rewrite the pseudo package reinstaller to be more intelligent in
    its package choices
* apt-pkg/packagemanager.cc:
  - don't try to "unpack" pseudo packages twice
* apt-pkg/contrib/fileutl.cc:
  - add a parent-guarded "mkdir -p" as CreateDirectory()
* apt-pkg/acquire.{cc,h}:
  - add a delayed constructor with Setup() for success reporting
  - check for and create directories in Setup if needed instead of
    erroring out unfriendly in the constructor (Closes: #523920, #525783)
  - optionally handle a lock file in Setup()
* apt-pkg/acquire-item.cc:
  - Acquire::ForceHash to force the method for the expected hash
* cmdline/apt-get.cc:
  - remove the lock file handling and let Acquire take care of it instead
  - display the MD5Sum in --print-uris if not forced to use another
    method, instead of displaying the strongest available (Closes: #576420)
  - regex for package names is executed on Grp- not PkgIterator
  - show non-candidates as fallback for virtual packages (Closes: #578385)
  - set also "all" to this version for pseudo packages in TryToChangeVer
* apt-pkg/deb/dpkgpm.cc:
  - remove Chroot-Directory from the files passed to install commands.
    Thanks to Kel Modderman for report & patch! (Closes: #577226)
* ftparchive/writer.cc:
  - remove the 999 chars Files and Checksums rewrite limit (Closes: #577759)
* cmdline/apt-cache.cc:
  - align Installed and Candidate Version in policy so they can be
    compared more easily, thanks Ralf Gesellensetter for the pointer!
    (Closes: #578657)
* doc/apt.ent:
  - Add a note about APT_CONFIG in the -c description (Closes: #578267)
* doc/po/de.po:
  - correct typos in the german apt_preferences manpage, thanks Chris
    Leick!
* apt-pkg/sourcelist.cc:
  - be less strict and accept [option=value] as well
* apt-pkg/contrib/configuration.cc:
  - error out if the #clear directive has no argument
* doc/files.sgml:
  - sync documentation with the status quo regarding files/directories
    in use, extended_states and uri schemes
* doc/cache.sgml:
  - drop the file in favor of in-place documentation with doxygen
* apt-pkg/pkgcache.h:
  - enhance the Groups ABI by providing an ID as the other structs do
  - check also the size of the Group struct when checking the others
* cmdline/apt-get.cc:
  - replace backticks with single quotes around the fix-broken command
    in the broken packages message (Closes: #577168)
* dselect/install:
  - modernize if-statements not to use 'x' (Closes: #577117)
  - replace backticks with POSIX $() (Closes: #577116)
* cmdline/apt-get.cc:
  - fix crash when pkg.VersionList() is empty
  - install html doxygen in libapt-pkg-doc
* debian/control:
  - build-depend on doxygen
* apt-pkg/contrib/weakptr.h:
  - add a class WeakPointable which allows one to register weak
    pointers to an object which will be set to NULL when the object is
    deallocated (sketch below)
* [ABI break] apt-pkg/acquire{-worker,-item,}.h:
  - subclass pkgAcquire::{Worker,Item,ItemDesc} from WeakPointable
* apt-pkg/pkgcache.cc:
  - Merge fix from David to correct handling in single-arch environments.
* cmdline/apt-cache.cc:
  - Add a showauto command to apt-cache.
* cmdline/apt-get.cc:
  - Add apt-get markauto and unmarkauto commands.
* [BREAK] merge MultiArch-ABI. We don't support MultiArch yet, but we
  support the usage of the new ABI so libapt users can start to prepare
  for MultiArch (Closes: #536029)
* Ignore :qualifiers after package names in build dependencies in the
  library by default, but try to honour them in apt-get as we have some
  sort of MultiArch support ready (Closes: #558103)
* add translation of the manpages to PT (portuguese)
  Thanks to Américo Monteiro!
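The WeakPointable idea described above fits in a few lines; the
following is a self-contained illustration of the concept, not the exact
apt-pkg/contrib/weakptr.h interface:

    #include <set>
    #include <cstddef>

    class WeakPointable {
       std::set<WeakPointable **> pointers;
     public:
       void AddWeakPointer(WeakPointable **ptr)    { pointers.insert(ptr); }
       void RemoveWeakPointer(WeakPointable **ptr) { pointers.erase(ptr); }
       // every registered pointer is reset to NULL on deallocation
       ~WeakPointable() {
          for (std::set<WeakPointable **>::iterator i = pointers.begin();
               i != pointers.end(); ++i)
             **i = NULL;
       }
    };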
* Switch to dpkg-source 3.0 (native) format
* apt-pkg/depcache.cc:
  - remove Auto-Installed information from extended_states together
    with the package itself (Closes: #572364)
* cmdline/apt-mark:
  - don't crash if no arguments are given (Closes: #570962)
* debian/control:
  - remove some years old and obsolete Replaces
  - add automake/conf build-depends/conflicts as recommended by the
    autotools-dev README (Closes: #572615)
* apt-pkg/contrib/mmap.{h,cc}:
  - add a char[] fallback for filesystems without shared writable
    mmap() like JFFS2. Thanks to Marius Vollmer for writing and to Loïc
    Minier for pointing to the patch! (Closes: #314334)
* doc/apt_preferences.5.xml:
  - fix two typos and be more verbose in the novice warning.
    Thanks to Osamu Aoki for pointing it out! (Closes: #567669)
  - fix a=sid vs. n=sid typo, thanks Ansgar Burchardt!
  - origin can be used to match a hostname (Closes: #352667)
  - remove the wrong "pin-priority is optional" remark (Closes: #574944)
* apt-pkg/deb/dpkgpm.cc:
  - fix error message construction in OpenLog()
  - if available, store the Commandline in the history
* cmdline/apt-get.cc:
  - add a --only-upgrade flag to the install command (Closes: #572259)
  - fix memory leaks in error conditions in DoSource()
  - try version match in FindSrc first exact, then fuzzy (LP: #551178)
* apt-pkg/contrib/cmndline.cc:
  - save the Commandline in Commandline::AsString for logging
* apt-pkg/deb/debversion.cc:
  - consider absence of a debian revision equivalent to 0 (Closes: #573592)
* doc/makefile, doc/*:
  - generate subdirectories for building the manpages in on the fly,
    depending on the po files we have
* apt-pkg/pkgcachegen.cc:
  - merge versions correctly even if multiple different versions with
    the same version number are available. Thanks to Magnus Holmgren
    for the patch! (Closes: #351056)
* ftparchive/writer.cc:
  - write LongDescriptions into i18n/Translation-en by default if they
    shouldn't be included in the Packages file
* doc/po/de.po:
  - correct a few typos in the german manpage translation.
    Thanks to Chris Leick and Georg Koppen! (Closes: #574962)
* apt-pkg/contrib/strutl.cc:
  - convert all toupper calls to tolower_ascii for a little speedup
* apt-pkg/contrib/strutl.cc:
  - always escape '%' (LP: #130289) (Closes: #500560)
  - unescape a '%' sequence only if followed by 2 hex digits
  - username/password are urlencoded in the proxy string (RFC 3986)
    (example below)
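The urlencoding rule noted above matters mostly for proxy credentials; a
hedged apt.conf example with hypothetical credentials:

    // a literal '%' in the password has to be written as %25 (RFC 3986)
    Acquire::http::Proxy "http://user:pa%25ss@proxy.example.com:3128/";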
Diffstat (limited to 'methods')
-rw-r--r--  methods/bzip2.cc      177
-rw-r--r--  methods/connect.cc     23
-rw-r--r--  methods/copy.cc         1
-rw-r--r--  methods/ftp.cc          3
-rw-r--r--  methods/ftp.h           2
-rw-r--r--  methods/gpgv.cc       100
-rw-r--r--  methods/gzip.cc        63
-rw-r--r--  methods/http.cc        69
-rw-r--r--  methods/http.h         43
-rw-r--r--  methods/http_main.cc   20
-rw-r--r--  methods/makefile       30
-rw-r--r--  methods/mirror.cc     372
-rw-r--r--  methods/mirror.h       55
-rw-r--r--  methods/rred.cc        19
-rw-r--r--  methods/rsh.cc          3
15 files changed, 756 insertions, 224 deletions
diff --git a/methods/bzip2.cc b/methods/bzip2.cc
new file mode 100644
index 000000000..5da214bfc
--- /dev/null
+++ b/methods/bzip2.cc
@@ -0,0 +1,177 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+/* ######################################################################
+
+ Bzip2 method - Take a file URI in and decompress it into the target
+ file.
+
+ While the method is named "bzip2" it handles also other compression
+ types as it calls binaries based on the name of the method,
+ so it can also be used to handle gzip, lzma and others if named
+ correctly.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/acquire-method.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/hashes.h>
+
+#include <sys/stat.h>
+#include <unistd.h>
+#include <utime.h>
+#include <stdio.h>
+#include <errno.h>
+#include <apti18n.h>
+ /*}}}*/
+
+const char *Prog;
+
+class Bzip2Method : public pkgAcqMethod
+{
+ virtual bool Fetch(FetchItem *Itm);
+
+ public:
+
+ Bzip2Method() : pkgAcqMethod("1.1",SingleInstance | SendConfig) {};
+};
+
+
+// Bzip2Method::Fetch - Decompress the passed URI /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool Bzip2Method::Fetch(FetchItem *Itm)
+{
+ URI Get = Itm->Uri;
+ string Path = Get.Host + Get.Path; // To account for relative paths
+
+ string GzPathOption = "Dir::bin::"+string(Prog);
+
+ FetchResult Res;
+ Res.Filename = Itm->DestFile;
+ URIStart(Res);
+
+ // Open the source and destination files
+ FileFd From(Path,FileFd::ReadOnly);
+
+ // if the file is empty, just rename it and return
+ if(From.Size() == 0)
+ {
+ rename(Path.c_str(), Itm->DestFile.c_str());
+ return true;
+ }
+
+ int GzOut[2];
+ if (pipe(GzOut) < 0)
+ return _error->Errno("pipe",_("Couldn't open pipe for %s"),Prog);
+
+ // Fork bzip2
+ pid_t Process = ExecFork();
+ if (Process == 0)
+ {
+ close(GzOut[0]);
+ dup2(From.Fd(),STDIN_FILENO);
+ dup2(GzOut[1],STDOUT_FILENO);
+ From.Close();
+ close(GzOut[1]);
+ SetCloseExec(STDIN_FILENO,false);
+ SetCloseExec(STDOUT_FILENO,false);
+
+ const char *Args[3];
+ string Tmp = _config->Find(GzPathOption,Prog);
+ Args[0] = Tmp.c_str();
+ Args[1] = "-d";
+ Args[2] = 0;
+ execvp(Args[0],(char **)Args);
+ _exit(100);
+ }
+ From.Close();
+ close(GzOut[1]);
+
+ FileFd FromGz(GzOut[0]); // For autoclose
+ FileFd To(Itm->DestFile,FileFd::WriteEmpty);
+ To.EraseOnFailure();
+ if (_error->PendingError() == true)
+ return false;
+
+ // Read data from bzip2, generate checksums and write
+ Hashes Hash;
+ bool Failed = false;
+ while (1)
+ {
+ unsigned char Buffer[4*1024];
+ ssize_t Count; // read(2) returns -1 on error, so this needs a signed type
+
+ Count = read(GzOut[0],Buffer,sizeof(Buffer));
+ if (Count < 0 && errno == EINTR)
+ continue;
+
+ if (Count < 0)
+ {
+ _error->Errno("read", _("Read error from %s process"),Prog);
+ Failed = true;
+ break;
+ }
+
+ if (Count == 0)
+ break;
+
+ Hash.Add(Buffer,Count);
+ if (To.Write(Buffer,Count) == false)
+ {
+ Failed = true;
+ FromGz.Close();
+ break;
+ }
+ }
+
+ // Wait for bzip2 to finish
+ if (ExecWait(Process,_config->Find(GzPathOption,Prog).c_str(),false) == false)
+ {
+ To.OpFail();
+ return false;
+ }
+
+ To.Close();
+
+ if (Failed == true)
+ return false;
+
+ // Transfer the modification times
+ struct stat Buf;
+ if (stat(Path.c_str(),&Buf) != 0)
+ return _error->Errno("stat",_("Failed to stat"));
+
+ struct utimbuf TimeBuf;
+ TimeBuf.actime = Buf.st_atime;
+ TimeBuf.modtime = Buf.st_mtime;
+ if (utime(Itm->DestFile.c_str(),&TimeBuf) != 0)
+ return _error->Errno("utime",_("Failed to set modification time"));
+
+ if (stat(Itm->DestFile.c_str(),&Buf) != 0)
+ return _error->Errno("stat",_("Failed to stat"));
+
+ // Return a Done response
+ Res.LastModified = Buf.st_mtime;
+ Res.Size = Buf.st_size;
+ Res.TakeHashes(Hash);
+
+ URIDone(Res);
+
+ return true;
+}
+ /*}}}*/
+
+int main(int argc, char *argv[])
+{
+ setlocale(LC_ALL, "");
+
+ Bzip2Method Mth;
+
+ Prog = strrchr(argv[0],'/');
+ Prog = (Prog == NULL) ? argv[0] : Prog + 1; // argv[0] may lack a '/'
+
+ return Mth.Run();
+}
diff --git a/methods/connect.cc b/methods/connect.cc
index 2f6b4833e..a5af1f1a6 100644
--- a/methods/connect.cc
+++ b/methods/connect.cc
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
+#include <sstream>
#include<set>
#include<string>
@@ -70,19 +71,17 @@ static bool DoConnect(struct addrinfo *Addr,string Host,
Owner->Status(_("Connecting to %s (%s)"),Host.c_str(),Name);
// if that addr did timeout before, we do not try it again
- if(bad_addr.find(string(Name)) != bad_addr.end())
+ if(bad_addr.find(string(Name)) != bad_addr.end())
return false;
/* If this is an IP rotation store the IP we are using.. If something goes
wrong this will get tacked onto the end of the error message */
if (LastHostAddr->ai_next != 0)
{
- char Name2[NI_MAXHOST + NI_MAXSERV + 10];
- snprintf(Name2,sizeof(Name2),_("[IP: %s %s]"),Name,Service);
- Owner->SetFailExtraMsg(string(Name2));
- }
- else
- Owner->SetFailExtraMsg("");
+ std::stringstream ss;
+ ioprintf(ss, _("[IP: %s %s]"),Name,Service);
+ Owner->SetIP(ss.str());
+ }
// Get a socket
if ((Fd = socket(Addr->ai_family,Addr->ai_socktype,
@@ -100,7 +99,7 @@ static bool DoConnect(struct addrinfo *Addr,string Host,
nonblocking */
if (WaitFd(Fd,true,TimeOut) == false) {
bad_addr.insert(bad_addr.begin(), string(Name));
- Owner->SetFailExtraMsg("\nFailReason: Timeout");
+ Owner->SetFailReason("Timeout");
return _error->Error(_("Could not connect to %s:%s (%s), "
"connection timed out"),Host.c_str(),Service,Name);
}
@@ -115,9 +114,9 @@ static bool DoConnect(struct addrinfo *Addr,string Host,
{
errno = Err;
if(errno == ECONNREFUSED)
- Owner->SetFailExtraMsg("\nFailReason: ConnectionRefused");
+ Owner->SetFailReason("ConnectionRefused");
else if (errno == ETIMEDOUT)
- Owner->SetFailExtraMsg("\nFailReason: ConnectionTimedOut");
+ Owner->SetFailReason("ConnectionTimedOut");
bad_addr.insert(bad_addr.begin(), string(Name));
return _error->Errno("connect",_("Could not connect to %s:%s (%s)."),Host.c_str(),
Service,Name);
@@ -184,13 +183,13 @@ bool Connect(string Host,int Port,const char *Service,int DefPort,int &Fd,
continue;
}
bad_addr.insert(bad_addr.begin(), Host);
- Owner->SetFailExtraMsg("\nFailReason: ResolveFailure");
+ Owner->SetFailReason("ResolveFailure");
return _error->Error(_("Could not resolve '%s'"),Host.c_str());
}
if (Res == EAI_AGAIN)
{
- Owner->SetFailExtraMsg("\nFailReason: TmpResolveFailure");
+ Owner->SetFailReason("TmpResolveFailure");
return _error->Error(_("Temporary failure resolving '%s'"),
Host.c_str());
}
diff --git a/methods/copy.cc b/methods/copy.cc
index 72896b4c0..027b59f46 100644
--- a/methods/copy.cc
+++ b/methods/copy.cc
@@ -84,6 +84,7 @@ bool CopyMethod::Fetch(FetchItem *Itm)
FileFd Fd(Res.Filename, FileFd::ReadOnly);
Hash.AddFD(Fd.Fd(), Fd.Size());
Res.TakeHashes(Hash);
+
URIDone(Res);
return true;
}
diff --git a/methods/ftp.cc b/methods/ftp.cc
index 3e1725823..97248f900 100644
--- a/methods/ftp.cc
+++ b/methods/ftp.cc
@@ -661,8 +661,7 @@ bool FTPConn::ModTime(const char *Path, time_t &Time)
return true;
// Parse it
- StrToTime(Msg,Time);
- return true;
+ return FTPMDTMStrToTime(Msg.c_str(), Time);
}
/*}}}*/
// FTPConn::CreateDataFd - Get a data connection /*{{{*/
diff --git a/methods/ftp.h b/methods/ftp.h
index 1bcea41b6..d7f1f7fbe 100644
--- a/methods/ftp.h
+++ b/methods/ftp.h
@@ -40,7 +40,7 @@ class FTPConn
public:
- bool Comp(URI Other) {return Other.Host == ServerName.Host && Other.Port == ServerName.Port;};
+ bool Comp(URI Other) {return Other.Host == ServerName.Host && Other.Port == ServerName.Port && Other.User == ServerName.User && Other.Password == ServerName.Password; };
// Raw connection IO
bool ReadResp(unsigned int &Ret,string &Text);
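With the extended Comp() above, two sources that point at the same FTP
host but use different logins no longer share one control connection; a
hypothetical sources.list pair that now works:

    deb ftp://anonymous:@ftp.example.com/debian stable main
    deb ftp://staff:secret@ftp.example.com/internal stable main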
diff --git a/methods/gpgv.cc b/methods/gpgv.cc
index c58e6cc45..018e4f622 100644
--- a/methods/gpgv.cc
+++ b/methods/gpgv.cc
@@ -2,6 +2,7 @@
#include <apt-pkg/acquire-method.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/fileutl.h>
+#include <apt-pkg/indexcopy.h>
#include <apti18n.h>
#include <utime.h>
@@ -12,6 +13,8 @@
#include <iostream>
#include <sstream>
+#include <vector>
+
#define GNUPGPREFIX "[GNUPG:]"
#define GNUPGBADSIG "[GNUPG:] BADSIG"
#define GNUPGNOPUBKEY "[GNUPG:] NO_PUBKEY"
@@ -52,107 +55,29 @@ string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile,
if (Debug == true)
std::clog << "inside VerifyGetSigners" << std::endl;
- pid_t pid;
int fd[2];
- FILE *pipein;
- int status;
- string const gpgvpath = _config->Find("Dir::Bin::gpg", "/usr/bin/gpgv");
- // FIXME: remove support for deprecated APT::GPGV setting
- string const trustedFile = _config->FindFile("Dir::Etc::Trusted",
- _config->Find("APT::GPGV::TrustedKeyring", "/etc/apt/trusted.gpg").c_str());
- string const trustedPath = _config->FindDir("Dir::Etc::TrustedParts", "/etc/apt/trusted.gpg.d");
- if (Debug == true)
- {
- std::clog << "gpgv path: " << gpgvpath << std::endl;
- std::clog << "Keyring file: " << trustedFile << std::endl;
- std::clog << "Keyring path: " << trustedPath << std::endl;
- }
-
- vector<string> keyrings = GetListOfFilesInDir(trustedPath, "gpg", false);
- if (FileExists(trustedFile) == true)
- keyrings.push_back(trustedFile);
-
- if (keyrings.empty() == true)
- {
- // TRANSLATOR: %s is the trusted keyring parts directory
- ioprintf(ret, _("No keyring installed in %s."), trustedPath.c_str());
- return ret.str();
- }
if (pipe(fd) < 0)
return "Couldn't create pipe";
- pid = fork();
+ pid_t pid = fork();
if (pid < 0)
return string("Couldn't spawn new process") + strerror(errno);
else if (pid == 0)
{
- const char *Args[400];
- unsigned int i = 0;
-
- Args[i++] = gpgvpath.c_str();
- Args[i++] = "--status-fd";
- Args[i++] = "3";
- Args[i++] = "--ignore-time-conflict";
- for (vector<string>::const_iterator K = keyrings.begin();
- K != keyrings.end(); ++K)
- {
- Args[i++] = "--keyring";
- Args[i++] = K->c_str();
- // check overflow (minus a bit of extra space at the end)
- if(i >= sizeof(Args)/sizeof(char*)-5) {
- std::clog << _("E: Too many keyrings should be passed to gpgv. Exiting.") << std::endl;
- exit(111);
- }
- }
-
- Configuration::Item const *Opts;
- Opts = _config->Tree("Acquire::gpgv::Options");
- if (Opts != 0)
- {
- Opts = Opts->Child;
- for (; Opts != 0; Opts = Opts->Next)
- {
- if (Opts->Value.empty() == true)
- continue;
- Args[i++] = Opts->Value.c_str();
- // check overflow (minus a bit of extra space at the end)
- if(i >= sizeof(Args)/sizeof(char*)-5) {
- std::clog << _("E: Argument list from Acquire::gpgv::Options too long. Exiting.") << std::endl;
- exit(111);
- }
- }
- }
- Args[i++] = file;
- Args[i++] = outfile;
- Args[i++] = NULL;
-
- if (Debug == true)
+ if (SigVerify::RunGPGV(outfile, file, 3, fd) == false)
{
- std::clog << "Preparing to exec: " << gpgvpath;
- for(unsigned int j=0;Args[j] != NULL; j++)
- std::clog << " " << Args[j];
- std::clog << std::endl;
+ // TRANSLATOR: %s is the trusted keyring parts directory
+ ioprintf(ret, _("No keyring installed in %s."),
+ _config->FindDir("Dir::Etc::TrustedParts", "/etc/apt/trusted.gpg.d").c_str());
+ return ret.str();
}
- int const nullfd = open("/dev/null", O_RDONLY);
- close(fd[0]);
- // Redirect output to /dev/null; we read from the status fd
- dup2(nullfd, STDOUT_FILENO);
- dup2(nullfd, STDERR_FILENO);
- // Redirect the pipe to the status fd (3)
- dup2(fd[1], 3);
-
- putenv((char *)"LANG=");
- putenv((char *)"LC_ALL=");
- putenv((char *)"LC_MESSAGES=");
- execvp(gpgvpath.c_str(), (char **)Args);
-
exit(111);
}
close(fd[1]);
- pipein = fdopen(fd[0], "r");
-
+ FILE *pipein = fdopen(fd[0], "r");
+
// Loop over the output of gpgv, and check the signatures.
size_t buffersize = 64;
char *buffer = (char *) malloc(buffersize);
@@ -225,6 +150,7 @@ string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile,
}
fclose(pipein);
+ int status;
waitpid(pid, &status, 0);
if (Debug == true)
{
@@ -243,7 +169,7 @@ string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile,
}
else if (WEXITSTATUS(status) == 111)
{
- ioprintf(ret, _("Could not execute '%s' to verify signature (is gpgv installed?)"), gpgvpath.c_str());
+ ioprintf(ret, _("Could not execute 'gpgv' to verify signature (is gpgv installed?)"));
return ret.str();
}
else
diff --git a/methods/gzip.cc b/methods/gzip.cc
index f732c0b86..72e3ac909 100644
--- a/methods/gzip.cc
+++ b/methods/gzip.cc
@@ -23,8 +23,6 @@
#include <apti18n.h>
/*}}}*/
-const char *Prog;
-
class GzipMethod : public pkgAcqMethod
{
virtual bool Fetch(FetchItem *Itm);
@@ -43,14 +41,12 @@ bool GzipMethod::Fetch(FetchItem *Itm)
URI Get = Itm->Uri;
string Path = Get.Host + Get.Path; // To account for relative paths
- string GzPathOption = "Dir::bin::"+string(Prog);
-
FetchResult Res;
Res.Filename = Itm->DestFile;
URIStart(Res);
// Open the source and destination files
- FileFd From(Path,FileFd::ReadOnly);
+ FileFd From(Path,FileFd::ReadOnlyGzip);
// if the file is empty, just rename it and return
if(From.Size() == 0)
@@ -59,40 +55,12 @@ bool GzipMethod::Fetch(FetchItem *Itm)
return true;
}
- int GzOut[2];
- if (pipe(GzOut) < 0)
- return _error->Errno("pipe",_("Couldn't open pipe for %s"),Prog);
-
- // Fork gzip
- pid_t Process = ExecFork();
- if (Process == 0)
- {
- close(GzOut[0]);
- dup2(From.Fd(),STDIN_FILENO);
- dup2(GzOut[1],STDOUT_FILENO);
- From.Close();
- close(GzOut[1]);
- SetCloseExec(STDIN_FILENO,false);
- SetCloseExec(STDOUT_FILENO,false);
-
- const char *Args[3];
- string Tmp = _config->Find(GzPathOption,Prog);
- Args[0] = Tmp.c_str();
- Args[1] = "-d";
- Args[2] = 0;
- execvp(Args[0],(char **)Args);
- _exit(100);
- }
- From.Close();
- close(GzOut[1]);
-
- FileFd FromGz(GzOut[0]); // For autoclose
FileFd To(Itm->DestFile,FileFd::WriteEmpty);
To.EraseOnFailure();
if (_error->PendingError() == true)
return false;
- // Read data from gzip, generate checksums and write
+ // Read data from source, generate checksums and write
Hashes Hash;
bool Failed = false;
while (1)
@@ -100,36 +68,23 @@ bool GzipMethod::Fetch(FetchItem *Itm)
unsigned char Buffer[4*1024];
unsigned long Count;
- Count = read(GzOut[0],Buffer,sizeof(Buffer));
- if (Count < 0 && errno == EINTR)
- continue;
-
- if (Count < 0)
+ if (!From.Read(Buffer,sizeof(Buffer),&Count))
{
- _error->Errno("read", _("Read error from %s process"),Prog);
- Failed = true;
- break;
+ To.OpFail();
+ return false;
}
-
if (Count == 0)
break;
-
+
Hash.Add(Buffer,Count);
if (To.Write(Buffer,Count) == false)
{
Failed = true;
- FromGz.Close();
break;
}
}
- // Wait for gzip to finish
- if (ExecWait(Process,_config->Find(GzPathOption,Prog).c_str(),false) == false)
- {
- To.OpFail();
- return false;
- }
-
+ From.Close();
To.Close();
if (Failed == true)
@@ -165,9 +120,5 @@ int main(int argc, char *argv[])
setlocale(LC_ALL, "");
GzipMethod Mth;
-
- Prog = strrchr(argv[0],'/');
- Prog++;
-
return Mth.Run();
}
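Standalone, the rewritten read loop above amounts to the following
sketch of the new FileFd gzip support (file name hypothetical, logic
modeled on the hunk above):

    #include <apt-pkg/fileutl.h>
    // transparently read a gzip-compressed file via zlib
    FileFd From("Packages.gz", FileFd::ReadOnlyGzip);
    unsigned char Buffer[4*1024];
    unsigned long Count = 0;
    do {
       if (From.Read(Buffer, sizeof(Buffer), &Count) == false)
          break;           // read error
       // ... hash and write the Count bytes in Buffer ...
    } while (Count != 0);  // Count == 0 signals end of file
    From.Close();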
diff --git a/methods/http.cc b/methods/http.cc
index b05444691..9fa74bffa 100644
--- a/methods/http.cc
+++ b/methods/http.cc
@@ -67,7 +67,7 @@ unsigned long CircleBuf::BwReadLimit=0;
unsigned long CircleBuf::BwTickReadData=0;
struct timeval CircleBuf::BwReadTick={0,0};
const unsigned int CircleBuf::BW_HZ=10;
-
+
// CircleBuf::CircleBuf - Circular input buffer /*{{{*/
// ---------------------------------------------------------------------
/* */
@@ -378,7 +378,7 @@ bool ServerState::Close()
// ---------------------------------------------------------------------
/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header
parse error occurred */
-int ServerState::RunHeaders()
+ServerState::RunHeadersResult ServerState::RunHeaders()
{
State = Header;
@@ -407,7 +407,7 @@ int ServerState::RunHeaders()
string::const_iterator J = I;
for (; J != Data.end() && *J != '\n' && *J != '\r';J++);
if (HeaderLine(string(I,J)) == false)
- return 2;
+ return RUN_HEADERS_PARSE_ERROR;
I = J;
}
@@ -419,11 +419,11 @@ int ServerState::RunHeaders()
if (Encoding == Closes && HaveContent == true)
Persistent = false;
- return 0;
+ return RUN_HEADERS_OK;
}
while (Owner->Go(false,this) == true);
- return 1;
+ return RUN_HEADERS_IO_ERROR;
}
/*}}}*/
// ServerState::RunData - Transfer the data from the socket /*{{{*/
@@ -631,7 +631,7 @@ bool ServerState::HeaderLine(string Line)
if (stringcasecmp(Tag,"Last-Modified:") == 0)
{
- if (StrToTime(Val,Date) == false)
+ if (RFC1123StrToTime(Val.c_str(), Date) == false)
return _error->Error(_("Unknown date format"));
return true;
}
@@ -914,15 +914,10 @@ bool HttpMethod::ServerDie(ServerState *Srv)
// HttpMethod::DealWithHeaders - Handle the retrieved header data /*{{{*/
// ---------------------------------------------------------------------
/* We look at the header data we got back from the server and decide what
- to do. Returns
- 0 - File is open,
- 1 - IMS hit
- 3 - Unrecoverable error
- 4 - Error with error content page
- 5 - Unrecoverable non-server error (close the connection)
- 6 - Try again with a new or changed URI
+ to do. Returns DealWithHeadersResult (see http.h for details).
*/
-int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
+HttpMethod::DealWithHeadersResult
+HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
{
// Not Modified
if (Srv->Result == 304)
@@ -930,7 +925,7 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
unlink(Queue->DestFile.c_str());
Res.IMSHit = true;
Res.LastModified = Queue->LastModified;
- return 1;
+ return IMS_HIT;
}
/* Redirect
@@ -949,7 +944,7 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
if (!Srv->Location.empty())
{
NextURI = Srv->Location;
- return 6;
+ return TRY_AGAIN_OR_REDIRECT;
}
/* else pass through for error message */
}
@@ -958,10 +953,13 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
failure */
if (Srv->Result < 200 || Srv->Result >= 300)
{
+ char err[255];
+ snprintf(err,sizeof(err)-1,"HttpError%i",Srv->Result);
+ SetFailReason(err);
_error->Error("%u %s",Srv->Result,Srv->Code);
if (Srv->HaveContent == true)
- return 4;
- return 3;
+ return ERROR_WITH_CONTENT_PAGE;
+ return ERROR_UNRECOVERABLE;
}
// This is some sort of 2xx 'data follows' reply
@@ -972,7 +970,7 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
delete File;
File = new FileFd(Queue->DestFile,FileFd::WriteAny);
if (_error->PendingError() == true)
- return 5;
+ return ERROR_NOT_FROM_SERVER;
FailFile = Queue->DestFile;
FailFile.c_str(); // Make sure we dont do a malloc in the signal handler
@@ -1000,13 +998,13 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
if (Srv->In.Hash->AddFD(File->Fd(),Srv->StartPos) == false)
{
_error->Errno("read",_("Problem hashing file"));
- return 5;
+ return ERROR_NOT_FROM_SERVER;
}
lseek(File->Fd(),0,SEEK_END);
}
SetNonBlock(File->Fd(),true);
- return 0;
+ return FILE_IS_OPEN;
}
/*}}}*/
// HttpMethod::SigTerm - Handle a fatal signal /*{{{*/
@@ -1147,11 +1145,11 @@ int HttpMethod::Loop()
// Fetch the next URL header data from the server.
switch (Server->RunHeaders())
{
- case 0:
+ case ServerState::RUN_HEADERS_OK:
break;
// The header data is bad
- case 2:
+ case ServerState::RUN_HEADERS_PARSE_ERROR:
{
_error->Error(_("Bad header data"));
Fail(true);
@@ -1161,7 +1159,7 @@ int HttpMethod::Loop()
// The server closed a connection during the header get..
default:
- case 1:
+ case ServerState::RUN_HEADERS_IO_ERROR:
{
FailCounter++;
_error->Discard();
@@ -1185,7 +1183,7 @@ int HttpMethod::Loop()
switch (DealWithHeaders(Res,Server))
{
// Ok, the file is Open
- case 0:
+ case FILE_IS_OPEN:
{
URIStart(Res);
@@ -1238,21 +1236,21 @@ int HttpMethod::Loop()
}
// IMS hit
- case 1:
+ case IMS_HIT:
{
URIDone(Res);
break;
}
// Hard server error, not found or something
- case 3:
+ case ERROR_UNRECOVERABLE:
{
Fail();
break;
}
// Hard internal error, kill the connection and fail
- case 5:
+ case ERROR_NOT_FROM_SERVER:
{
delete File;
File = 0;
@@ -1264,7 +1262,7 @@ int HttpMethod::Loop()
}
// We need to flush the data, the header is like a 404 w/ error text
- case 4:
+ case ERROR_WITH_CONTENT_PAGE:
{
Fail();
@@ -1277,7 +1275,7 @@ int HttpMethod::Loop()
}
// Try again with a new URL
- case 6:
+ case TRY_AGAIN_OR_REDIRECT:
{
// Clear rest of response if there is content
if (Server->HaveContent)
@@ -1371,15 +1369,4 @@ bool HttpMethod::AutoDetectProxy()
}
/*}}}*/
-int main()
-{
- setlocale(LC_ALL, "");
- // ignore SIGPIPE, this can happen on write() if the socket
- // closes the connection (this is dealt with via ServerDie())
- signal(SIGPIPE, SIG_IGN);
-
- HttpMethod Mth;
- return Mth.Loop();
-}
-
diff --git a/methods/http.h b/methods/http.h
index ceee36cbe..0bc019e77 100644
--- a/methods/http.h
+++ b/methods/http.h
@@ -13,7 +13,7 @@
#define MAXLEN 360
-#include <iostream>
+#include <apt-pkg/hashes.h>
using std::cout;
using std::endl;
@@ -117,7 +117,19 @@ struct ServerState
void Reset() {Major = 0; Minor = 0; Result = 0; Size = 0; StartPos = 0;
Encoding = Closes; time(&Date); ServerFd = -1;
Pipeline = true;};
- int RunHeaders();
+
+ /** \brief Result of the header acquire */
+ enum RunHeadersResult {
+ /** \brief Header ok */
+ RUN_HEADERS_OK,
+ /** \brief IO error while retrieving */
+ RUN_HEADERS_IO_ERROR,
+ /** \brief Parse error after retrieving */
+ RUN_HEADERS_PARSE_ERROR,
+ };
+ /** \brief Get the headers before the data */
+ RunHeadersResult RunHeaders();
+ /** \brief Transfer the data from the socket */
bool RunData();
bool Open();
@@ -133,10 +145,28 @@ class HttpMethod : public pkgAcqMethod
bool Go(bool ToFile,ServerState *Srv);
bool Flush(ServerState *Srv);
bool ServerDie(ServerState *Srv);
- int DealWithHeaders(FetchResult &Res,ServerState *Srv);
+
+ /** \brief Result of the header parsing */
+ enum DealWithHeadersResult {
+ /** \brief The file is open and ready */
+ FILE_IS_OPEN,
+ /** \brief We got an IMS hit, the file has not changed */
+ IMS_HIT,
+ /** \brief The server reported an unrecoverable error */
+ ERROR_UNRECOVERABLE,
+ /** \brief The server reported an error with an error content page */
+ ERROR_WITH_CONTENT_PAGE,
+ /** \brief An error on the client side */
+ ERROR_NOT_FROM_SERVER,
+ /** \brief A redirect or retry request */
+ TRY_AGAIN_OR_REDIRECT
+ };
+ /** \brief Handle the retrieved header data */
+ DealWithHeadersResult DealWithHeaders(FetchResult &Res,ServerState *Srv);
+
+ /** \brief Try to AutoDetect the proxy */
bool AutoDetectProxy();
- virtual bool Fetch(FetchItem *);
virtual bool Configuration(string Message);
// In the event of a fatal signal this file will be closed and timestamped.
@@ -144,10 +174,13 @@ class HttpMethod : public pkgAcqMethod
static int FailFd;
static time_t FailTime;
static void SigTerm(int);
+
+ protected:
+ virtual bool Fetch(FetchItem *);
string NextURI;
string AutoDetectProxyCmd;
-
+
public:
friend class ServerState;
diff --git a/methods/http_main.cc b/methods/http_main.cc
new file mode 100644
index 000000000..7815c2fc1
--- /dev/null
+++ b/methods/http_main.cc
@@ -0,0 +1,20 @@
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/acquire-method.h>
+#include <signal.h>
+
+#include "connect.h"
+#include "rfc2553emu.h"
+#include "http.h"
+
+
+int main()
+{
+ setlocale(LC_ALL, "");
+
+ // ignore SIGPIPE, this can happen on write() if the socket
+ // closes the connection (this is dealt with via ServerDie())
+ signal(SIGPIPE, SIG_IGN);
+
+ HttpMethod Mth;
+ return Mth.Loop();
+}
diff --git a/methods/makefile b/methods/makefile
index 7bcae6b9b..d94a85340 100644
--- a/methods/makefile
+++ b/methods/makefile
@@ -48,7 +48,7 @@ include $(PROGRAM_H)
PROGRAM=http
SLIBS = -lapt-pkg $(SOCKETLIBS) $(INTLLIBS)
LIB_MAKES = apt-pkg/makefile
-SOURCE = http.cc rfc2553emu.cc connect.cc
+SOURCE = http.cc http_main.cc rfc2553emu.cc connect.cc
include $(PROGRAM_H)
# The https method
@@ -79,22 +79,32 @@ LIB_MAKES = apt-pkg/makefile
SOURCE = rsh.cc
include $(PROGRAM_H)
-# SSH and bzip2 method symlink
-binary: $(BIN)/ssh $(BIN)/bzip2 $(BIN)/lzma
-veryclean: clean-$(BIN)/ssh clean-$(BIN)/bzip2 clean-$(BIN)/lzma
+# The mirror method
+PROGRAM=mirror
+SLIBS = -lapt-pkg $(SOCKETLIBS)
+LIB_MAKES = apt-pkg/makefile
+SOURCE = mirror.cc http.cc rfc2553emu.cc connect.cc
+include $(PROGRAM_H)
+
+# The gzip method
+PROGRAM=bzip2
+SLIBS = -lapt-pkg $(INTLLIBS)
+LIB_MAKES = apt-pkg/makefile
+SOURCE = bzip2.cc
+include $(PROGRAM_H)
+
+# SSH and lzma method symlink
+binary: $(BIN)/ssh $(BIN)/lzma
+veryclean: clean-$(BIN)/ssh clean-$(BIN)/lzma
+
$(BIN)/ssh:
echo "Installing ssh method link"
ln -fs rsh $(BIN)/ssh
clean-$(BIN)/ssh:
-rm $(BIN)/ssh
-$(BIN)/bzip2:
- echo "Installing bzip2 method link"
- ln -fs gzip $(BIN)/bzip2
$(BIN)/lzma:
echo "Installing lzma method link"
- ln -fs gzip $(BIN)/lzma
-clean-$(BIN)/bzip2:
- -rm $(BIN)/bzip2
+ ln -fs bzip2 $(BIN)/lzma
clean-$(BIN)/lzma:
-rm $(BIN)/lzma
diff --git a/methods/mirror.cc b/methods/mirror.cc
new file mode 100644
index 000000000..e8873d97b
--- /dev/null
+++ b/methods/mirror.cc
@@ -0,0 +1,372 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: mirror.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $
+/* ######################################################################
+
+ Mirror Acquire Method - This is the Mirror acquire method for APT.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/acquire-method.h>
+#include <apt-pkg/acquire-item.h>
+#include <apt-pkg/acquire.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/hashes.h>
+#include <apt-pkg/sourcelist.h>
+
+#include <fstream>
+#include <iostream>
+#include <stdarg.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <dirent.h>
+
+using namespace std;
+
+#include<sstream>
+
+#include "mirror.h"
+#include "http.h"
+#include "apti18n.h"
+ /*}}}*/
+
+/* Done:
+ * - works with http (only!)
+ * - always picks the first mirror from the list
+ * - call out to problem reporting script
+ * - supports "deb mirror://host/path/to/mirror-list/// dist component"
+ * - uses pkgAcqMethod::FailReason() to have a string representation
+ * of the failure that is also send to LP
+ *
+ * TODO:
+ * - deal with running as non-root because we can't write to the lists
+ dir then -> use the cached mirror file
+ * - better method to download than having a pkgAcquire interface here
+ * and better error handling there!
+ * - support more than http
+ * - testing :)
+ */
+
+MirrorMethod::MirrorMethod()
+ : HttpMethod(), DownloadedMirrorFile(false)
+{
+};
+
+// HttpMethod::Configuration - Handle a configuration message /*{{{*/
+// ---------------------------------------------------------------------
+/* We stash the desired pipeline depth */
+bool MirrorMethod::Configuration(string Message)
+{
+ if (pkgAcqMethod::Configuration(Message) == false)
+ return false;
+ Debug = _config->FindB("Debug::Acquire::mirror",false);
+
+ return true;
+}
+ /*}}}*/
+
+// clean the mirrors dir based on ttl information
+bool MirrorMethod::Clean(string Dir)
+{
+ vector<metaIndex *>::const_iterator I;
+
+ if(Debug)
+ clog << "MirrorMethod::Clean(): " << Dir << endl;
+
+ if(Dir == "/")
+ return _error->Error("will not clean: '/'");
+
+ // read sources.list
+ pkgSourceList list;
+ list.ReadMainList();
+
+ DIR *D = opendir(Dir.c_str());
+ if (D == 0)
+ return _error->Errno("opendir",_("Unable to read %s"),Dir.c_str());
+
+ string StartDir = SafeGetCWD();
+ if (chdir(Dir.c_str()) != 0)
+ {
+ closedir(D);
+ return _error->Errno("chdir",_("Unable to change to %s"),Dir.c_str());
+ }
+
+ for (struct dirent *Dir = readdir(D); Dir != 0; Dir = readdir(D))
+ {
+ // Skip some files..
+ if (strcmp(Dir->d_name,"lock") == 0 ||
+ strcmp(Dir->d_name,"partial") == 0 ||
+ strcmp(Dir->d_name,".") == 0 ||
+ strcmp(Dir->d_name,"..") == 0)
+ continue;
+
+ // see if we have that uri
+ for(I=list.begin(); I != list.end(); I++)
+ {
+ string uri = (*I)->GetURI();
+ if(uri.find("mirror://") != 0)
+ continue;
+ string BaseUri = uri.substr(0,uri.size()-1);
+ if (URItoFileName(BaseUri) == Dir->d_name)
+ break;
+ }
+ // nothing found, nuke it
+ if (I == list.end())
+ unlink(Dir->d_name);
+ };
+
+ chdir(StartDir.c_str());
+ closedir(D);
+ return true;
+}
+
+
+bool MirrorMethod::DownloadMirrorFile(string mirror_uri_str)
+{
+ if(Debug)
+ clog << "MirrorMethod::DownloadMirrorFile(): " << endl;
+
+ // not that great to use pkgAcquire here, but we do not have
+ // any other way right now
+ string fetch = BaseUri;
+ fetch.replace(0,strlen("mirror://"),"http://");
+
+ pkgAcquire Fetcher;
+ new pkgAcqFile(&Fetcher, fetch, "", 0, "", "", "", MirrorFile);
+ bool res = (Fetcher.Run() == pkgAcquire::Continue);
+ if(res)
+ DownloadedMirrorFile = true;
+ Fetcher.Shutdown();
+ return res;
+}
+
+/* convert the Queue->Uri back to the mirror base uri and look
+ * at all mirrors we have for this, this is needed as queue->uri
+ * may point to different mirrors (if TryNextMirror() was run)
+ */
+void MirrorMethod::CurrentQueueUriToMirror()
+{
+ // already in mirror:// style so nothing to do
+ if(Queue->Uri.find("mirror://") == 0)
+ return;
+
+ // find current mirror and select next one
+ for (vector<string>::const_iterator mirror = AllMirrors.begin();
+ mirror != AllMirrors.end(); ++mirror)
+ {
+ if (Queue->Uri.find(*mirror) == 0)
+ {
+ Queue->Uri.replace(0, mirror->length(), BaseUri);
+ return;
+ }
+ }
+ _error->Error("Internal error: Failed to convert %s back to %s",
+ Queue->Uri.c_str(), BaseUri.c_str());
+}
+
+bool MirrorMethod::TryNextMirror()
+{
+ // find current mirror and select next one
+ for (vector<string>::const_iterator mirror = AllMirrors.begin();
+ mirror != AllMirrors.end(); ++mirror)
+ {
+ if (Queue->Uri.find(*mirror) != 0)
+ continue;
+
+ vector<string>::const_iterator nextmirror = mirror + 1;
+ if (nextmirror == AllMirrors.end())
+ break;
+ Queue->Uri.replace(0, mirror->length(), *nextmirror);
+ if (Debug)
+ clog << "TryNextMirror: " << Queue->Uri << endl;
+ return true;
+ }
+
+ if (Debug)
+ clog << "TryNextMirror could not find another mirror to try" << endl;
+
+ return false;
+}
+
+bool MirrorMethod::InitMirrors()
+{
+ // if we do not have a MirrorFile, fallback
+ if(!FileExists(MirrorFile))
+ {
+ // FIXME: fallback to a default mirror here instead
+ // and provide a config option to define that default
+ return _error->Error(_("No mirror file '%s' found "), MirrorFile.c_str());
+ }
+
+ // FIXME: make the mirror selection more clever, do not
+ // just use the first one!
+ // BUT: we can not make this random, the mirror has to be
+ // stable across sessions, because otherwise we can
+ // get into sync issues (got indexfiles from mirror A,
+ // but packages from mirror B - one might be out of date etc)
+ ifstream in(MirrorFile.c_str());
+ string s;
+ while (!in.eof())
+ {
+ getline(in, s);
+ if (s.size() > 0)
+ AllMirrors.push_back(s);
+ }
+ Mirror = AllMirrors[0];
+ UsedMirror = Mirror;
+ return true;
+}
+
+string MirrorMethod::GetMirrorFileName(string mirror_uri_str)
+{
+ /*
+ - a mirror_uri_str looks like this:
+ mirror://people.ubuntu.com/~mvo/apt/mirror/mirrors/dists/feisty/Release.gpg
+
+ - the matching source.list entry
+ deb mirror://people.ubuntu.com/~mvo/apt/mirror/mirrors feisty main
+
+ - we actually want to go after:
+ http://people.ubuntu.com/~mvo/apt/mirror/mirrors
+
+ And we need to save the BaseUri for later:
+ - mirror://people.ubuntu.com/~mvo/apt/mirror/mirrors
+
+ FIXME: what if we have two similar prefixes?
+ mirror://people.ubuntu.com/~mvo/mirror
+ mirror://people.ubuntu.com/~mvo/mirror2
+ then mirror_uri_str looks like:
+ mirror://people.ubuntu.com/~mvo/apt/mirror/dists/feisty/Release.gpg
+ mirror://people.ubuntu.com/~mvo/apt/mirror2/dists/feisty/Release.gpg
+ we search sources.list and find:
+ mirror://people.ubuntu.com/~mvo/apt/mirror
+ in both cases! So we need to apply some domain knowledge here :( and
+ check for /dists/ or /Release.gpg as suffixes
+ */
+ string name;
+ if(Debug)
+ std::cerr << "GetMirrorFileName: " << mirror_uri_str << std::endl;
+
+ // read sources.list and find match
+ vector<metaIndex *>::const_iterator I;
+ pkgSourceList list;
+ list.ReadMainList();
+ for(I=list.begin(); I != list.end(); I++)
+ {
+ string uristr = (*I)->GetURI();
+ if(Debug)
+ std::cerr << "Checking: " << uristr << std::endl;
+ if(uristr.substr(0,strlen("mirror://")) != string("mirror://"))
+ continue;
+ // find matching uri in sources.list
+ if(mirror_uri_str.substr(0,uristr.size()) == uristr)
+ {
+ if(Debug)
+ std::cerr << "found BaseURI: " << uristr << std::endl;
+ BaseUri = uristr.substr(0,uristr.size()-1);
+ }
+ }
+ // get new file
+ name = _config->FindDir("Dir::State::mirrors") + URItoFileName(BaseUri);
+
+ if(Debug)
+ {
+ cerr << "base-uri: " << BaseUri << endl;
+ cerr << "mirror-file: " << name << endl;
+ }
+ return name;
+}
+
+// MirrorMethod::Fetch - Fetch an item /*{{{*/
+// ---------------------------------------------------------------------
+/* This adds an item to the pipeline. We keep the pipeline at a fixed
+ depth. */
+bool MirrorMethod::Fetch(FetchItem *Itm)
+{
+ if(Debug)
+ clog << "MirrorMethod::Fetch()" << endl;
+
+ // the http method uses Fetch(0) as a way to update the pipeline,
+ // just let it do its work in this case - Fetch() with a valid
+ // Itm will always run before the first Fetch(0)
+ if(Itm == NULL)
+ return HttpMethod::Fetch(Itm);
+
+ // if we don't have the name of the mirror file on disk yet,
+ // calculate it now (can be derived from the uri)
+ if(MirrorFile.empty())
+ MirrorFile = GetMirrorFileName(Itm->Uri);
+
+ // download mirror file once (if we are after index files)
+ if(Itm->IndexFile && !DownloadedMirrorFile)
+ {
+ Clean(_config->FindDir("Dir::State::mirrors"));
+ DownloadMirrorFile(Itm->Uri);
+ }
+
+ if(AllMirrors.empty()) {
+ if(!InitMirrors()) {
+ // no valid mirror selected, something went wrong downloading
+ // from the master mirror site most likely and there is
+ // no old mirror file available
+ return false;
+ }
+ }
+
+ if(Itm->Uri.find("mirror://") != string::npos)
+ Itm->Uri.replace(0,BaseUri.size(), Mirror);
+
+ if(Debug)
+ clog << "Fetch: " << Itm->Uri << endl << endl;
+
+ // now run the real fetcher
+ return HttpMethod::Fetch(Itm);
+};
+
+void MirrorMethod::Fail(string Err,bool Transient)
+{
+ // FIXME: TryNextMirror is not ideal for indexfile as we may
+ // run into auth issues
+
+ if (Debug)
+ clog << "Failure to get " << Queue->Uri << endl;
+
+ // try the next mirror on fail (if it's not an expected failure,
+ // e.g. translations are ok to ignore)
+ if (!Queue->FailIgnore && TryNextMirror())
+ return;
+
+ // all mirrors failed, so bail out
+ string s;
+ strprintf(s, _("[Mirror: %s]"), Mirror.c_str());
+ SetIP(s);
+
+ CurrentQueueUriToMirror();
+ pkgAcqMethod::Fail(Err, Transient);
+}
+
+void MirrorMethod::URIStart(FetchResult &Res)
+{
+ CurrentQueueUriToMirror();
+ pkgAcqMethod::URIStart(Res);
+}
+
+void MirrorMethod::URIDone(FetchResult &Res,FetchResult *Alt)
+{
+ CurrentQueueUriToMirror();
+ pkgAcqMethod::URIDone(Res, Alt);
+}
+
+
+int main()
+{
+ setlocale(LC_ALL, "");
+
+ MirrorMethod Mth;
+
+ return Mth.Loop();
+}
+
+
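InitMirrors() above reads one mirror per line and currently always picks
the first entry, so a server-generated mirrors.list might look like this
(hypothetical hosts):

    http://de.mirror.example.org/debian/
    http://fr.mirror.example.org/debian/
    http://us.mirror.example.org/debian/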
diff --git a/methods/mirror.h b/methods/mirror.h
new file mode 100644
index 000000000..0a3ea6e92
--- /dev/null
+++ b/methods/mirror.h
@@ -0,0 +1,55 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: http.h,v 1.12 2002/04/18 05:09:38 jgg Exp $
+/* ######################################################################
+
+ MIRROR Acquire Method - This is the MIRROR acquire method for APT.
+
+ ##################################################################### */
+ /*}}}*/
+
+#ifndef APT_MIRROR_H
+#define APT_MIRROR_H
+
+
+#include <iostream>
+
+using std::cout;
+using std::cerr;
+using std::endl;
+
+#include "http.h"
+
+class MirrorMethod : public HttpMethod
+{
+ FetchResult Res;
+ // we simply transform between BaseUri and Mirror
+ string BaseUri; // the original mirror://... url
+ string Mirror; // the selected mirror uri (http://...)
+ vector<string> AllMirrors; // all available mirrors
+ string MirrorFile; // the file that contains the list of mirrors
+ bool DownloadedMirrorFile; // already downloaded this session
+
+ bool Debug;
+
+ protected:
+ bool DownloadMirrorFile(string uri);
+ string GetMirrorFileName(string uri);
+ bool InitMirrors();
+ bool TryNextMirror();
+ void CurrentQueueUriToMirror();
+ bool Clean(string dir);
+
+ // we need to overwrite those to transform the url back
+ virtual void Fail(string Why, bool Transient = false);
+ virtual void URIStart(FetchResult &Res);
+ virtual void URIDone(FetchResult &Res,FetchResult *Alt = 0);
+ virtual bool Configuration(string Message);
+
+ public:
+ MirrorMethod();
+ virtual bool Fetch(FetchItem *Itm);
+};
+
+
+#endif
diff --git a/methods/rred.cc b/methods/rred.cc
index 262c78cab..f42c7a072 100644
--- a/methods/rred.cc
+++ b/methods/rred.cc
@@ -477,23 +477,26 @@ bool RredMethod::Fetch(FetchItem *Itm) /*{{{*/
Patch.Close();
To.Close();
- // Transfer the modification times
- struct stat Buf;
- if (stat(Path.c_str(),&Buf) != 0)
+ /* Transfer the modification times from the patch file
+ to be able to see in which state the file should be
+ and use the access time from the "old" file */
+ struct stat BufBase, BufPatch;
+ if (stat(Path.c_str(),&BufBase) != 0 ||
+ stat(string(Path+".ed").c_str(),&BufPatch) != 0)
return _error->Errno("stat",_("Failed to stat"));
struct utimbuf TimeBuf;
- TimeBuf.actime = Buf.st_atime;
- TimeBuf.modtime = Buf.st_mtime;
+ TimeBuf.actime = BufBase.st_atime;
+ TimeBuf.modtime = BufPatch.st_mtime;
if (utime(Itm->DestFile.c_str(),&TimeBuf) != 0)
return _error->Errno("utime",_("Failed to set modification time"));
- if (stat(Itm->DestFile.c_str(),&Buf) != 0)
+ if (stat(Itm->DestFile.c_str(),&BufBase) != 0)
return _error->Errno("stat",_("Failed to stat"));
// return done
- Res.LastModified = Buf.st_mtime;
- Res.Size = Buf.st_size;
+ Res.LastModified = BufBase.st_mtime;
+ Res.Size = BufBase.st_size;
Res.TakeHashes(Hash);
URIDone(Res);
diff --git a/methods/rsh.cc b/methods/rsh.cc
index f0ccfc42d..97b4ef151 100644
--- a/methods/rsh.cc
+++ b/methods/rsh.cc
@@ -278,8 +278,7 @@ bool RSHConn::ModTime(const char *Path, time_t &Time)
return false;
// Parse it
- StrToTime(Msg,Time);
- return true;
+ return FTPMDTMStrToTime(Msg.c_str(), Time);
}
/*}}}*/
// RSHConn::Get - Get a file /*{{{*/