summaryrefslogtreecommitdiff
path: root/methods
diff options
context:
space:
mode:
Diffstat (limited to 'methods')
-rw-r--r--methods/CMakeLists.txt35
-rw-r--r--methods/aptmethod.h151
-rw-r--r--methods/basehttp.cc829
-rw-r--r--methods/basehttp.h173
-rw-r--r--methods/cdrom.cc285
-rw-r--r--methods/connect.cc342
-rw-r--r--methods/connect.h21
-rw-r--r--methods/copy.cc93
-rw-r--r--methods/file.cc132
-rw-r--r--methods/ftp.cc1154
-rw-r--r--methods/ftp.h91
-rw-r--r--methods/gpgv.cc452
-rw-r--r--methods/http.cc1014
-rw-r--r--methods/http.h142
-rw-r--r--methods/http_main.cc17
-rw-r--r--methods/https.cc548
-rw-r--r--methods/https.h88
-rw-r--r--methods/mirror.cc470
-rw-r--r--methods/mirror.h57
-rw-r--r--methods/rfc2553emu.cc245
-rw-r--r--methods/rfc2553emu.h113
-rw-r--r--methods/rred.cc785
-rw-r--r--methods/rsh.cc548
-rw-r--r--methods/rsh.h76
-rw-r--r--methods/store.cc146
25 files changed, 8007 insertions, 0 deletions
diff --git a/methods/CMakeLists.txt b/methods/CMakeLists.txt
new file mode 100644
index 000000000..a74c2ce07
--- /dev/null
+++ b/methods/CMakeLists.txt
@@ -0,0 +1,35 @@
+# Create the executable targets
+add_executable(file file.cc)
+add_executable(copy copy.cc)
+add_executable(store store.cc)
+add_executable(gpgv gpgv.cc)
+add_executable(cdrom cdrom.cc)
+add_executable(http http.cc http_main.cc rfc2553emu.cc connect.cc basehttp.cc)
+add_executable(mirror mirror.cc http.cc rfc2553emu.cc connect.cc basehttp.cc)
+add_executable(https https.cc basehttp.cc)
+add_executable(ftp ftp.cc rfc2553emu.cc connect.cc)
+add_executable(rred rred.cc)
+add_executable(rsh rsh.cc)
+
+# Add target-specific header directories
+target_include_directories(https PRIVATE ${CURL_INCLUDE_DIRS})
+
+# Link the executables against the libraries
+target_link_libraries(file apt-pkg)
+target_link_libraries(copy apt-pkg)
+target_link_libraries(store apt-pkg)
+target_link_libraries(gpgv apt-pkg)
+target_link_libraries(cdrom apt-pkg)
+target_link_libraries(http apt-pkg)
+target_link_libraries(mirror apt-pkg ${RESOLV_LIBRARIES})
+target_link_libraries(https apt-pkg ${CURL_LIBRARIES})
+target_link_libraries(ftp apt-pkg)
+target_link_libraries(rred apt-pkg)
+target_link_libraries(rsh apt-pkg)
+
+# Install the executables
+install(TARGETS file copy store gpgv cdrom http https ftp rred rsh mirror
+ RUNTIME DESTINATION ${CMAKE_INSTALL_LIBEXECDIR}/apt/methods)
+
+add_slaves(${CMAKE_INSTALL_LIBEXECDIR}/apt/methods store gzip lzma bzip2 xz)
+add_slaves(${CMAKE_INSTALL_LIBEXECDIR}/apt/methods rsh ssh)
diff --git a/methods/aptmethod.h b/methods/aptmethod.h
new file mode 100644
index 000000000..04c4fa99b
--- /dev/null
+++ b/methods/aptmethod.h
@@ -0,0 +1,151 @@
+#ifndef APT_APTMETHOD_H
+#define APT_APTMETHOD_H
+
+#include <apt-pkg/acquire-method.h>
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/error.h>
+
+#include <algorithm>
+#include <locale>
+#include <string>
+#include <vector>
+
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <apti18n.h>
+
+static bool hasDoubleColon(std::string const &n)
+{
+ return n.find("::") != std::string::npos;
+}
+
+class aptMethod : public pkgAcqMethod
+{
+protected:
+ std::string const Binary;
+
+public:
+ virtual bool Configuration(std::string Message) APT_OVERRIDE
+ {
+ if (pkgAcqMethod::Configuration(Message) == false)
+ return false;
+
+ std::string const conf = std::string("Binary::") + Binary;
+ _config->MoveSubTree(conf.c_str(), NULL);
+
+ DropPrivsOrDie();
+
+ return true;
+ }
+
+ bool CalculateHashes(FetchItem const * const Itm, FetchResult &Res) const APT_NONNULL(2)
+ {
+ Hashes Hash(Itm->ExpectedHashes);
+ FileFd Fd;
+ if (Fd.Open(Res.Filename, FileFd::ReadOnly) == false || Hash.AddFD(Fd) == false)
+ return false;
+ Res.TakeHashes(Hash);
+ return true;
+ }
+
+ void Warning(const char *Format,...)
+ {
+ va_list args;
+ va_start(args,Format);
+ PrintStatus("104 Warning", Format, args);
+ va_end(args);
+ }
+
+ std::vector<std::string> methodNames;
+ void setPostfixForMethodNames(char const * const postfix) APT_NONNULL(2)
+ {
+ methodNames.erase(std::remove_if(methodNames.begin(), methodNames.end(), hasDoubleColon), methodNames.end());
+ decltype(methodNames) toAdd;
+ for (auto && name: methodNames)
+ toAdd.emplace_back(name + "::" + postfix);
+ std::move(toAdd.begin(), toAdd.end(), std::back_inserter(methodNames));
+ }
+ bool DebugEnabled() const
+ {
+ if (methodNames.empty())
+ return false;
+ auto const sni = std::find_if_not(methodNames.crbegin(), methodNames.crend(), hasDoubleColon);
+ if (unlikely(sni == methodNames.crend()))
+ return false;
+ auto const ln = methodNames[methodNames.size() - 1];
+ // worst case: all three are the same
+ std::string confln, confsn, confpn;
+ strprintf(confln, "Debug::Acquire::%s", ln.c_str());
+ strprintf(confsn, "Debug::Acquire::%s", sni->c_str());
+ auto const pni = sni->substr(0, sni->find('+'));
+ strprintf(confpn, "Debug::Acquire::%s", pni.c_str());
+ return _config->FindB(confln,_config->FindB(confsn, _config->FindB(confpn, false)));
+ }
+ std::string ConfigFind(char const * const postfix, std::string const &defValue) const APT_NONNULL(2)
+ {
+ for (auto name = methodNames.rbegin(); name != methodNames.rend(); ++name)
+ {
+ std::string conf;
+ strprintf(conf, "Acquire::%s::%s", name->c_str(), postfix);
+ auto const value = _config->Find(conf);
+ if (value.empty() == false)
+ return value;
+ }
+ return defValue;
+ }
+ std::string ConfigFind(std::string const &postfix, std::string const &defValue) const
+ {
+ return ConfigFind(postfix.c_str(), defValue);
+ }
+ bool ConfigFindB(char const * const postfix, bool const defValue) const APT_NONNULL(2)
+ {
+ return StringToBool(ConfigFind(postfix, defValue ? "yes" : "no"), defValue);
+ }
+ int ConfigFindI(char const * const postfix, int const defValue) const APT_NONNULL(2)
+ {
+ char *End;
+ std::string const value = ConfigFind(postfix, "");
+ auto const Res = strtol(value.c_str(), &End, 0);
+ if (value.c_str() == End)
+ return defValue;
+ return Res;
+ }
+
+ bool TransferModificationTimes(char const * const From, char const * const To, time_t &LastModified) APT_NONNULL(2, 3)
+ {
+ if (strcmp(To, "/dev/null") == 0)
+ return true;
+
+ struct stat Buf2;
+ if (lstat(To, &Buf2) != 0 || S_ISLNK(Buf2.st_mode))
+ return true;
+
+ struct stat Buf;
+ if (stat(From, &Buf) != 0)
+ return _error->Errno("stat",_("Failed to stat"));
+
+ // we don't use utimensat here for compatibility reasons: #738567
+ struct timeval times[2];
+ times[0].tv_sec = Buf.st_atime;
+ LastModified = times[1].tv_sec = Buf.st_mtime;
+ times[0].tv_usec = times[1].tv_usec = 0;
+ if (utimes(To, times) != 0)
+ return _error->Errno("utimes",_("Failed to set modification time"));
+ return true;
+ }
+
+ aptMethod(std::string &&Binary, char const * const Ver, unsigned long const Flags) APT_NONNULL(3) :
+ pkgAcqMethod(Ver, Flags), Binary(Binary), methodNames({Binary})
+ {
+ try {
+ std::locale::global(std::locale(""));
+ } catch (...) {
+ setlocale(LC_ALL, "");
+ }
+ }
+};
+
+#endif
diff --git a/methods/basehttp.cc b/methods/basehttp.cc
new file mode 100644
index 000000000..d7d9bccd0
--- /dev/null
+++ b/methods/basehttp.cc
@@ -0,0 +1,829 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+/* ######################################################################
+
+ HTTP and HTTPS share a lot of common code and these classes are
+ exactly the dumping ground for this common code
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <config.h>
+
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/strutl.h>
+
+#include <ctype.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+#include <iostream>
+#include <limits>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "basehttp.h"
+
+#include <apti18n.h>
+ /*}}}*/
+using namespace std;
+
+string BaseHttpMethod::FailFile;
+int BaseHttpMethod::FailFd = -1;
+time_t BaseHttpMethod::FailTime = 0;
+
+// ServerState::RunHeaders - Get the headers before the data /*{{{*/
+// ---------------------------------------------------------------------
+/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header
+ parse error occurred */
+ServerState::RunHeadersResult ServerState::RunHeaders(RequestState &Req,
+ const std::string &Uri)
+{
+ Owner->Status(_("Waiting for headers"));
+ do
+ {
+ string Data;
+ if (ReadHeaderLines(Data) == false)
+ continue;
+
+ if (Owner->Debug == true)
+ clog << "Answer for: " << Uri << endl << Data;
+
+ for (string::const_iterator I = Data.begin(); I < Data.end(); ++I)
+ {
+ string::const_iterator J = I;
+ for (; J != Data.end() && *J != '\n' && *J != '\r'; ++J);
+ if (Req.HeaderLine(string(I,J)) == false)
+ return RUN_HEADERS_PARSE_ERROR;
+ I = J;
+ }
+
+ // 100 Continue is a Nop...
+ if (Req.Result == 100)
+ continue;
+
+ // Tidy up the connection persistence state.
+ if (Req.Encoding == RequestState::Closes && Req.HaveContent == true)
+ Persistent = false;
+
+ return RUN_HEADERS_OK;
+ }
+ while (LoadNextResponse(false, Req) == true);
+
+ return RUN_HEADERS_IO_ERROR;
+}
+ /*}}}*/
+bool RequestState::HeaderLine(string const &Line) /*{{{*/
+{
+ if (Line.empty() == true)
+ return true;
+
+ if (Line.size() > 4 && stringcasecmp(Line.data(), Line.data()+4, "HTTP") == 0)
+ {
+ // Evil servers return no version
+ if (Line[4] == '/')
+ {
+ int const elements = sscanf(Line.c_str(),"HTTP/%3u.%3u %3u%359[^\n]",&Major,&Minor,&Result,Code);
+ if (elements == 3)
+ {
+ Code[0] = '\0';
+ if (Owner != NULL && Owner->Debug == true)
+ clog << "HTTP server doesn't give Reason-Phrase for " << std::to_string(Result) << std::endl;
+ }
+ else if (elements != 4)
+ return _error->Error(_("The HTTP server sent an invalid reply header"));
+ }
+ else
+ {
+ Major = 0;
+ Minor = 9;
+ if (sscanf(Line.c_str(),"HTTP %3u%359[^\n]",&Result,Code) != 2)
+ return _error->Error(_("The HTTP server sent an invalid reply header"));
+ }
+
+ /* Check the HTTP response header to get the default persistence
+ state. */
+ if (Major < 1)
+ Server->Persistent = false;
+ else
+ {
+ if (Major == 1 && Minor == 0)
+ {
+ Server->Persistent = false;
+ }
+ else
+ {
+ Server->Persistent = true;
+ if (Server->PipelineAllowed)
+ Server->Pipeline = true;
+ }
+ }
+
+ return true;
+ }
+
+ // Blah, some servers use "connection:closes", evil.
+ // and some even send empty header fields…
+ string::size_type Pos = Line.find(':');
+ if (Pos == string::npos)
+ return _error->Error(_("Bad header line"));
+ ++Pos;
+
+ // Parse off any trailing spaces between the : and the next word.
+ string::size_type Pos2 = Pos;
+ while (Pos2 < Line.length() && isspace_ascii(Line[Pos2]) != 0)
+ Pos2++;
+
+ string const Tag(Line,0,Pos);
+ string const Val(Line,Pos2);
+
+ if (stringcasecmp(Tag,"Content-Length:") == 0)
+ {
+ if (Encoding == Closes)
+ Encoding = Stream;
+ HaveContent = true;
+
+ unsigned long long * DownloadSizePtr = &DownloadSize;
+ if (Result == 416 || (Result >= 300 && Result < 400))
+ DownloadSizePtr = &JunkSize;
+
+ *DownloadSizePtr = strtoull(Val.c_str(), NULL, 10);
+ if (*DownloadSizePtr >= std::numeric_limits<unsigned long long>::max())
+ return _error->Errno("HeaderLine", _("The HTTP server sent an invalid Content-Length header"));
+ else if (*DownloadSizePtr == 0)
+ HaveContent = false;
+
+ // On partial content (206) the Content-Length is less than the real
+ // size, so do not set it here but leave that to the Content-Range
+ // header instead
+ if(Result != 206 && TotalFileSize == 0)
+ TotalFileSize = DownloadSize;
+
+ return true;
+ }
+
+ if (stringcasecmp(Tag,"Content-Type:") == 0)
+ {
+ HaveContent = true;
+ return true;
+ }
+
+ // The Content-Range field only has a meaning in HTTP/1.1 for the
+ // 206 (Partial Content) and 416 (Range Not Satisfiable) responses
+ // according to RFC7233 "Range Requests", §4.2, so only consider it
+ // for such responses.
+ if ((Result == 416 || Result == 206) && stringcasecmp(Tag,"Content-Range:") == 0)
+ {
+ HaveContent = true;
+
+ // §14.16 says 'byte-range-resp-spec' should be a '*' in case of 416
+ if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&TotalFileSize) == 1)
+ ; // we got the expected filesize which is all we wanted
+ else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&TotalFileSize) != 2)
+ return _error->Error(_("The HTTP server sent an invalid Content-Range header"));
+ if ((unsigned long long)StartPos > TotalFileSize)
+ return _error->Error(_("This HTTP server has broken range support"));
+
+ // figure out what we will download
+ DownloadSize = TotalFileSize - StartPos;
+ return true;
+ }
+
+ if (stringcasecmp(Tag,"Transfer-Encoding:") == 0)
+ {
+ HaveContent = true;
+ if (stringcasecmp(Val,"chunked") == 0)
+ Encoding = Chunked;
+ return true;
+ }
+
+ if (stringcasecmp(Tag,"Connection:") == 0)
+ {
+ if (stringcasecmp(Val,"close") == 0)
+ {
+ Server->Persistent = false;
+ Server->Pipeline = false;
+ /* Some servers send error pages (as they are dynamically generated)
+ for simplicity via a connection close instead of e.g. chunked,
+ so assuming an always closing server only if we get a file + close */
+ if (Result >= 200 && Result < 300)
+ Server->PipelineAllowed = false;
+ }
+ else if (stringcasecmp(Val,"keep-alive") == 0)
+ Server->Persistent = true;
+ return true;
+ }
+
+ if (stringcasecmp(Tag,"Last-Modified:") == 0)
+ {
+ if (RFC1123StrToTime(Val.c_str(), Date) == false)
+ return _error->Error(_("Unknown date format"));
+ return true;
+ }
+
+ if (stringcasecmp(Tag,"Location:") == 0)
+ {
+ Location = Val;
+ return true;
+ }
+
+ if (stringcasecmp(Tag, "Accept-Ranges:") == 0)
+ {
+ std::string ranges = ',' + Val + ',';
+ ranges.erase(std::remove(ranges.begin(), ranges.end(), ' '), ranges.end());
+ if (ranges.find(",bytes,") == std::string::npos)
+ Server->RangesAllowed = false;
+ return true;
+ }
+
+ return true;
+}
+ /*}}}*/
+// ServerState::ServerState - Constructor /*{{{*/
+ServerState::ServerState(URI Srv, BaseHttpMethod *Owner) :
+ ServerName(Srv), TimeOut(120), Owner(Owner)
+{
+ Reset();
+}
+ /*}}}*/
+bool RequestState::AddPartialFileToHashes(FileFd &File) /*{{{*/
+{
+ File.Truncate(StartPos);
+ return Server->GetHashes()->AddFD(File, StartPos);
+}
+ /*}}}*/
+void ServerState::Reset() /*{{{*/
+{
+ Persistent = false;
+ Pipeline = false;
+ PipelineAllowed = true;
+ RangesAllowed = true;
+}
+ /*}}}*/
+
+// BaseHttpMethod::DealWithHeaders - Handle the retrieved header data /*{{{*/
+// ---------------------------------------------------------------------
+/* We look at the header data we got back from the server and decide what
+ to do. Returns DealWithHeadersResult (see http.h for details).
+ */
+BaseHttpMethod::DealWithHeadersResult
+BaseHttpMethod::DealWithHeaders(FetchResult &Res, RequestState &Req)
+{
+ // Not Modified
+ if (Req.Result == 304)
+ {
+ RemoveFile("server", Queue->DestFile);
+ Res.IMSHit = true;
+ Res.LastModified = Queue->LastModified;
+ Res.Size = 0;
+ return IMS_HIT;
+ }
+
+ /* Redirect
+ *
+ * Note that it is only OK for us to treat all redirection the same
+ * because we *always* use GET, not other HTTP methods. There are
+ * three redirection codes for which it is not appropriate that we
+ * redirect. Pass on those codes so the error handling kicks in.
+ */
+ if (AllowRedirect
+ && (Req.Result > 300 && Req.Result < 400)
+ && (Req.Result != 300 // Multiple Choices
+ && Req.Result != 304 // Not Modified
+ && Req.Result != 306)) // (Not part of HTTP/1.1, reserved)
+ {
+ if (Req.Location.empty() == true)
+ ;
+ else if (Req.Location[0] == '/' && Queue->Uri.empty() == false)
+ {
+ URI Uri = Queue->Uri;
+ if (Uri.Host.empty() == false)
+ NextURI = URI::SiteOnly(Uri);
+ else
+ NextURI.clear();
+ NextURI.append(DeQuoteString(Req.Location));
+ if (Queue->Uri == NextURI)
+ {
+ SetFailReason("RedirectionLoop");
+ _error->Error("Redirection loop encountered");
+ if (Req.HaveContent == true)
+ return ERROR_WITH_CONTENT_PAGE;
+ return ERROR_UNRECOVERABLE;
+ }
+ return TRY_AGAIN_OR_REDIRECT;
+ }
+ else
+ {
+ NextURI = DeQuoteString(Req.Location);
+ URI tmpURI = NextURI;
+ if (tmpURI.Access.find('+') != std::string::npos)
+ {
+ _error->Error("Server tried to trick us into using a specific implementation: %s", tmpURI.Access.c_str());
+ if (Req.HaveContent == true)
+ return ERROR_WITH_CONTENT_PAGE;
+ return ERROR_UNRECOVERABLE;
+ }
+ URI Uri = Queue->Uri;
+ if (Binary.find('+') != std::string::npos)
+ {
+ auto base = Binary.substr(0, Binary.find('+'));
+ if (base != tmpURI.Access)
+ {
+ tmpURI.Access = base + '+' + tmpURI.Access;
+ if (tmpURI.Access == Binary)
+ {
+ std::string tmpAccess = Uri.Access;
+ std::swap(tmpURI.Access, Uri.Access);
+ NextURI = tmpURI;
+ std::swap(tmpURI.Access, Uri.Access);
+ }
+ else
+ NextURI = tmpURI;
+ }
+ }
+ if (Queue->Uri == NextURI)
+ {
+ SetFailReason("RedirectionLoop");
+ _error->Error("Redirection loop encountered");
+ if (Req.HaveContent == true)
+ return ERROR_WITH_CONTENT_PAGE;
+ return ERROR_UNRECOVERABLE;
+ }
+ Uri.Access = Binary;
+ // same protocol redirects are okay
+ if (tmpURI.Access == Uri.Access)
+ return TRY_AGAIN_OR_REDIRECT;
+ // as well as http to https
+ else if ((Uri.Access == "http" || Uri.Access == "https+http") && tmpURI.Access == "https")
+ return TRY_AGAIN_OR_REDIRECT;
+ else
+ {
+ auto const tmpplus = tmpURI.Access.find('+');
+ if (tmpplus != std::string::npos && tmpURI.Access.substr(tmpplus + 1) == "https")
+ {
+ auto const uriplus = Uri.Access.find('+');
+ if (uriplus == std::string::npos)
+ {
+ if (Uri.Access == tmpURI.Access.substr(0, tmpplus)) // foo -> foo+https
+ return TRY_AGAIN_OR_REDIRECT;
+ }
+ else if (Uri.Access.substr(uriplus + 1) == "http" &&
+ Uri.Access.substr(0, uriplus) == tmpURI.Access.substr(0, tmpplus)) // foo+http -> foo+https
+ return TRY_AGAIN_OR_REDIRECT;
+ }
+ }
+ _error->Error("Redirection from %s to '%s' is forbidden", Uri.Access.c_str(), NextURI.c_str());
+ }
+ /* else pass through for error message */
+ }
+ // retry after an invalid range response without partial data
+ else if (Req.Result == 416)
+ {
+ struct stat SBuf;
+ if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
+ {
+ bool partialHit = false;
+ if (Queue->ExpectedHashes.usable() == true)
+ {
+ Hashes resultHashes(Queue->ExpectedHashes);
+ FileFd file(Queue->DestFile, FileFd::ReadOnly);
+ Req.TotalFileSize = file.FileSize();
+ Req.Date = file.ModificationTime();
+ resultHashes.AddFD(file);
+ HashStringList const hashList = resultHashes.GetHashStringList();
+ partialHit = (Queue->ExpectedHashes == hashList);
+ }
+ else if ((unsigned long long)SBuf.st_size == Req.TotalFileSize)
+ partialHit = true;
+ if (partialHit == true)
+ {
+ // the file is completely downloaded, but was not moved
+ if (Req.HaveContent == true)
+ {
+ // nuke the sent error page
+ Server->RunDataToDevNull(Req);
+ Req.HaveContent = false;
+ }
+ Req.StartPos = Req.TotalFileSize;
+ Req.Result = 200;
+ }
+ else if (RemoveFile("server", Queue->DestFile))
+ {
+ NextURI = Queue->Uri;
+ return TRY_AGAIN_OR_REDIRECT;
+ }
+ }
+ }
+
+ /* We have a reply we don't handle. This should indicate a perm server
+ failure */
+ if (Req.Result < 200 || Req.Result >= 300)
+ {
+ if (_error->PendingError() == false)
+ {
+ std::string err;
+ strprintf(err, "HttpError%u", Req.Result);
+ SetFailReason(err);
+ _error->Error("%u %s", Req.Result, Req.Code);
+ }
+ if (Req.HaveContent == true)
+ return ERROR_WITH_CONTENT_PAGE;
+ return ERROR_UNRECOVERABLE;
+ }
+
+ // This is some sort of 2xx 'data follows' reply
+ Res.LastModified = Req.Date;
+ Res.Size = Req.TotalFileSize;
+ return FILE_IS_OPEN;
+}
+ /*}}}*/
+// BaseHttpMethod::SigTerm - Handle a fatal signal /*{{{*/
+// ---------------------------------------------------------------------
+/* This closes and timestamps the open file. This is necessary to get
+ resume behavior on user abort */
+void BaseHttpMethod::SigTerm(int)
+{
+ if (FailFd == -1)
+ _exit(100);
+
+ struct timeval times[2];
+ times[0].tv_sec = FailTime;
+ times[1].tv_sec = FailTime;
+ times[0].tv_usec = times[1].tv_usec = 0;
+ utimes(FailFile.c_str(), times);
+ close(FailFd);
+
+ _exit(100);
+}
+ /*}}}*/
+// BaseHttpMethod::Fetch - Fetch an item /*{{{*/
+// ---------------------------------------------------------------------
+/* This adds an item to the pipeline. We keep the pipeline at a fixed
+ depth. */
+bool BaseHttpMethod::Fetch(FetchItem *)
+{
+ if (Server == nullptr || QueueBack == nullptr)
+ return true;
+
+ // If pipelining is disabled, we only queue 1 request
+ auto const AllowedDepth = Server->Pipeline ? PipelineDepth : 0;
+ // how deep is our pipeline currently?
+ decltype(PipelineDepth) CurrentDepth = 0;
+ for (FetchItem const *I = Queue; I != QueueBack; I = I->Next)
+ ++CurrentDepth;
+ if (CurrentDepth > AllowedDepth)
+ return true;
+
+ do {
+ // Make sure we stick with the same server
+ if (Server->Comp(QueueBack->Uri) == false)
+ break;
+
+ bool const UsableHashes = QueueBack->ExpectedHashes.usable();
+ // if we have no hashes, do at most one such request
+ // as we can't fix up pipelining misbehaviors otherwise
+ if (CurrentDepth != 0 && UsableHashes == false)
+ break;
+
+ if (UsableHashes && FileExists(QueueBack->DestFile))
+ {
+ FileFd partial(QueueBack->DestFile, FileFd::ReadOnly);
+ Hashes wehave(QueueBack->ExpectedHashes);
+ if (QueueBack->ExpectedHashes.FileSize() == partial.FileSize())
+ {
+ if (wehave.AddFD(partial) &&
+ wehave.GetHashStringList() == QueueBack->ExpectedHashes)
+ {
+ FetchResult Res;
+ Res.Filename = QueueBack->DestFile;
+ Res.ResumePoint = QueueBack->ExpectedHashes.FileSize();
+ URIStart(Res);
+ // move item to the start of the queue as URIDone will
+ // always dequeue the first item in the queue
+ if (Queue != QueueBack)
+ {
+ FetchItem *Prev = Queue;
+ for (; Prev->Next != QueueBack; Prev = Prev->Next)
+ /* look for the previous queue item */;
+ Prev->Next = QueueBack->Next;
+ QueueBack->Next = Queue;
+ Queue = QueueBack;
+ QueueBack = Prev->Next;
+ }
+ Res.TakeHashes(wehave);
+ URIDone(Res);
+ continue;
+ }
+ else
+ RemoveFile("Fetch-Partial", QueueBack->DestFile);
+ }
+ }
+ auto const Tmp = QueueBack;
+ QueueBack = QueueBack->Next;
+ SendReq(Tmp);
+ ++CurrentDepth;
+ } while (CurrentDepth <= AllowedDepth && QueueBack != nullptr);
+
+ return true;
+}
+ /*}}}*/
+// BaseHttpMethod::Loop - Main loop /*{{{*/
+int BaseHttpMethod::Loop()
+{
+ signal(SIGTERM,SigTerm);
+ signal(SIGINT,SigTerm);
+
+ Server = 0;
+
+ int FailCounter = 0;
+ while (1)
+ {
+ // We have no commands, wait for some to arrive
+ if (Queue == 0)
+ {
+ if (WaitFd(STDIN_FILENO) == false)
+ return 0;
+ }
+
+ /* Run messages, we can accept 0 (no message) if we didn't
+ do a WaitFd above.. Otherwise the FD is closed. */
+ int Result = Run(true);
+ if (Result != -1 && (Result != 0 || Queue == 0))
+ {
+ if(FailReason.empty() == false ||
+ ConfigFindB("DependOnSTDIN", true) == true)
+ return 100;
+ else
+ return 0;
+ }
+
+ if (Queue == 0)
+ continue;
+
+ // Connect to the server
+ if (Server == 0 || Server->Comp(Queue->Uri) == false)
+ {
+ Server = CreateServerState(Queue->Uri);
+ setPostfixForMethodNames(::URI(Queue->Uri).Host.c_str());
+ AllowRedirect = ConfigFindB("AllowRedirect", true);
+ PipelineDepth = ConfigFindI("Pipeline-Depth", 10);
+ Debug = DebugEnabled();
+ }
+
+ /* If the server has explicitly said this is the last connection
+ then we pre-emptively shut down the pipeline and tear down
+ the connection. This will speed up HTTP/1.0 servers a tad
+ since we don't have to wait for the close sequence to
+ complete */
+ if (Server->Persistent == false)
+ Server->Close();
+
+ // Reset the pipeline
+ if (Server->IsOpen() == false)
+ QueueBack = Queue;
+
+ // Connect to the host
+ if (Server->Open() == false)
+ {
+ Fail(true);
+ Server = nullptr;
+ continue;
+ }
+
+ // Fill the pipeline.
+ Fetch(0);
+
+ RequestState Req(this, Server.get());
+ // Fetch the next URL header data from the server.
+ switch (Server->RunHeaders(Req, Queue->Uri))
+ {
+ case ServerState::RUN_HEADERS_OK:
+ break;
+
+ // The header data is bad
+ case ServerState::RUN_HEADERS_PARSE_ERROR:
+ {
+ _error->Error(_("Bad header data"));
+ Fail(true);
+ Server->Close();
+ RotateDNS();
+ continue;
+ }
+
+ // The server closed a connection during the header get..
+ default:
+ case ServerState::RUN_HEADERS_IO_ERROR:
+ {
+ FailCounter++;
+ _error->Discard();
+ Server->Close();
+ Server->Pipeline = false;
+ Server->PipelineAllowed = false;
+
+ if (FailCounter >= 2)
+ {
+ Fail(_("Connection failed"),true);
+ FailCounter = 0;
+ }
+
+ RotateDNS();
+ continue;
+ }
+ };
+
+ // Decide what to do.
+ FetchResult Res;
+ Res.Filename = Queue->DestFile;
+ switch (DealWithHeaders(Res, Req))
+ {
+ // Ok, the file is Open
+ case FILE_IS_OPEN:
+ {
+ URIStart(Res);
+
+ // Run the data
+ bool Result = true;
+
+ // ensure we don't fetch too much
+ // we could do "Server->MaximumSize = Queue->MaximumSize" here
+ // but that would break the clever pipeline messup detection
+ // so instead we use the size of the biggest item in the queue
+ Req.MaximumSize = FindMaximumObjectSizeInQueue();
+
+ if (Req.HaveContent)
+ Result = Server->RunData(Req);
+
+ /* If the server is sending back sizeless responses then fill in
+ the size now */
+ if (Res.Size == 0)
+ Res.Size = Req.File.Size();
+
+ // Close the file, destroy the FD object and timestamp it
+ FailFd = -1;
+ Req.File.Close();
+
+ // Timestamp
+ struct timeval times[2];
+ times[0].tv_sec = times[1].tv_sec = Req.Date;
+ times[0].tv_usec = times[1].tv_usec = 0;
+ utimes(Queue->DestFile.c_str(), times);
+
+ // Send status to APT
+ if (Result == true)
+ {
+ Hashes * const resultHashes = Server->GetHashes();
+ HashStringList const hashList = resultHashes->GetHashStringList();
+ if (PipelineDepth != 0 && Queue->ExpectedHashes.usable() == true && Queue->ExpectedHashes != hashList)
+ {
+ // we did not get the expected hash… mhhh:
+ // could it be that server/proxy messed up pipelining?
+ FetchItem * BeforeI = Queue;
+ for (FetchItem *I = Queue->Next; I != 0 && I != QueueBack; I = I->Next)
+ {
+ if (I->ExpectedHashes.usable() == true && I->ExpectedHashes == hashList)
+ {
+ // yes, he did! Disable pipelining and rewrite queue
+ if (Server->Pipeline == true)
+ {
+ Warning(_("Automatically disabled %s due to incorrect response from server/proxy. (man 5 apt.conf)"), "Acquire::http::Pipeline-Depth");
+ Server->Pipeline = false;
+ Server->PipelineAllowed = false;
+ // we keep the PipelineDepth value so that the rest of the queue can be fixed up as well
+ }
+ Rename(Res.Filename, I->DestFile);
+ Res.Filename = I->DestFile;
+ BeforeI->Next = I->Next;
+ I->Next = Queue;
+ Queue = I;
+ break;
+ }
+ BeforeI = I;
+ }
+ }
+ Res.TakeHashes(*resultHashes);
+ URIDone(Res);
+ }
+ else
+ {
+ if (Server->IsOpen() == false)
+ {
+ FailCounter++;
+ _error->Discard();
+ Server->Close();
+
+ if (FailCounter >= 2)
+ {
+ Fail(_("Connection failed"),true);
+ FailCounter = 0;
+ }
+
+ QueueBack = Queue;
+ }
+ else
+ {
+ Server->Close();
+ Fail(true);
+ }
+ }
+ break;
+ }
+
+ // IMS hit
+ case IMS_HIT:
+ {
+ URIDone(Res);
+ break;
+ }
+
+ // Hard server error, not found or something
+ case ERROR_UNRECOVERABLE:
+ {
+ Fail();
+ break;
+ }
+
+ // Hard internal error, kill the connection and fail
+ case ERROR_NOT_FROM_SERVER:
+ {
+ Fail();
+ RotateDNS();
+ Server->Close();
+ break;
+ }
+
+ // We need to flush the data, the header is like a 404 w/ error text
+ case ERROR_WITH_CONTENT_PAGE:
+ {
+ Server->RunDataToDevNull(Req);
+ Fail();
+ break;
+ }
+
+ // Try again with a new URL
+ case TRY_AGAIN_OR_REDIRECT:
+ {
+ // Clear rest of response if there is content
+ if (Req.HaveContent)
+ Server->RunDataToDevNull(Req);
+ Redirect(NextURI);
+ break;
+ }
+
+ default:
+ Fail(_("Internal error"));
+ break;
+ }
+
+ FailCounter = 0;
+ }
+
+ return 0;
+}
+ /*}}}*/
+unsigned long long BaseHttpMethod::FindMaximumObjectSizeInQueue() const /*{{{*/
+{
+ unsigned long long MaxSizeInQueue = 0;
+ for (FetchItem *I = Queue; I != 0 && I != QueueBack; I = I->Next)
+ MaxSizeInQueue = std::max(MaxSizeInQueue, I->MaximumSize);
+ return MaxSizeInQueue;
+}
+ /*}}}*/
+BaseHttpMethod::BaseHttpMethod(std::string &&Binary, char const * const Ver,unsigned long const Flags) :/*{{{*/
+ aptMethod(std::move(Binary), Ver, Flags), Server(nullptr), PipelineDepth(10),
+ AllowRedirect(false), Debug(false)
+{
+}
+ /*}}}*/
+bool BaseHttpMethod::Configuration(std::string Message) /*{{{*/
+{
+ if (aptMethod::Configuration(Message) == false)
+ return false;
+
+ _config->CndSet("Acquire::tor::Proxy",
+ "socks5h://apt-transport-tor@localhost:9050");
+ return true;
+}
+ /*}}}*/
+bool BaseHttpMethod::AddProxyAuth(URI &Proxy, URI const &Server) const /*{{{*/
+{
+ if (std::find(methodNames.begin(), methodNames.end(), "tor") != methodNames.end() &&
+ Proxy.User == "apt-transport-tor" && Proxy.Password.empty())
+ {
+ std::string pass = Server.Host;
+ pass.erase(std::remove_if(pass.begin(), pass.end(), [](char const c) { return std::isalnum(c) == 0; }), pass.end());
+ if (pass.length() > 255)
+ Proxy.Password = pass.substr(0, 255);
+ else
+ Proxy.Password = std::move(pass);
+ }
+ // FIXME: should we support auth.conf for proxies?
+ return true;
+}
+ /*}}}*/
diff --git a/methods/basehttp.h b/methods/basehttp.h
new file mode 100644
index 000000000..41a9a4306
--- /dev/null
+++ b/methods/basehttp.h
@@ -0,0 +1,173 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+/* ######################################################################
+
+ Classes dealing with the abstraction of talking to an endpoint via a text
+ protocol like HTTP (which is used by the http and https methods)
+
+ ##################################################################### */
+ /*}}}*/
+
+#ifndef APT_SERVER_H
+#define APT_SERVER_H
+
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/fileutl.h>
+#include "aptmethod.h"
+
+#include <time.h>
+#include <iostream>
+#include <string>
+#include <memory>
+
+using std::cout;
+using std::endl;
+
+class Hashes;
+class BaseHttpMethod;
+struct ServerState;
+
+struct RequestState
+{
+ unsigned int Major = 0;
+ unsigned int Minor = 0;
+ unsigned int Result = 0;
+ char Code[360];
+
+ // total size of the usable content (aka: the file)
+ unsigned long long TotalFileSize = 0;
+ // size we actually download (can be smaller than Size if we have partial content)
+ unsigned long long DownloadSize = 0;
+ // size of junk content (aka: server error pages)
+ unsigned long long JunkSize = 0;
+ // The start of the data (for partial content)
+ unsigned long long StartPos = 0;
+
+ unsigned long long MaximumSize = 0;
+
+ time_t Date;
+ bool HaveContent = false;
+ enum {Chunked,Stream,Closes} Encoding = Closes;
+ enum {Header, Data} State = Header;
+ std::string Location;
+
+ FileFd File;
+
+ BaseHttpMethod * const Owner;
+ ServerState * const Server;
+
+ bool HeaderLine(std::string const &Line);
+ bool AddPartialFileToHashes(FileFd &File);
+
+ RequestState(BaseHttpMethod * const Owner, ServerState * const Server) :
+ Owner(Owner), Server(Server) { time(&Date); }
+};
+
+struct ServerState
+{
+ bool Persistent;
+ bool PipelineAllowed;
+ bool RangesAllowed;
+
+ bool Pipeline;
+ URI ServerName;
+ URI Proxy;
+ unsigned long TimeOut;
+
+ protected:
+ BaseHttpMethod *Owner;
+
+ virtual bool ReadHeaderLines(std::string &Data) = 0;
+ virtual bool LoadNextResponse(bool const ToFile, RequestState &Req) = 0;
+
+ public:
+
+ /** \brief Result of the header acquire */
+ enum RunHeadersResult {
+ /** \brief Header ok */
+ RUN_HEADERS_OK,
+ /** \brief IO error while retrieving */
+ RUN_HEADERS_IO_ERROR,
+ /** \brief Parse error after retrieving */
+ RUN_HEADERS_PARSE_ERROR
+ };
+ /** \brief Get the headers before the data */
+ RunHeadersResult RunHeaders(RequestState &Req, const std::string &Uri);
+
+ bool Comp(URI Other) const {return Other.Host == ServerName.Host && Other.Port == ServerName.Port;};
+ virtual void Reset();
+ virtual bool WriteResponse(std::string const &Data) = 0;
+
+ /** \brief Transfer the data from the socket */
+ virtual bool RunData(RequestState &Req) = 0;
+ virtual bool RunDataToDevNull(RequestState &Req) = 0;
+
+ virtual bool Open() = 0;
+ virtual bool IsOpen() = 0;
+ virtual bool Close() = 0;
+ virtual bool InitHashes(HashStringList const &ExpectedHashes) = 0;
+ virtual bool Die(RequestState &Req) = 0;
+ virtual bool Flush(FileFd * const File) = 0;
+ virtual bool Go(bool ToFile, RequestState &Req) = 0;
+ virtual Hashes * GetHashes() = 0;
+
+ ServerState(URI Srv, BaseHttpMethod *Owner);
+ virtual ~ServerState() {};
+};
+
+class BaseHttpMethod : public aptMethod
+{
+ protected:
+ virtual bool Fetch(FetchItem *) APT_OVERRIDE;
+
+ std::unique_ptr<ServerState> Server;
+ std::string NextURI;
+
+ unsigned long PipelineDepth;
+ bool AllowRedirect;
+
+ // Find the biggest item in the fetch queue for the checking of the maximum
+ // size
+ unsigned long long FindMaximumObjectSizeInQueue() const APT_PURE;
+
+ public:
+ bool Debug;
+
+ /** \brief Result of the header parsing */
+ enum DealWithHeadersResult {
+ /** \brief The file is open and ready */
+ FILE_IS_OPEN,
+ /** \brief We got a IMS hit, the file has not changed */
+ IMS_HIT,
+ /** \brief The server reported a unrecoverable error */
+ ERROR_UNRECOVERABLE,
+ /** \brief The server reported a error with a error content page */
+ ERROR_WITH_CONTENT_PAGE,
+ /** \brief An error on the client side */
+ ERROR_NOT_FROM_SERVER,
+ /** \brief A redirect or retry request */
+ TRY_AGAIN_OR_REDIRECT
+ };
+ /** \brief Handle the retrieved header data */
+ virtual DealWithHeadersResult DealWithHeaders(FetchResult &Res, RequestState &Req);
+
+ // In the event of a fatal signal this file will be closed and timestamped.
+ static std::string FailFile;
+ static int FailFd;
+ static time_t FailTime;
+ static APT_NORETURN void SigTerm(int);
+
+ int Loop();
+
+ virtual void SendReq(FetchItem *Itm) = 0;
+ virtual std::unique_ptr<ServerState> CreateServerState(URI const &uri) = 0;
+ virtual void RotateDNS() = 0;
+ virtual bool Configuration(std::string Message) APT_OVERRIDE;
+
+ bool AddProxyAuth(URI &Proxy, URI const &Server) const;
+
+ BaseHttpMethod(std::string &&Binary, char const * const Ver,unsigned long const Flags);
+ virtual ~BaseHttpMethod() {};
+};
+
+#endif
diff --git a/methods/cdrom.cc b/methods/cdrom.cc
new file mode 100644
index 000000000..87a58e948
--- /dev/null
+++ b/methods/cdrom.cc
@@ -0,0 +1,285 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: cdrom.cc,v 1.20.2.1 2004/01/16 18:58:50 mdz Exp $
+/* ######################################################################
+
+ CDROM URI method for APT
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <config.h>
+
+#include <apt-pkg/cdrom.h>
+#include <apt-pkg/cdromutl.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/hashes.h>
+
+#include "aptmethod.h"
+
+#include <string>
+#include <vector>
+#include <sys/stat.h>
+
+#include <iostream>
+#include <apti18n.h>
+ /*}}}*/
+
+using namespace std;
+
+class CDROMMethod : public aptMethod
+{
+   bool DatabaseLoaded;     // true once Dir::State::cdroms has been read
+   bool Debug;
+
+   ::Configuration Database;  // the cdrom database (maps CD::<ID> -> label)
+   string CurrentID;          // ID of the disc in the drive ("FAIL" after a miss)
+   string CDROM;              // mount point of the drive
+   bool MountedByApt;         // whether we mounted the disc (and must unmount on Exit)
+   pkgUdevCdromDevices UdevCdroms;
+
+   // Check whether the disc mounted at MountPath is the one 'want' asks for
+   bool IsCorrectCD(URI want, string MountPath, string& NewID);
+   // Scan udev devices and try to find/mount the requested disc
+   bool AutoDetectAndMount(const URI, string &NewID);
+   virtual bool Fetch(FetchItem *Itm) APT_OVERRIDE;
+   // Look up the tag (ID) for a given disc label in the database
+   string GetID(string Name);
+   virtual void Exit() APT_OVERRIDE;
+
+   public:
+
+   CDROMMethod();
+};
+
+// CDROMMethod::CDROMMethod - Constructor /*{{{*/
+// ---------------------------------------------------------------------
+/* The method is single-instance, local-only and removable-media aware;
+   NeedsCleanup ensures Exit() runs so a disc we mounted gets unmounted. */
+CDROMMethod::CDROMMethod() : aptMethod("cdrom", "1.0",SingleInstance | LocalOnly |
+					  SendConfig | NeedsCleanup |
+					  Removable),
+                                          DatabaseLoaded(false),
+                                          Debug(false),
+                                          MountedByApt(false)
+{
+   // load the udev bindings at runtime (presumably via dlopen — optional dependency)
+   UdevCdroms.Dlopen();
+}
+ /*}}}*/
+// CDROMMethod::Exit - Unmount the disc if necessary /*{{{*/
+// ---------------------------------------------------------------------
+/* Shutdown hook: only a disc that this method mounted itself is
+   unmounted again; user-mounted discs are left alone. */
+void CDROMMethod::Exit()
+{
+   if (MountedByApt)
+      UnmountCdrom(CDROM);
+}
+ /*}}}*/
+// CDROMMethod::GetID - Search the database for a matching string /*{{{*/
+// ---------------------------------------------------------------------
+/* Walk the children of the "CD" configuration tree and return the tag of
+   the first entry whose value equals Name; empty string if none matches. */
+string CDROMMethod::GetID(string Name)
+{
+   const Configuration::Item *Entry = Database.Tree("CD");
+   if (Entry != 0)
+      Entry = Entry->Child;
+
+   while (Entry != 0)
+   {
+      if (Entry->Value == Name)
+	 return Entry->Tag;
+      Entry = Entry->Next;
+   }
+   return string();
+}
+ /*}}}*/
+// CDROMMethod::AutoDetectAndMount /*{{{*/
+// ---------------------------------------------------------------------
+/* Modifies class variable CDROM to the mountpoint.
+   Strategy: first look at already-mounted cdrom devices, then (unless
+   mounting is forbidden) try to mount each unmounted device at the APT
+   media mount point and check it, unmounting again on a mismatch. */
+bool CDROMMethod::AutoDetectAndMount(const URI Get, string &NewID)
+{
+   vector<struct CdromDevice> v = UdevCdroms.Scan();
+
+   // first check if its mounted somewhere already
+   for (unsigned int i=0; i < v.size(); i++)
+   {
+      if (v[i].Mounted)
+      {
+	 if (Debug)
+	    clog << "Checking mounted cdrom device " << v[i].DeviceName << endl;
+	 if (IsCorrectCD(Get, v[i].MountPath, NewID))
+	 {
+	    CDROM = v[i].MountPath;
+	    return true;
+	 }
+      }
+   }
+
+   // we are not supposed to mount, exit
+   if (_config->FindB("APT::CDROM::NoMount",false) == true)
+      return false;
+
+   // check if we have the mount point
+   string AptMountPoint = _config->FindDir("Dir::Media::MountPath");
+   if (!FileExists(AptMountPoint))
+      mkdir(AptMountPoint.c_str(), 0750);
+
+   // now try mounting
+   for (unsigned int i=0; i < v.size(); i++)
+   {
+      if (!v[i].Mounted)
+      {
+	 if(MountCdrom(AptMountPoint, v[i].DeviceName))
+	 {
+	    if (IsCorrectCD(Get, AptMountPoint, NewID))
+	    {
+	       // note for Exit(): we mounted it, so we unmount it again
+	       MountedByApt = true;
+	       CDROM = AptMountPoint;
+	       return true;
+	    } else {
+	       UnmountCdrom(AptMountPoint);
+	    }
+	 }
+      }
+   }
+
+   return false;
+}
+ /*}}}*/
+// CDROMMethod::IsCorrectCD /*{{{*/
+// ---------------------------------------------------------------------
+/* Identify the disc at MountPath with both ident algorithm versions
+   (newest first) and accept it if the database maps its ID to the
+   host part of the requested URI. */
+bool CDROMMethod::IsCorrectCD(URI want, string MountPath, string& NewID)
+{
+   unsigned int Version = 2;
+   do
+   {
+      if (IdentCdrom(MountPath,NewID,Version) == false)
+	 return false;
+
+      if (Debug == true)
+	 clog << "ID " << Version << " " << NewID << endl;
+
+      // A hit
+      if (Database.Find("CD::" + NewID) == want.Host)
+	 return true;
+   } while (--Version != 0);
+
+   return false;
+}
+ /*}}}*/
+// CDROMMethod::Fetch - Fetch a file /*{{{*/
+// ---------------------------------------------------------------------
+/* Serve a file from the disc matching the cdrom: URI. IMS queries are
+   always hits (discs are read-only); otherwise the right disc is located
+   (auto-detected or prompted for via MediaFail) before the file is
+   stat'ed and hashed. */
+bool CDROMMethod::Fetch(FetchItem *Itm)
+{
+   FetchResult Res;
+
+   URI Get = Itm->Uri;
+   string File = Get.Path;
+   Debug = DebugEnabled();
+
+   if (Debug)
+      clog << "CDROMMethod::Fetch " << Itm->Uri << endl;
+
+   /* All IMS queries are returned as a hit, CDROMs are readonly so
+      time stamps never change */
+   if (Itm->LastModified != 0)
+   {
+      Res.LastModified = Itm->LastModified;
+      Res.IMSHit = true;
+      Res.Filename = Itm->DestFile;
+      URIDone(Res);
+      return true;
+   }
+
+   // Load the database
+   if (DatabaseLoaded == false)
+   {
+      // Read the database
+      string DFile = _config->FindFile("Dir::State::cdroms");
+      if (FileExists(DFile) == true)
+      {
+	 if (ReadConfigFile(Database,DFile) == false)
+	    return _error->Error(_("Unable to read the cdrom database %s"),
+			  DFile.c_str());
+      }
+      DatabaseLoaded = true;
+   }
+
+   // All non IMS queries for package files fail.
+   if (Itm->IndexFile == true || GetID(Get.Host).empty() == true)
+   {
+      Fail(_("Please use apt-cdrom to make this CD-ROM recognized by APT."
+	     " apt-get update cannot be used to add new CD-ROMs"));
+      return true;
+   }
+
+   // We already have a CD inserted, but it is the wrong one
+   if (CurrentID.empty() == false &&
+       CurrentID != "FAIL" &&
+       Database.Find("CD::" + CurrentID) != Get.Host)
+   {
+      Fail(_("Wrong CD-ROM"),true);
+      return true;
+   }
+
+   bool const AutoDetect = ConfigFindB("AutoDetect", true);
+   CDROM = _config->FindDir("Acquire::cdrom::mount");
+   if (Debug)
+      clog << "Looking for CDROM at " << CDROM << endl;
+
+   // make a relative mount point absolute
+   if (CDROM[0] == '.')
+      CDROM= SafeGetCWD() + '/' + CDROM;
+
+   string NewID;
+   // loop until the correct disc is in the drive or the user gives up
+   while (CurrentID.empty() == true)
+   {
+      if (AutoDetect)
+	 AutoDetectAndMount(Get, NewID);
+
+      if(!IsMounted(CDROM))
+	 MountedByApt = MountCdrom(CDROM);
+
+      if (IsCorrectCD(Get, CDROM, NewID))
+	 break;
+
+      // I suppose this should prompt somehow?
+      if (_config->FindB("APT::CDROM::NoMount",false) == false &&
+	  UnmountCdrom(CDROM) == false)
+	 return _error->Error(_("Unable to unmount the CD-ROM in %s, it may still be in use."),
+			      CDROM.c_str());
+      if (MediaFail(Get.Host,CDROM) == false)
+      {
+	 CurrentID = "FAIL";
+	 return _error->Error(_("Disk not found."));
+      }
+   }
+
+   // Found a CD
+   Res.Filename = CDROM + File;
+   struct stat Buf;
+   if (stat(Res.Filename.c_str(),&Buf) != 0)
+      return _error->Error(_("File not found"));
+
+   URIStart(Res);
+   if (NewID.empty() == false)
+      CurrentID = NewID;
+   Res.LastModified = Buf.st_mtime;
+   Res.Size = Buf.st_size;
+
+   // hash the file so the caller can verify it
+   Hashes Hash(Itm->ExpectedHashes);
+   FileFd Fd(Res.Filename, FileFd::ReadOnly);
+   Hash.AddFD(Fd);
+   Res.TakeHashes(Hash);
+
+   URIDone(Res);
+   return true;
+}
+ /*}}}*/
+
+int main()
+{
+   // option name indicates privileges are kept — needed for raw drive access
+   _config->CndSet("Binary::cdrom::Debug::NoDropPrivs", true);
+   CDROMMethod Mth;
+   return Mth.Run();
+}
diff --git a/methods/connect.cc b/methods/connect.cc
new file mode 100644
index 000000000..f6fb14769
--- /dev/null
+++ b/methods/connect.cc
@@ -0,0 +1,342 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: connect.cc,v 1.10.2.1 2004/01/16 18:58:50 mdz Exp $
+/* ######################################################################
+
+ Connect - Replacement connect call
+
+ This was originally authored by Jason Gunthorpe <jgg@debian.org>
+ and is placed in the Public Domain, do with it what you will.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <config.h>
+
+#include <apt-pkg/error.h>
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/acquire-method.h>
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/srvrec.h>
+
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sstream>
+#include <string.h>
+#include<set>
+#include<string>
+
+// Internet stuff
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+
+#include "connect.h"
+#include "rfc2553emu.h"
+#include <apti18n.h>
+ /*}}}*/
+
+// Cache of the last resolved host so repeated connects reuse the
+// getaddrinfo() result and continue an IP rotation where it left off.
+static std::string LastHost;
+static int LastPort = 0;
+static struct addrinfo *LastHostAddr = 0;  // owned; freed on re-resolve
+static struct addrinfo *LastUsed = 0;      // cursor into LastHostAddr list
+
+// SRV records fetched for the current host (consumed in priority order)
+static std::vector<SrvRec> SrvRecords;
+
+// Set of IP/hostnames that we timed out before or couldn't resolve
+static std::set<std::string> bad_addr;
+
+// RotateDNS - Select a new server from a DNS rotation /*{{{*/
+// ---------------------------------------------------------------------
+/* This is called during certain errors in order to recover by selecting a
+   new server: advance the cursor to the next resolved address, wrapping
+   back to the head of the list when the end is reached. */
+void RotateDNS()
+{
+   LastUsed = (LastUsed != 0 && LastUsed->ai_next != 0)
+		 ? LastUsed->ai_next
+		 : LastHostAddr;
+}
+ /*}}}*/
+static bool ConnectionAllowed(char const * const Service, std::string const &Host)/*{{{*/
+{
+   // the only legal empty host (RFC2782 '.' target) is detected by caller
+   if (unlikely(Host.empty()))
+      return false;
+   // everything but a blocked .onion host is acceptable
+   if (APT::String::Endswith(Host, ".onion") == false ||
+	 _config->FindB("Acquire::BlockDotOnion", true) == false)
+      return true;
+   // TRANSLATOR: %s is e.g. Tor's ".onion" which would likely fail or leak info (RFC7686)
+   _error->Error(_("Direct connection to %s domains is blocked by default."), ".onion");
+   if (strcmp(Service, "http") == 0)
+      _error->Error(_("If you meant to use Tor remember to use %s instead of %s."), "tor+http", "http");
+   return false;
+}
+ /*}}}*/
+// DoConnect - Attempt a connect operation /*{{{*/
+// ---------------------------------------------------------------------
+/* This helper function attempts a connection to a single address.
+   On success Fd holds a connected non-blocking socket; on failure an
+   error is queued and addresses that timed out or refused are recorded
+   in bad_addr so later attempts skip them. */
+static bool DoConnect(struct addrinfo *Addr,std::string const &Host,
+		      unsigned long TimeOut,int &Fd,pkgAcqMethod *Owner)
+{
+   // Show a status indicator
+   char Name[NI_MAXHOST];
+   char Service[NI_MAXSERV];
+
+   Name[0] = 0;
+   Service[0] = 0;
+   // numeric host/service only: no reverse lookup, just formatting
+   getnameinfo(Addr->ai_addr,Addr->ai_addrlen,
+	       Name,sizeof(Name),Service,sizeof(Service),
+	       NI_NUMERICHOST|NI_NUMERICSERV);
+   Owner->Status(_("Connecting to %s (%s)"),Host.c_str(),Name);
+
+   // if that addr did timeout before, we do not try it again
+   if(bad_addr.find(std::string(Name)) != bad_addr.end())
+      return false;
+
+   /* If this is an IP rotation store the IP we are using.. If something goes
+      wrong this will get tacked onto the end of the error message */
+   if (LastHostAddr->ai_next != 0)
+   {
+      std::stringstream ss;
+      ioprintf(ss, _("[IP: %s %s]"),Name,Service);
+      Owner->SetIP(ss.str());
+   }
+
+   // Get a socket
+   if ((Fd = socket(Addr->ai_family,Addr->ai_socktype,
+		    Addr->ai_protocol)) < 0)
+      return _error->Errno("socket",_("Could not create a socket for %s (f=%u t=%u p=%u)"),
+			   Name,Addr->ai_family,Addr->ai_socktype,Addr->ai_protocol);
+
+   SetNonBlock(Fd,true);
+   // EINPROGRESS is expected for a non-blocking connect
+   if (connect(Fd,Addr->ai_addr,Addr->ai_addrlen) < 0 &&
+       errno != EINPROGRESS)
+      return _error->Errno("connect",_("Cannot initiate the connection "
+			   "to %s:%s (%s)."),Host.c_str(),Service,Name);
+
+   /* This implements a timeout for connect by opening the connection
+      nonblocking */
+   if (WaitFd(Fd,true,TimeOut) == false) {
+      bad_addr.insert(bad_addr.begin(), std::string(Name));
+      Owner->SetFailReason("Timeout");
+      return _error->Error(_("Could not connect to %s:%s (%s), "
+			   "connection timed out"),Host.c_str(),Service,Name);
+   }
+
+   // Check the socket for an error condition
+   unsigned int Err;
+   unsigned int Len = sizeof(Err);
+   if (getsockopt(Fd,SOL_SOCKET,SO_ERROR,&Err,&Len) != 0)
+      return _error->Errno("getsockopt",_("Failed"));
+
+   if (Err != 0)
+   {
+      errno = Err;
+      if(errno == ECONNREFUSED)
+         Owner->SetFailReason("ConnectionRefused");
+      else if (errno == ETIMEDOUT)
+	 Owner->SetFailReason("ConnectionTimedOut");
+      bad_addr.insert(bad_addr.begin(), std::string(Name));
+      return _error->Errno("connect",_("Could not connect to %s:%s (%s)."),Host.c_str(),
+			   Service,Name);
+   }
+
+   return true;
+}
+ /*}}}*/
+// Connect to a given Hostname /*{{{*/
+/* Resolve Host (caching the result across calls) and try each returned
+   address in rotation until one connects; falls back from the explicit
+   Port/Service to DefPort when the service name cannot be resolved. */
+static bool ConnectToHostname(std::string const &Host, int const Port,
+      const char * const Service, int DefPort, int &Fd,
+      unsigned long const TimeOut, pkgAcqMethod * const Owner)
+{
+   if (ConnectionAllowed(Service, Host) == false)
+      return false;
+   // Convert the port name/number
+   char ServStr[300];
+   if (Port != 0)
+      snprintf(ServStr,sizeof(ServStr),"%i", Port);
+   else
+      snprintf(ServStr,sizeof(ServStr),"%s", Service);
+
+   /* We used a cached address record.. Yes this is against the spec but
+      the way we have setup our rotating dns suggests that this is more
+      sensible */
+   if (LastHost != Host || LastPort != Port)
+   {
+      Owner->Status(_("Connecting to %s"),Host.c_str());
+
+      // Free the old address structure
+      if (LastHostAddr != 0)
+      {
+	 freeaddrinfo(LastHostAddr);
+	 LastHostAddr = 0;
+	 LastUsed = 0;
+      }
+
+      // We only understand SOCK_STREAM sockets.
+      struct addrinfo Hints;
+      memset(&Hints,0,sizeof(Hints));
+      Hints.ai_socktype = SOCK_STREAM;
+      Hints.ai_flags = 0;
+#ifdef AI_IDN
+      if (_config->FindB("Acquire::Connect::IDN", true) == true)
+	 Hints.ai_flags |= AI_IDN;
+#endif
+      // see getaddrinfo(3): only return address if system has such a address configured
+      // useful if system is ipv4 only, to not get ipv6, but that fails if the system has
+      // no address configured: e.g. offline and trying to connect to localhost.
+      if (_config->FindB("Acquire::Connect::AddrConfig", true) == true)
+	 Hints.ai_flags |= AI_ADDRCONFIG;
+      Hints.ai_protocol = 0;
+
+      if(_config->FindB("Acquire::ForceIPv4", false) == true)
+	 Hints.ai_family = AF_INET;
+      else if(_config->FindB("Acquire::ForceIPv6", false) == true)
+	 Hints.ai_family = AF_INET6;
+      else
+	 Hints.ai_family = AF_UNSPEC;
+
+      // if we couldn't resolve the host before, we don't try now
+      if(bad_addr.find(Host) != bad_addr.end())
+	 return _error->Error(_("Could not resolve '%s'"),Host.c_str());
+
+      // Resolve both the host and service simultaneously
+      while (1)
+      {
+	 int Res;
+	 if ((Res = getaddrinfo(Host.c_str(),ServStr,&Hints,&LastHostAddr)) != 0 ||
+	     LastHostAddr == 0)
+	 {
+	    if (Res == EAI_NONAME || Res == EAI_SERVICE)
+	    {
+	       // retry once with the numeric default port instead of the
+	       // service name, then give up on this host for good
+	       if (DefPort != 0)
+	       {
+		  snprintf(ServStr, sizeof(ServStr), "%i", DefPort);
+		  DefPort = 0;
+		  continue;
+	       }
+	       bad_addr.insert(bad_addr.begin(), Host);
+	       Owner->SetFailReason("ResolveFailure");
+	       return _error->Error(_("Could not resolve '%s'"),Host.c_str());
+	    }
+
+	    if (Res == EAI_AGAIN)
+	    {
+	       Owner->SetFailReason("TmpResolveFailure");
+	       return _error->Error(_("Temporary failure resolving '%s'"),
+				    Host.c_str());
+	    }
+	    if (Res == EAI_SYSTEM)
+	       return _error->Errno("getaddrinfo", _("System error resolving '%s:%s'"),
+				    Host.c_str(),ServStr);
+	    return _error->Error(_("Something wicked happened resolving '%s:%s' (%i - %s)"),
+				 Host.c_str(),ServStr,Res,gai_strerror(Res));
+	 }
+	 break;
+      }
+
+      LastHost = Host;
+      LastPort = Port;
+   }
+
+   // When we have an IP rotation stay with the last IP.
+   struct addrinfo *CurHost = LastHostAddr;
+   if (LastUsed != 0)
+      CurHost = LastUsed;
+
+   while (CurHost != 0)
+   {
+      if (DoConnect(CurHost,Host,TimeOut,Fd,Owner) == true)
+      {
+	 LastUsed = CurHost;
+	 return true;
+      }
+      close(Fd);
+      Fd = -1;
+
+      // Ignore UNIX domain sockets
+      do
+      {
+	 CurHost = CurHost->ai_next;
+      }
+      while (CurHost != 0 && CurHost->ai_family == AF_UNIX);
+
+      /* If we reached the end of the search list then wrap around to the
+	 start */
+      if (CurHost == 0 && LastUsed != 0)
+	 CurHost = LastHostAddr;
+
+      // Reached the end of the search cycle
+      if (CurHost == LastUsed)
+	 break;
+
+      // drop the per-address error before trying the next address
+      if (CurHost != 0)
+	 _error->Discard();
+   }
+
+   if (_error->PendingError() == true)
+      return false;
+   return _error->Error(_("Unable to connect to %s:%s:"),Host.c_str(),ServStr);
+}
+ /*}}}*/
+// Connect - Connect to a server /*{{{*/
+// ---------------------------------------------------------------------
+/* Performs a connection to the server (including SRV record lookup).
+   SRV targets are tried in priority order before the host itself; errors
+   from failed attempts are kept on the error stack and either reverted
+   (on success) or merged (on final failure). */
+bool Connect(std::string Host,int Port,const char *Service,
+	     int DefPort,int &Fd,
+	     unsigned long TimeOut,pkgAcqMethod *Owner)
+{
+   if (_error->PendingError() == true)
+      return false;
+
+   if (ConnectionAllowed(Service, Host) == false)
+      return false;
+
+   // refresh the SRV record list only when the target host/port changed
+   if(LastHost != Host || LastPort != Port)
+   {
+      SrvRecords.clear();
+      if (_config->FindB("Acquire::EnableSrvRecords", true) == true)
+      {
+         GetSrvRecords(Host, DefPort, SrvRecords);
+	 // RFC2782 defines that a lonely '.' target is an abort reason
+	 if (SrvRecords.size() == 1 && SrvRecords[0].target.empty())
+	    return _error->Error("SRV records for %s indicate that "
+		  "%s service is not available at this domain", Host.c_str(), Service);
+      }
+   }
+
+   size_t stackSize = 0;
+   // try to connect in the priority order of the srv records
+   std::string initialHost{std::move(Host)};
+   while(SrvRecords.empty() == false)
+   {
+      _error->PushToStack();
+      ++stackSize;
+      // PopFromSrvRecs will also remove the server
+      Host = PopFromSrvRecs(SrvRecords).target;
+      auto const ret = ConnectToHostname(Host, Port, Service, DefPort, Fd, TimeOut, Owner);
+      if (ret)
+      {
+	 // success: drop the errors accumulated by failed attempts
+	 while(stackSize--)
+	    _error->RevertToStack();
+	 return true;
+      }
+   }
+   Host = std::move(initialHost);
+
+   // we have no (good) SrvRecords for this host, connect right away
+   _error->PushToStack();
+   ++stackSize;
+   auto const ret = ConnectToHostname(Host, Port, Service, DefPort, Fd,
+	 TimeOut, Owner);
+   while(stackSize--)
+      if (ret)
+	 _error->RevertToStack();
+      else
+	 _error->MergeWithStack();
+   return ret;
+}
diff --git a/methods/connect.h b/methods/connect.h
new file mode 100644
index 000000000..bbe1bb35d
--- /dev/null
+++ b/methods/connect.h
@@ -0,0 +1,21 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: connect.h,v 1.3 2001/02/20 07:03:18 jgg Exp $
+/* ######################################################################
+
+ Connect - Replacement connect call
+
+ ##################################################################### */
+ /*}}}*/
+#ifndef CONNECT_H
+#define CONNECT_H
+
+#include <string>
+
+class pkgAcqMethod;
+
+// Connect to host To (with SRV lookup); on success Fd is a connected socket.
+// Port==0 means use Service/DefPort to pick the port.
+bool Connect(std::string To,int Port,const char *Service,int DefPort,
+	     int &Fd,unsigned long TimeOut,pkgAcqMethod *Owner);
+// Advance the cached DNS rotation to the next resolved address
+void RotateDNS();
+
+#endif
diff --git a/methods/copy.cc b/methods/copy.cc
new file mode 100644
index 000000000..810fc2f38
--- /dev/null
+++ b/methods/copy.cc
@@ -0,0 +1,93 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: copy.cc,v 1.7.2.1 2004/01/16 18:58:50 mdz Exp $
+/* ######################################################################
+
+ Copy URI - This method takes a uri like a file: uri and copies it
+ to the destination file.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <config.h>
+
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/hashes.h>
+#include <apt-pkg/configuration.h>
+#include "aptmethod.h"
+
+#include <string>
+#include <sys/stat.h>
+#include <sys/time.h>
+
+#include <apti18n.h>
+ /*}}}*/
+
+// Method that copies a local file given as a copy: URI to the destination.
+class CopyMethod : public aptMethod
+{
+   virtual bool Fetch(FetchItem *Itm) APT_OVERRIDE;
+
+   public:
+
+   CopyMethod() : aptMethod("copy", "1.0",SingleInstance | SendConfig) {};
+};
+
+// CopyMethod::Fetch - Fetch a file /*{{{*/
+// ---------------------------------------------------------------------
+/* Copy the source path of the URI to DestFile (atomically), carry over
+   the modification time and report the hashes of the result. */
+bool CopyMethod::Fetch(FetchItem *Itm)
+{
+   // this ensures that relative paths work in copy
+   std::string const File = Itm->Uri.substr(Itm->Uri.find(':')+1);
+
+   // Stat the file and send a start message
+   struct stat Buf;
+   if (stat(File.c_str(),&Buf) != 0)
+      return _error->Errno("stat",_("Failed to stat"));
+
+   // Formulate a result and send a start message
+   FetchResult Res;
+   Res.Size = Buf.st_size;
+   Res.Filename = Itm->DestFile;
+   Res.LastModified = Buf.st_mtime;
+   Res.IMSHit = false;
+   URIStart(Res);
+
+   // just calc the hashes if the source and destination are identical
+   if (File == Itm->DestFile || Itm->DestFile == "/dev/null")
+   {
+      CalculateHashes(Itm, Res);
+      URIDone(Res);
+      return true;
+   }
+
+   // See if the file exists
+   FileFd From(File,FileFd::ReadOnly);
+   // WriteAtomic: write to a temp file, rename into place on Close()
+   FileFd To(Itm->DestFile,FileFd::WriteAtomic);
+   To.EraseOnFailure();
+
+   // Copy the file
+   if (CopyFile(From,To) == false)
+   {
+      To.OpFail();
+      return false;
+   }
+
+   From.Close();
+   To.Close();
+
+   // preserve the source's modification time on the destination
+   if (TransferModificationTimes(File.c_str(), Res.Filename.c_str(), Res.LastModified) == false)
+      return false;
+
+   CalculateHashes(Itm, Res);
+   URIDone(Res);
+   return true;
+}
+ /*}}}*/
+
+int main()
+{
+   CopyMethod Mth;
+   return Mth.Run();
+}
diff --git a/methods/file.cc b/methods/file.cc
new file mode 100644
index 000000000..5cbf1924e
--- /dev/null
+++ b/methods/file.cc
@@ -0,0 +1,132 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: file.cc,v 1.9.2.1 2004/01/16 18:58:50 mdz Exp $
+/* ######################################################################
+
+ File URI method for APT
+
+ This simply checks that the file specified exists, if so the relevant
+ information is returned. If a .gz filename is specified then the file
+ name with .gz removed will also be checked and information about it
+ will be returned in Alt-*
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <config.h>
+
+#include <apt-pkg/aptconfiguration.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/hashes.h>
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/strutl.h>
+#include "aptmethod.h"
+
+#include <string>
+#include <sys/stat.h>
+
+#include <apti18n.h>
+ /*}}}*/
+
+// Method that serves file: URIs straight from the local filesystem.
+class FileMethod : public aptMethod
+{
+   virtual bool Fetch(FetchItem *Itm) APT_OVERRIDE;
+
+   public:
+   FileMethod() : aptMethod("file", "1.0", SingleInstance | SendConfig | LocalOnly) {};
+};
+
+// FileMethod::Fetch - Fetch a file /*{{{*/
+// ---------------------------------------------------------------------
+/* Serve a file: URI directly from the filesystem: report an IMS hit if
+   the destination already matches (by hash or expected size), otherwise
+   hand back the source path itself.  For compressed files an existing
+   uncompressed sibling is offered as an alternative result (Alt-*). */
+bool FileMethod::Fetch(FetchItem *Itm)
+{
+   URI Get = Itm->Uri;
+   std::string File = Get.Path;
+   FetchResult Res;
+   if (Get.Host.empty() == false)
+      return _error->Error(_("Invalid URI, local URIS must not start with //"));
+
+   struct stat Buf;
+   // deal with destination files which might linger around
+   if (lstat(Itm->DestFile.c_str(), &Buf) == 0)
+   {
+      /* Only a plain regular file may be reused as an IMS hit.  The mode
+	 must be tested with S_ISREG: a plain '& S_IFREG' bit-test also
+	 matches symlinks and sockets as their mode values are bit-supersets
+	 of S_IFREG — and since we lstat() here, a lingering symlink would
+	 otherwise wrongly qualify. */
+      if (S_ISREG(Buf.st_mode))
+      {
+	 if (Itm->LastModified == Buf.st_mtime && Itm->LastModified != 0)
+	 {
+	    HashStringList const hsl = Itm->ExpectedHashes;
+	    if (Itm->ExpectedHashes.VerifyFile(File))
+	    {
+	       Res.Filename = Itm->DestFile;
+	       Res.IMSHit = true;
+	    }
+	 }
+      }
+   }
+   // not reusable: make sure no stale destination file survives
+   if (Res.IMSHit != true)
+      RemoveFile("file", Itm->DestFile);
+
+   int olderrno = 0;
+   // See if the file exists
+   if (stat(File.c_str(),&Buf) == 0)
+   {
+      Res.Size = Buf.st_size;
+      Res.Filename = File;
+      Res.LastModified = Buf.st_mtime;
+      Res.IMSHit = false;
+      if (Itm->LastModified == Buf.st_mtime && Itm->LastModified != 0)
+      {
+	 unsigned long long const filesize = Itm->ExpectedHashes.FileSize();
+	 if (filesize != 0 && filesize == Res.Size)
+	    Res.IMSHit = true;
+      }
+
+      CalculateHashes(Itm, Res);
+   }
+   else
+      olderrno = errno;  // remember why, in case the alternative fails too
+   if (Res.IMSHit == false)
+      URIStart(Res);
+
+   // See if the uncompressed file exists and reuse it
+   FetchResult AltRes;
+   AltRes.Filename.clear();
+   std::vector<std::string> extensions = APT::Configuration::getCompressorExtensions();
+   for (std::vector<std::string>::const_iterator ext = extensions.begin(); ext != extensions.end(); ++ext)
+   {
+      if (APT::String::Endswith(File, *ext) == true)
+      {
+	 std::string const unfile = File.substr(0, File.length() - ext->length());
+	 if (stat(unfile.c_str(),&Buf) == 0)
+	 {
+	    AltRes.Size = Buf.st_size;
+	    AltRes.Filename = unfile;
+	    AltRes.LastModified = Buf.st_mtime;
+	    AltRes.IMSHit = false;
+	    if (Itm->LastModified == Buf.st_mtime && Itm->LastModified != 0)
+	       AltRes.IMSHit = true;
+	    break;
+	 }
+	 // no break here as we could have situations similar to '.gz' vs '.tar.gz' here
+      }
+   }
+
+   if (AltRes.Filename.empty() == false)
+      URIDone(Res,&AltRes);
+   else if (Res.Filename.empty() == false)
+      URIDone(Res);
+   else
+   {
+      // restore the errno from the failed stat of the primary file
+      errno = olderrno;
+      return _error->Errno(File.c_str(), _("File not found"));
+   }
+
+   return true;
+}
+ /*}}}*/
+
+int main()
+{
+   FileMethod Mth;
+   return Mth.Run();
+}
diff --git a/methods/ftp.cc b/methods/ftp.cc
new file mode 100644
index 000000000..d789637a8
--- /dev/null
+++ b/methods/ftp.cc
@@ -0,0 +1,1154 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: ftp.cc,v 1.31.2.1 2004/01/16 18:58:50 mdz Exp $
+/* ######################################################################
+
+ FTP Acquire Method - This is the FTP acquire method for APT.
+
+ This is a very simple implementation that does not try to optimize
+ at all. Commands are sent synchronously with the FTP server (as the
+ rfc recommends, but it is not really necessary..) and no tricks are
+ done to speed things along.
+
+ RFC 2428 describes the IPv6 FTP behavior
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <config.h>
+
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/hashes.h>
+#include <apt-pkg/netrc.h>
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/strutl.h>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <iostream>
+
+// Internet stuff
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+
+#include "rfc2553emu.h"
+#include "connect.h"
+#include "ftp.h"
+
+#include <apti18n.h>
+ /*}}}*/
+
+using namespace std;
+
+/* This table is for the EPRT and EPSV commands, it maps the OS address
+   family to the IETF address families */
+struct AFMap
+{
+   unsigned long Family;      // OS AF_* constant
+   unsigned long IETFFamily;  // RFC 2428 protocol number (1 = IPv4, 2 = IPv6)
+};
+
+#ifndef AF_INET6
+struct AFMap AFMap[] = {{AF_INET,1},{0, 0}};
+#else
+struct AFMap AFMap[] = {{AF_INET,1},{AF_INET6,2},{0, 0}};
+#endif
+
+unsigned long TimeOut = 120;   // socket wait timeout passed to WaitFd (presumably seconds — confirm)
+URI Proxy;                     // currently active ftp proxy (empty = direct)
+// Fatal-signal bookkeeping: this file is closed and timestamped in SigTerm
+string FtpMethod::FailFile;
+int FtpMethod::FailFd = -1;
+time_t FtpMethod::FailTime = 0;
+
+// FTPConn::FTPConn - Constructor /*{{{*/
+// ---------------------------------------------------------------------
+/* All descriptors start closed (-1); the read buffer starts empty. */
+FTPConn::FTPConn(URI Srv) : Len(0), ServerFd(-1), DataFd(-1),
+                            DataListenFd(-1), ServerName(Srv),
+			    ForceExtended(false), TryPassive(true),
+			    PeerAddrLen(0), ServerAddrLen(0)
+{
+   Debug = _config->FindB("Debug::Acquire::Ftp",false);
+   PasvAddr = 0;
+   Buffer[0] = '\0';
+}
+ /*}}}*/
+// FTPConn::~FTPConn - Destructor /*{{{*/
+// ---------------------------------------------------------------------
+/* Tear down all sockets and the cached passive address. */
+FTPConn::~FTPConn()
+{
+   Close();
+}
+ /*}}}*/
+// FTPConn::Close - Close down the connection /*{{{*/
+// ---------------------------------------------------------------------
+/* Just tear down the control, data and listen sockets and release the
+   cached passive-mode address. */
+void FTPConn::Close()
+{
+   auto CloseFd = [](int &Fd) { close(Fd); Fd = -1; };
+   CloseFd(ServerFd);
+   CloseFd(DataFd);
+   CloseFd(DataListenFd);
+
+   if (PasvAddr != 0)
+   {
+      freeaddrinfo(PasvAddr);
+      PasvAddr = 0;
+   }
+}
+ /*}}}*/
+// FTPConn::Open - Open a new connection /*{{{*/
+// ---------------------------------------------------------------------
+/* Connect to the server using a non-blocking connection and perform a
+   login.  Proxy selection order: host-specific option, generic option,
+   then the ftp_proxy environment variable, all overridable by no_proxy. */
+bool FTPConn::Open(pkgAcqMethod *Owner)
+{
+   // Use the already open connection if possible.
+   if (ServerFd != -1)
+      return true;
+
+   Close();
+
+   // Determine the proxy setting
+   string SpecificProxy = _config->Find("Acquire::ftp::Proxy::" + ServerName.Host);
+   if (!SpecificProxy.empty())
+   {
+	   // the magic value DIRECT disables the generic proxy for this host
+	   if (SpecificProxy == "DIRECT")
+		   Proxy = "";
+	   else
+		   Proxy = SpecificProxy;
+   }
+   else
+   {
+	   string DefProxy = _config->Find("Acquire::ftp::Proxy");
+	   if (!DefProxy.empty())
+	   {
+		   Proxy = DefProxy;
+	   }
+	   else
+	   {
+		   char* result = getenv("ftp_proxy");
+		   Proxy = result ? result : "";
+	   }
+   }
+
+   // Parse no_proxy, a , separated list of domains
+   if (getenv("no_proxy") != 0)
+   {
+      if (CheckDomainList(ServerName.Host,getenv("no_proxy")) == true)
+	 Proxy = "";
+   }
+
+   // Determine what host and port to use based on the proxy settings
+   int Port = 0;
+   string Host;
+   if (Proxy.empty() == true)
+   {
+      if (ServerName.Port != 0)
+	 Port = ServerName.Port;
+      Host = ServerName.Host;
+   }
+   else
+   {
+      if (Proxy.Port != 0)
+	 Port = Proxy.Port;
+      Host = Proxy.Host;
+   }
+
+   /* Connect to the remote server. Since FTP is connection oriented we
+      want to make sure we get a new server every time we reconnect */
+   RotateDNS();
+   if (Connect(Host,Port,"ftp",21,ServerFd,TimeOut,Owner) == false)
+      return false;
+
+   // Login must be before getpeername otherwise dante won't work.
+   Owner->Status(_("Logging in"));
+   bool Res = Login();
+
+   // Get the remote server's address
+   PeerAddrLen = sizeof(PeerAddr);
+   if (getpeername(ServerFd,(sockaddr *)&PeerAddr,&PeerAddrLen) != 0)
+      return _error->Errno("getpeername",_("Unable to determine the peer name"));
+
+   // Get the local machine's address
+   ServerAddrLen = sizeof(ServerAddr);
+   if (getsockname(ServerFd,(sockaddr *)&ServerAddr,&ServerAddrLen) != 0)
+      return _error->Errno("getsockname",_("Unable to determine the local name"));
+
+   return Res;
+}
+ /*}}}*/
+// FTPConn::Login - Login to the remote server /*{{{*/
+// ---------------------------------------------------------------------
+/* This performs both normal login and proxy login using a simple script
+   stored in the config file.  Afterwards passive/extended mode options
+   are evaluated and binary transfer mode (TYPE I) is selected. */
+bool FTPConn::Login()
+{
+   unsigned int Tag;
+   string Msg;
+
+   // Setup the variables needed for authentication
+   string User = "anonymous";
+   string Pass = "apt_get_ftp_2.1@debian.linux.user";
+
+   // Fill in the user/pass
+   if (ServerName.User.empty() == false)
+      User = ServerName.User;
+   if (ServerName.Password.empty() == false)
+      Pass = ServerName.Password;
+
+   // Perform simple login
+   if (Proxy.empty() == true)
+   {
+      // Read the initial response
+      if (ReadResp(Tag,Msg) == false)
+	 return false;
+      // FTP reply codes >= 400 indicate failure
+      if (Tag >= 400)
+	 return _error->Error(_("The server refused the connection and said: %s"),Msg.c_str());
+
+      // Send the user
+      if (WriteMsg(Tag,Msg,"USER %s",User.c_str()) == false)
+	 return false;
+      if (Tag >= 400)
+	 return _error->Error(_("USER failed, server said: %s"),Msg.c_str());
+
+      if (Tag == 331) { // 331 User name okay, need password.
+         // Send the Password
+         if (WriteMsg(Tag,Msg,"PASS %s",Pass.c_str()) == false)
+            return false;
+         if (Tag >= 400)
+            return _error->Error(_("PASS failed, server said: %s"),Msg.c_str());
+      }
+
+      // Enter passive mode
+      if (_config->Exists("Acquire::FTP::Passive::" + ServerName.Host) == true)
+	 TryPassive = _config->FindB("Acquire::FTP::Passive::" + ServerName.Host,true);
+      else
+	 TryPassive = _config->FindB("Acquire::FTP::Passive",true);
+   }
+   else
+   {
+      // Read the initial response
+      if (ReadResp(Tag,Msg) == false)
+	 return false;
+      if (Tag >= 400)
+	 return _error->Error(_("The server refused the connection and said: %s"),Msg.c_str());
+
+      // Perform proxy script execution
+      Configuration::Item const *Opts = _config->Tree("Acquire::ftp::ProxyLogin");
+      if (Opts == 0 || Opts->Child == 0)
+	 return _error->Error(_("A proxy server was specified but no login "
+			      "script, Acquire::ftp::ProxyLogin is empty."));
+      Opts = Opts->Child;
+
+      // Iterate over the entire login script
+      for (; Opts != 0; Opts = Opts->Next)
+      {
+	 if (Opts->Value.empty() == true)
+	    continue;
+
+	 // Substitute the variables into the command
+	 string Tmp = Opts->Value;
+	 Tmp = SubstVar(Tmp,"$(PROXY_USER)",Proxy.User);
+	 Tmp = SubstVar(Tmp,"$(PROXY_PASS)",Proxy.Password);
+	 Tmp = SubstVar(Tmp,"$(SITE_USER)",User);
+	 Tmp = SubstVar(Tmp,"$(SITE_PASS)",Pass);
+	 if (ServerName.Port != 0)
+	 {
+	    std::string SitePort;
+	    strprintf(SitePort, "%u", ServerName.Port);
+	    Tmp = SubstVar(Tmp,"$(SITE_PORT)", SitePort);
+	 }
+	 else
+	    Tmp = SubstVar(Tmp,"$(SITE_PORT)", "21");
+	 Tmp = SubstVar(Tmp,"$(SITE)",ServerName.Host);
+
+	 // Send the command
+	 if (WriteMsg(Tag,Msg,"%s",Tmp.c_str()) == false)
+	    return false;
+	 if (Tag >= 400)
+	    return _error->Error(_("Login script command '%s' failed, server said: %s"),Tmp.c_str(),Msg.c_str());
+      }
+
+      // Enter passive mode
+      TryPassive = false;
+      if (_config->Exists("Acquire::FTP::Passive::" + ServerName.Host) == true)
+	 TryPassive = _config->FindB("Acquire::FTP::Passive::" + ServerName.Host,true);
+      else
+      {
+	 if (_config->Exists("Acquire::FTP::Proxy::Passive") == true)
+	    TryPassive = _config->FindB("Acquire::FTP::Proxy::Passive",true);
+	 else
+	    TryPassive = _config->FindB("Acquire::FTP::Passive",true);
+      }
+   }
+
+   // Force the use of extended commands
+   if (_config->Exists("Acquire::FTP::ForceExtended::" + ServerName.Host) == true)
+      ForceExtended = _config->FindB("Acquire::FTP::ForceExtended::" + ServerName.Host,true);
+   else
+      ForceExtended = _config->FindB("Acquire::FTP::ForceExtended",false);
+
+   // Binary mode
+   if (WriteMsg(Tag,Msg,"TYPE I") == false)
+      return false;
+   if (Tag >= 400)
+      return _error->Error(_("TYPE failed, server said: %s"),Msg.c_str());
+
+   return true;
+}
+ /*}}}*/
+// FTPConn::ReadLine - Read a line from the server /*{{{*/
+// ---------------------------------------------------------------------
+/* This performs a very simple buffered read. */
+bool FTPConn::ReadLine(string &Text)
+{
+ if (ServerFd == -1)
+ return false;
+
+ // Suck in a line
+ while (Len < sizeof(Buffer))
+ {
+ // Scan the buffer for a new line
+ for (unsigned int I = 0; I != Len; I++)
+ {
+ // Escape some special chars
+ if (Buffer[I] == 0)
+ Buffer[I] = '?';
+
+ // End of line?
+ if (Buffer[I] != '\n')
+ continue;
+
+ I++;
+ Text = string(Buffer,I);
+ memmove(Buffer,Buffer+I,Len - I);
+ Len -= I;
+ return true;
+ }
+
+ // Wait for some data..
+ if (WaitFd(ServerFd,false,TimeOut) == false)
+ {
+ Close();
+ return _error->Error(_("Connection timeout"));
+ }
+
+ // Suck it back
+ int Res = read(ServerFd,Buffer + Len,sizeof(Buffer) - Len);
+ if (Res == 0)
+ _error->Error(_("Server closed the connection"));
+ if (Res <= 0)
+ {
+ _error->Errno("read",_("Read error"));
+ Close();
+ return false;
+ }
+ Len += Res;
+ }
+
+ return _error->Error(_("A response overflowed the buffer."));
+}
+ /*}}}*/
// FTPConn::ReadResp - Read a full response from the server		/*{{{*/
// ---------------------------------------------------------------------
/* This reads a reply code from the server, it handles both single-line
   and multi-line (continued) responses. Ret receives the 3 digit reply
   code, Text the accumulated human readable portion of the message. */
bool FTPConn::ReadResp(unsigned int &Ret,string &Text)
{
   // Grab the first line of the response
   string Msg;
   if (ReadLine(Msg) == false)
      return false;

   // Get the ID code (must be exactly 3 digits per RFC 959)
   char *End;
   Ret = strtol(Msg.c_str(),&End,10);
   if (End - Msg.c_str() != 3)
      return _error->Error(_("Protocol corruption"));

   // All done ? A space after the code means a single-line reply
   Text = Msg.c_str()+4;
   if (*End == ' ')
   {
      if (Debug == true)
	 cerr << "<- '" << QuoteString(Text,"") << "'" << endl;
      return true;
   }

   // Only "NNN " or "NNN-" are legal after the reply code
   if (*End != '-')
      return _error->Error(_("Protocol corruption"));

   /* Okay, here we do the continued message trick. This is foolish, but
      proftpd follows the protocol as specified and wu-ftpd doesn't, so
      we filter. I wonder how many clients break if you use proftpd and
      put a '- in the 3rd spot in the message? */
   char Leader[4];
   strncpy(Leader,Msg.c_str(),3);
   Leader[3] = 0;
   while (ReadLine(Msg) == true)
   {
      // Short, it must be using RFC continuation..
      if (Msg.length() < 4)
      {
	 Text += Msg;
	 continue;
      }

      // Oops, finished: the reply code repeated followed by a space
      if (strncmp(Msg.c_str(),Leader,3) == 0 && Msg[3] == ' ')
      {
	 Text += Msg.c_str()+4;
	 break;
      }

      // This message has the wu-ftpd style reply code prefixed
      if (strncmp(Msg.c_str(),Leader,3) == 0 && Msg[3] == '-')
      {
	 Text += Msg.c_str()+4;
	 continue;
      }

      // Must be RFC style prefixing
      Text += Msg;
   }

   if (Debug == true && _error->PendingError() == false)
      cerr << "<- '" << QuoteString(Text,"") << "'" << endl;

   // A ReadLine failure inside the loop surfaces as a pending error here
   return !_error->PendingError();
}
									/*}}}*/
// FTPConn::WriteMsg - Send a message to the server			/*{{{*/
// ---------------------------------------------------------------------
/* Simple printf like function.. formats the command, appends the CRLF
   terminator, writes it out completely and then collects the server's
   reply into Ret/Text via ReadResp. */
bool FTPConn::WriteMsg(unsigned int &Ret,string &Text,const char *Fmt,...)
{
   va_list args;
   va_start(args,Fmt);

   // sprintf the description; 4 bytes are held back so the "\r\n"
   // appended below can never overflow S
   char S[400];
   vsnprintf(S,sizeof(S) - 4,Fmt,args);
   strcat(S,"\r\n");
   va_end(args);

   if (Debug == true)
      cerr << "-> '" << QuoteString(S,"") << "'" << endl;

   // Send it off; loop because write may accept only part of the buffer
   unsigned long Len = strlen(S);
   unsigned long Start = 0;
   while (Len != 0)
   {
      if (WaitFd(ServerFd,true,TimeOut) == false)
      {
	 Close();
	 return _error->Error(_("Connection timeout"));
      }

      int Res = write(ServerFd,S + Start,Len);
      if (Res <= 0)
      {
	 _error->Errno("write",_("Write error"));
	 Close();
	 return false;
      }

      Len -= Res;
      Start += Res;
   }

   // Every FTP command elicits a reply; hand it back to the caller
   return ReadResp(Ret,Text);
}
									/*}}}*/
// FTPConn::GoPasv - Enter Passive mode					/*{{{*/
// ---------------------------------------------------------------------
/* Try to enter passive mode, the return code does not indicate if passive
   mode could or could not be established, only if there was a fatal error.
   On success PasvAddr is set; if it remains 0 the caller falls back to
   port mode. We have to enter passive mode every time we make a data
   connection :| */
bool FTPConn::GoPasv()
{
   /* The PASV command only works on IPv4 sockets, even though it could
      in theory support IPv6 via an all zeros reply */
   if (((struct sockaddr *)&PeerAddr)->sa_family != AF_INET ||
       ForceExtended == true)
      return ExtGoPasv();

   // Drop any stale passive address from a previous attempt
   if (PasvAddr != 0)
      freeaddrinfo(PasvAddr);
   PasvAddr = 0;

   // Try to enable pasv mode
   unsigned int Tag;
   string Msg;
   if (WriteMsg(Tag,Msg,"PASV") == false)
      return false;

   // Unsupported function (not fatal, we just stay without passive mode)
   string::size_type Pos = Msg.find('(');
   if (Tag >= 400)
      return true;

   //wu-2.6.2(1) ftp server, returns
   //227 Entering Passive Mode 193,219,28,140,150,111
   //without parentheses, let's try to cope with it.
   //wget(1) and ftp(1) can.
   if (Pos == string::npos)
      Pos = Msg.rfind(' ');
   else
      ++Pos;

   // Still unsupported function
   if (Pos == string::npos)
      return true;

   // Scan it: four address octets followed by the two port bytes
   unsigned a0,a1,a2,a3,p0,p1;
   if (sscanf(Msg.c_str() + Pos,"%u,%u,%u,%u,%u,%u",&a0,&a1,&a2,&a3,&p0,&p1) != 6)
      return true;

   /* Some evil servers return 0 to mean their addr. We can actually speak
      to these servers natively using IPv6 */
   if (a0 == 0 && a1 == 0 && a2 == 0 && a3 == 0)
   {
      // Get the IP in text form (reuse the control connection's peer)
      char Name[NI_MAXHOST];
      char Service[NI_MAXSERV];
      getnameinfo((struct sockaddr *)&PeerAddr,PeerAddrLen,
		  Name,sizeof(Name),Service,sizeof(Service),
		  NI_NUMERICHOST|NI_NUMERICSERV);

      struct addrinfo Hints;
      memset(&Hints,0,sizeof(Hints));
      Hints.ai_socktype = SOCK_STREAM;
      Hints.ai_family = ((struct sockaddr *)&PeerAddr)->sa_family;
      Hints.ai_flags |= AI_NUMERICHOST;

      // Get a new passive address.
      char Port[100];
      snprintf(Port,sizeof(Port),"%u",(p0 << 8) + p1);
      if (getaddrinfo(Name,Port,&Hints,&PasvAddr) != 0)
	 return true;
      return true;
   }

   struct addrinfo Hints;
   memset(&Hints,0,sizeof(Hints));
   Hints.ai_socktype = SOCK_STREAM;
   Hints.ai_family = AF_INET;
   Hints.ai_flags |= AI_NUMERICHOST;

   // Get a new passive address from the dotted quad in the reply.
   char Port[100];
   snprintf(Port,sizeof(Port),"%u",(p0 << 8) + p1);
   char Name[100];
   snprintf(Name,sizeof(Name),"%u.%u.%u.%u",a0,a1,a2,a3);
   if (getaddrinfo(Name,Port,&Hints,&PasvAddr) != 0)
      return true;
   return true;
}
									/*}}}*/
// FTPConn::ExtGoPasv - Enter Extended Passive mode			/*{{{*/
// ---------------------------------------------------------------------
/* Try to enter extended passive mode. See GoPasv above and RFC 2428.
   The EPSV reply carries (<d><proto><d><ip><d><port><d>) where <d> is
   an arbitrary delimiter character; we locate the four delimiters and
   slice the fields out from between them. */
bool FTPConn::ExtGoPasv()
{
   // Drop any stale passive address from a previous attempt
   if (PasvAddr != 0)
      freeaddrinfo(PasvAddr);
   PasvAddr = 0;

   // Try to enable pasv mode
   unsigned int Tag;
   string Msg;
   if (WriteMsg(Tag,Msg,"EPSV") == false)
      return false;

   // Unsupported function (not fatal; caller falls back to port mode)
   string::size_type Pos = Msg.find('(');
   if (Tag >= 400 || Pos == string::npos)
      return true;

   // Scan it: record the positions of the four delimiter characters;
   // the first character after '(' defines the delimiter
   string::const_iterator List[4];
   unsigned Count = 0;
   Pos++;
   for (string::const_iterator I = Msg.begin() + Pos; I < Msg.end(); ++I)
   {
      if (*I != Msg[Pos])
	 continue;
      if (Count >= 4)
	 return true;
      List[Count++] = I;
   }
   if (Count != 4)
      return true;

   // Break it up .. (proto and IP may be empty per the RFC)
   unsigned long Proto = 0;
   unsigned long Port = 0;
   string IP;
   IP = string(List[1]+1,List[2]);
   Port = atoi(string(List[2]+1,List[3]).c_str());
   if (IP.empty() == false)
      Proto = atoi(string(List[0]+1,List[1]).c_str());

   // A zero/unparsable port makes the reply unusable; treated as fatal
   if (Port == 0)
      return false;

   // String version of the port
   char PStr[100];
   snprintf(PStr,sizeof(PStr),"%lu",Port);

   // Common hints for resolving the textual address below
   struct addrinfo Hints;
   memset(&Hints,0,sizeof(Hints));
   Hints.ai_socktype = SOCK_STREAM;
   Hints.ai_flags |= AI_NUMERICHOST;

   /* The RFC defined case, connect to the old IP/protocol using the
      new port. */
   if (IP.empty() == true)
   {
      // Get the IP in text form
      char Name[NI_MAXHOST];
      char Service[NI_MAXSERV];
      getnameinfo((struct sockaddr *)&PeerAddr,PeerAddrLen,
		  Name,sizeof(Name),Service,sizeof(Service),
		  NI_NUMERICHOST|NI_NUMERICSERV);
      IP = Name;
      Hints.ai_family = ((struct sockaddr *)&PeerAddr)->sa_family;
   }
   else
   {
      // Get the family.. map the IETF protocol number onto an AF_* value
      Hints.ai_family = 0;
      for (unsigned J = 0; AFMap[J].Family != 0; J++)
	 if (AFMap[J].IETFFamily == Proto)
	    Hints.ai_family = AFMap[J].Family;
      if (Hints.ai_family == 0)
	 return true;
   }

   // Get a new passive address.
   if (getaddrinfo(IP.c_str(),PStr,&Hints,&PasvAddr) != 0)
      return true;

   return true;
}
									/*}}}*/
+// FTPConn::Size - Return the size of a file /*{{{*/
+// ---------------------------------------------------------------------
+/* Grab the file size from the server, 0 means no size or empty file */
+bool FTPConn::Size(const char *Path,unsigned long long &Size)
+{
+ // Query the size
+ unsigned int Tag;
+ string Msg;
+ Size = 0;
+ if (WriteMsg(Tag,Msg,"SIZE %s",Path) == false)
+ return false;
+
+ char *End;
+ Size = strtoull(Msg.c_str(),&End,10);
+ if (Tag >= 400 || End == Msg.c_str())
+ Size = 0;
+ return true;
+}
+ /*}}}*/
+// FTPConn::ModTime - Return the modification time of the file /*{{{*/
+// ---------------------------------------------------------------------
+/* Like Size no error is returned if the command is not supported. If the
+ command fails then time is set to the current time of day to fool
+ date checks. */
+bool FTPConn::ModTime(const char *Path, time_t &Time)
+{
+ Time = time(&Time);
+
+ // Query the mod time
+ unsigned int Tag;
+ string Msg;
+ if (WriteMsg(Tag,Msg,"MDTM %s",Path) == false)
+ return false;
+ if (Tag >= 400 || Msg.empty() == true || isdigit(Msg[0]) == 0)
+ return true;
+
+ // Parse it
+ return FTPMDTMStrToTime(Msg.c_str(), Time);
+}
+ /*}}}*/
// FTPConn::CreateDataFd - Get a data connection			/*{{{*/
// ---------------------------------------------------------------------
/* Create the data connection. Call FinalizeDataFd after this though..
   In passive mode DataFd is connected directly to the address the
   server announced; otherwise a listening socket is set up and the
   server is told where to connect via PORT or EPRT. */
bool FTPConn::CreateDataFd()
{
   close(DataFd);
   DataFd = -1;

   // Attempt to enter passive mode.
   if (TryPassive == true)
   {
      if (GoPasv() == false)
	 return false;

      // Oops, didn't work out, don't bother trying again.
      if (PasvAddr == 0)
	 TryPassive = false;
   }

   // Passive mode?
   if (PasvAddr != 0)
   {
      // Get a socket
      if ((DataFd = socket(PasvAddr->ai_family,PasvAddr->ai_socktype,
			   PasvAddr->ai_protocol)) < 0)
	 return _error->Errno("socket",_("Could not create a socket"));

      // Connect to the server
      SetNonBlock(DataFd,true);
      if (connect(DataFd,PasvAddr->ai_addr,PasvAddr->ai_addrlen) < 0 &&
	  errno != EINPROGRESS)
	 return _error->Errno("socket",_("Could not create a socket"));

      /* This implements a timeout for connect by opening the connection
         nonblocking */
      if (WaitFd(DataFd,true,TimeOut) == false)
	 return _error->Error(_("Could not connect data socket, connection timed out"));

      /* Fetch the deferred connect(2) result. POSIX specifies the
         length argument as socklen_t and SO_ERROR's value as int;
         the old 'unsigned int' pair only worked where socklen_t
         happens to be unsigned int. */
      int Err = 0;
      socklen_t Len = sizeof(Err);
      if (getsockopt(DataFd,SOL_SOCKET,SO_ERROR,&Err,&Len) != 0)
	 return _error->Errno("getsockopt",_("Failed"));
      if (Err != 0)
	 return _error->Error(_("Could not connect passive socket."));

      return true;
   }

   // Port mode :<
   close(DataListenFd);
   DataListenFd = -1;

   // Get the information for a listening socket.
   struct addrinfo *BindAddr = NULL;
   struct addrinfo Hints;
   memset(&Hints,0,sizeof(Hints));
   Hints.ai_socktype = SOCK_STREAM;
   Hints.ai_flags |= AI_PASSIVE;
   Hints.ai_family = ((struct sockaddr *)&ServerAddr)->sa_family;
   if (getaddrinfo(0,"0",&Hints,&BindAddr) != 0 || BindAddr == NULL)
      return _error->Error(_("getaddrinfo was unable to get a listening socket"));

   // Construct the socket
   if ((DataListenFd = socket(BindAddr->ai_family,BindAddr->ai_socktype,
			      BindAddr->ai_protocol)) < 0)
   {
      freeaddrinfo(BindAddr);
      return _error->Errno("socket",_("Could not create a socket"));
   }

   // Bind and listen
   if (::bind(DataListenFd,BindAddr->ai_addr,BindAddr->ai_addrlen) < 0)
   {
      freeaddrinfo(BindAddr);
      return _error->Errno("bind",_("Could not bind a socket"));
   }
   freeaddrinfo(BindAddr);
   if (listen(DataListenFd,1) < 0)
      return _error->Errno("listen",_("Could not listen on the socket"));
   SetNonBlock(DataListenFd,true);

   // Determine the name to send to the remote
   struct sockaddr_storage Addr;
   socklen_t AddrLen = sizeof(Addr);
   if (getsockname(DataListenFd,(sockaddr *)&Addr,&AddrLen) < 0)
      return _error->Errno("getsockname",_("Could not determine the socket's name"));

   /* Reverse the address. We need the server address and the data port.
      Note: the second call deliberately overwrites Name with our address
      on the control connection; Service keeps the listener's data port
      (Service2 is unused). */
   char Name[NI_MAXHOST];
   char Service[NI_MAXSERV];
   char Service2[NI_MAXSERV];
   getnameinfo((struct sockaddr *)&Addr,AddrLen,
	       Name,sizeof(Name),Service,sizeof(Service),
	       NI_NUMERICHOST|NI_NUMERICSERV);
   getnameinfo((struct sockaddr *)&ServerAddr,ServerAddrLen,
	       Name,sizeof(Name),Service2,sizeof(Service2),
	       NI_NUMERICHOST|NI_NUMERICSERV);

   // Send off an IPv4 address in the old port format
   if (((struct sockaddr *)&Addr)->sa_family == AF_INET &&
       ForceExtended == false)
   {
      // Convert the dots in the quad into commas
      for (char *I = Name; *I != 0; I++)
	 if (*I == '.')
	    *I = ',';
      unsigned long Port = atoi(Service);

      // Send the port command (address, then the port split into two bytes)
      unsigned int Tag;
      string Msg;
      if (WriteMsg(Tag,Msg,"PORT %s,%d,%d",
		   Name,
		   (int)(Port >> 8) & 0xff, (int)(Port & 0xff)) == false)
	 return false;
      if (Tag >= 400)
	 return _error->Error(_("Unable to send PORT command"));
      return true;
   }

   // Construct an EPRT command: map the AF_* onto the IETF protocol number
   unsigned Proto = 0;
   for (unsigned J = 0; AFMap[J].Family != 0; J++)
      if (AFMap[J].Family == ((struct sockaddr *)&Addr)->sa_family)
	 Proto = AFMap[J].IETFFamily;
   if (Proto == 0)
      return _error->Error(_("Unknown address family %u (AF_*)"),
			   ((struct sockaddr *)&Addr)->sa_family);

   // Send the EPRT command
   unsigned int Tag;
   string Msg;
   if (WriteMsg(Tag,Msg,"EPRT |%u|%s|%s|",Proto,Name,Service) == false)
      return false;
   if (Tag >= 400)
      return _error->Error(_("EPRT failed, server said: %s"),Msg.c_str());
   return true;
}
									/*}}}*/
// FTPConn::Finalize - Complete the Data connection			/*{{{*/
// ---------------------------------------------------------------------
/* If the connection is in port mode this waits for the other end to hook
   up to us. In passive mode the data fd is already connected so there
   is nothing to do. */
bool FTPConn::Finalize()
{
   // Passive mode? Do nothing
   if (PasvAddr != 0)
      return true;

   // Close any old socket..
   close(DataFd);
   DataFd = -1;

   // Wait for someone to connect..
   if (WaitFd(DataListenFd,false,TimeOut) == false)
      return _error->Error(_("Data socket connect timed out"));

   // Accept the connection
   struct sockaddr_in Addr;
   socklen_t Len = sizeof(Addr);
   DataFd = accept(DataListenFd,(struct sockaddr *)&Addr,&Len);
   if (DataFd < 0)
      return _error->Errno("accept",_("Unable to accept connection"));

   // The listener served its single purpose; release it
   close(DataListenFd);
   DataListenFd = -1;

   return true;
}
									/*}}}*/
+// FTPConn::Get - Get a file /*{{{*/
+// ---------------------------------------------------------------------
+/* This opens a data connection, sends REST and RETR and then
+ transfers the file over. */
+bool FTPConn::Get(const char *Path,FileFd &To,unsigned long long Resume,
+ Hashes &Hash,bool &Missing, unsigned long long MaximumSize,
+ pkgAcqMethod *Owner)
+{
+ Missing = false;
+ if (CreateDataFd() == false)
+ return false;
+
+ unsigned int Tag;
+ string Msg;
+ if (Resume != 0)
+ {
+ if (WriteMsg(Tag,Msg,"REST %u",Resume) == false)
+ return false;
+ if (Tag >= 400)
+ Resume = 0;
+ }
+
+ if (To.Truncate(Resume) == false)
+ return false;
+
+ if (To.Seek(0) == false)
+ return false;
+
+ if (Resume != 0)
+ {
+ if (Hash.AddFD(To,Resume) == false)
+ {
+ _error->Errno("read",_("Problem hashing file"));
+ return false;
+ }
+ }
+
+ // Send the get command
+ if (WriteMsg(Tag,Msg,"RETR %s",Path) == false)
+ return false;
+
+ if (Tag >= 400)
+ {
+ if (Tag == 550)
+ Missing = true;
+ return _error->Error(_("Unable to fetch file, server said '%s'"),Msg.c_str());
+ }
+
+ // Finish off the data connection
+ if (Finalize() == false)
+ return false;
+
+ // Copy loop
+ unsigned char Buffer[4096];
+ while (1)
+ {
+ // Wait for some data..
+ if (WaitFd(DataFd,false,TimeOut) == false)
+ {
+ Close();
+ return _error->Error(_("Data socket timed out"));
+ }
+
+ // Read the data..
+ int Res = read(DataFd,Buffer,sizeof(Buffer));
+ if (Res == 0)
+ break;
+ if (Res < 0)
+ {
+ if (errno == EAGAIN)
+ continue;
+ break;
+ }
+
+ Hash.Add(Buffer,Res);
+ if (To.Write(Buffer,Res) == false)
+ {
+ Close();
+ return false;
+ }
+
+ if (MaximumSize > 0 && To.Tell() > MaximumSize)
+ {
+ Owner->SetFailReason("MaximumSizeExceeded");
+ return _error->Error("Writing more data than expected (%llu > %llu)",
+ To.Tell(), MaximumSize);
+ }
+ }
+
+ // All done
+ close(DataFd);
+ DataFd = -1;
+
+ // Read the closing message from the server
+ if (ReadResp(Tag,Msg) == false)
+ return false;
+ if (Tag >= 400)
+ return _error->Error(_("Data transfer failed, server said '%s'"),Msg.c_str());
+ return true;
+}
+ /*}}}*/
+
+// FtpMethod::FtpMethod - Constructor /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+FtpMethod::FtpMethod() : aptMethod("ftp","1.0",SendConfig)
+{
+ signal(SIGTERM,SigTerm);
+ signal(SIGINT,SigTerm);
+
+ Server = 0;
+ FailFd = -1;
+}
+ /*}}}*/
// FtpMethod::SigTerm - Handle a fatal signal				/*{{{*/
// ---------------------------------------------------------------------
/* This closes and timestamps the open file. This is necessary to get
   resume behavior on user abort. Note: this runs in signal context, so
   only async-signal-safe calls (utimes/close/_exit) are made; Fetch
   pre-computes FailFile.c_str() so no allocation happens here. */
void FtpMethod::SigTerm(int)
{
   // No transfer in progress — nothing to preserve
   if (FailFd == -1)
      _exit(100);

   // Timestamp the partial file so a later run can attempt a resume
   struct timeval times[2];
   times[0].tv_sec = FailTime;
   times[1].tv_sec = FailTime;
   times[0].tv_usec = times[1].tv_usec = 0;
   utimes(FailFile.c_str(), times);

   close(FailFd);

   _exit(100);
}
									/*}}}*/
+// FtpMethod::Configuration - Handle a configuration message /*{{{*/
+// ---------------------------------------------------------------------
+/* We stash the desired pipeline depth */
+bool FtpMethod::Configuration(string Message)
+{
+ if (aptMethod::Configuration(Message) == false)
+ return false;
+
+ TimeOut = _config->FindI("Acquire::Ftp::Timeout",TimeOut);
+
+ return true;
+}
+ /*}}}*/
// FtpMethod::Fetch - Fetch a file					/*{{{*/
// ---------------------------------------------------------------------
/* Fetch a single file, called by the base class.. Handles connection
   reuse, IMS (unchanged-since-last-fetch) short-circuiting, resume of
   partial downloads and timestamping of the result. */
bool FtpMethod::Fetch(FetchItem *Itm)
{
   URI Get = Itm->Uri;
   const char *File = Get.Path.c_str();
   FetchResult Res;
   Res.Filename = Itm->DestFile;
   Res.IMSHit = false;

   // Fill in credentials from the netrc file if the URI carries none
   maybe_add_auth (Get, _config->FindFile("Dir::Etc::netrc"));

   // Connect to the server; reuse the old connection if it matches
   if (Server == 0 || Server->Comp(Get) == false)
   {
      delete Server;
      Server = new FTPConn(Get);
   }

   // Could not connect is a transient error..
   if (Server->Open(this) == false)
   {
      Server->Close();
      Fail(true);
      return true;
   }

   // Get the files information (size and mtime; mtime lands in FailTime)
   Status(_("Query"));
   unsigned long long Size;
   if (Server->Size(File,Size) == false ||
       Server->ModTime(File,FailTime) == false)
   {
      Fail(true);
      return true;
   }
   Res.Size = Size;

   // See if it is an IMS hit (server mtime matches what we already have)
   if (Itm->LastModified == FailTime)
   {
      Res.Size = 0;
      Res.IMSHit = true;
      URIDone(Res);
      return true;
   }

   // See if the file exists
   struct stat Buf;
   if (stat(Itm->DestFile.c_str(),&Buf) == 0)
   {
      // Already complete with matching mtime: nothing to transfer
      if (Size == (unsigned long long)Buf.st_size && FailTime == Buf.st_mtime)
      {
	 Res.Size = Buf.st_size;
	 Res.LastModified = Buf.st_mtime;
	 Res.ResumePoint = Buf.st_size;
	 URIDone(Res);
	 return true;
      }

      // Resume? Only if the partial file carries the server's mtime
      if (FailTime == Buf.st_mtime && Size > (unsigned long long)Buf.st_size)
	 Res.ResumePoint = Buf.st_size;
   }

   // Open the file
   Hashes Hash(Itm->ExpectedHashes);
   {
      FileFd Fd(Itm->DestFile,FileFd::WriteAny);
      if (_error->PendingError() == true)
	 return false;

      URIStart(Res);

      // Arm the SigTerm handler's state before the transfer starts
      FailFile = Itm->DestFile;
      FailFile.c_str();   // Make sure we don't do a malloc in the signal handler
      FailFd = Fd.Fd();

      bool Missing;
      if (Server->Get(File,Fd,Res.ResumePoint,Hash,Missing,Itm->MaximumSize,this) == false)
      {
	 Fd.Close();

	 // Timestamp the partial result so a rerun can resume it
	 struct timeval times[2];
	 times[0].tv_sec = FailTime;
	 times[1].tv_sec = FailTime;
	 times[0].tv_usec = times[1].tv_usec = 0;
	 utimes(FailFile.c_str(), times);

	 // If the file is missing we hard fail and delete the destfile
	 // otherwise transient fail
	 if (Missing == true) {
	    RemoveFile("ftp", FailFile);
	    return false;
	 }
	 Fail(true);
	 return true;
      }

      Res.Size = Fd.Size();

      // Timestamp with the server's mtime for later IMS checks
      struct timeval times[2];
      times[0].tv_sec = FailTime;
      times[1].tv_sec = FailTime;
      times[0].tv_usec = times[1].tv_usec = 0;
      utimes(Fd.Name().c_str(), times);
      FailFd = -1;   // Transfer complete; disarm the signal handler
   }

   Res.LastModified = FailTime;
   Res.TakeHashes(Hash);

   URIDone(Res);

   return true;
}
									/*}}}*/
+
int main(int, const char *argv[])
{
   /* See if we should become the http client - we do this for http
      proxy urls */
   if (getenv("ftp_proxy") != 0)
   {
      URI Proxy = string(getenv("ftp_proxy"));

      // Run the HTTP method
      if (Proxy.Access == "http")
      {
	 // Copy over the environment setting. S must stay alive until
	 // execl replaces the process image, which it does here.
	 char S[300];
	 snprintf(S,sizeof(S),"http_proxy=%s",getenv("ftp_proxy"));
	 putenv(S);
	 putenv((char *)"no_proxy=");

	 // Run the http method binary located next to this one
	 string Path = flNotFile(argv[0]) + "http";
	 execl(Path.c_str(),Path.c_str(),(char *)NULL);
	 // Only reached if execl failed
	 cerr << _("Unable to invoke ") << Path << endl;
	 exit(100);
      }
   }
   return FtpMethod().Run();
}
diff --git a/methods/ftp.h b/methods/ftp.h
new file mode 100644
index 000000000..6a12475a0
--- /dev/null
+++ b/methods/ftp.h
@@ -0,0 +1,91 @@
+// -*- mode: cpp; mode: fold -*-
// Description								/*{{{*/
// $Id: ftp.h,v 1.4 2001/03/06 07:15:29 jgg Exp $
+/* ######################################################################
+
+ FTP Acquire Method - This is the FTP acquire method for APT.
+
+ ##################################################################### */
+ /*}}}*/
+#ifndef APT_FTP_H
+#define APT_FTP_H
+
+#include <apt-pkg/strutl.h>
+#include "aptmethod.h"
+
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <time.h>
+#include <string>
+
// Manages one FTP control connection plus its per-transfer data channel.
class FTPConn
{
   char Buffer[1024*10];   // Buffered control-channel input for ReadLine
   unsigned long Len;      // Number of valid bytes currently in Buffer
   int ServerFd;           // Control connection fd (-1 when closed)
   int DataFd;             // Data connection fd (-1 when closed)
   int DataListenFd;       // Listening fd for active (PORT/EPRT) mode
   URI ServerName;         // The server this connection is bound to
   bool ForceExtended;     // Always use the extended EPSV/EPRT commands
   bool TryPassive;        // Attempt passive mode before port mode
   bool Debug;             // Echo the protocol conversation to stderr

   // Address to connect the data socket to in passive mode (0 = none)
   struct addrinfo *PasvAddr;

   // Generic Peer Address (remote end of the control connection)
   struct sockaddr_storage PeerAddr;
   socklen_t PeerAddrLen;

   // Generic Server Address (us)
   struct sockaddr_storage ServerAddr;
   socklen_t ServerAddrLen;

   // Private helper functions
   bool ReadLine(std::string &Text);
   bool Login();
   bool CreateDataFd();
   bool Finalize();

   public:

   // True if this live connection can be reused for the given URI
   bool Comp(URI Other) {return Other.Host == ServerName.Host && Other.Port == ServerName.Port && Other.User == ServerName.User && Other.Password == ServerName.Password; };

   // Raw connection IO
   bool ReadResp(unsigned int &Ret,std::string &Text);
   bool WriteMsg(unsigned int &Ret,std::string &Text,const char *Fmt,...);

   // Connection control
   bool Open(pkgAcqMethod *Owner);
   void Close();
   bool GoPasv();
   bool ExtGoPasv();

   // Query
   bool Size(const char *Path,unsigned long long &Size);
   bool ModTime(const char *Path, time_t &Time);
   bool Get(const char *Path,FileFd &To,unsigned long long Resume,
	    Hashes &MD5,bool &Missing, unsigned long long MaximumSize,
	    pkgAcqMethod *Owner);

   explicit FTPConn(URI Srv);
   ~FTPConn();
};
+
// The FTP acquire method driver; owns a reusable FTPConn.
class FtpMethod : public aptMethod
{
   virtual bool Fetch(FetchItem *Itm) APT_OVERRIDE;
   virtual bool Configuration(std::string Message) APT_OVERRIDE;

   FTPConn *Server;   // Cached connection, reused across fetches

   // State shared with the SigTerm signal handler (hence static):
   // the in-progress file, its fd and the server's mtime for it.
   static std::string FailFile;
   static int FailFd;
   static time_t FailTime;
   static APT_NORETURN void SigTerm(int);

   public:

   FtpMethod();
};
+
+#endif
diff --git a/methods/gpgv.cc b/methods/gpgv.cc
new file mode 100644
index 000000000..51c268d02
--- /dev/null
+++ b/methods/gpgv.cc
@@ -0,0 +1,452 @@
+#include <config.h>
+
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/gpgv.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/fileutl.h>
+#include "aptmethod.h"
+
+#include <ctype.h>
+#include <errno.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <array>
+#include <algorithm>
+#include <sstream>
+#include <iterator>
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <apti18n.h>
+
+using std::string;
+using std::vector;
+
+#define GNUPGPREFIX "[GNUPG:]"
+#define GNUPGBADSIG "[GNUPG:] BADSIG"
+#define GNUPGERRSIG "[GNUPG:] ERRSIG"
+#define GNUPGNOPUBKEY "[GNUPG:] NO_PUBKEY"
+#define GNUPGVALIDSIG "[GNUPG:] VALIDSIG"
+#define GNUPGGOODSIG "[GNUPG:] GOODSIG"
+#define GNUPGEXPKEYSIG "[GNUPG:] EXPKEYSIG"
+#define GNUPGEXPSIG "[GNUPG:] EXPSIG"
+#define GNUPGREVKEYSIG "[GNUPG:] REVKEYSIG"
+#define GNUPGNODATA "[GNUPG:] NODATA"
+#define APTKEYWARNING "[APTKEY:] WARNING"
+#define APTKEYERROR "[APTKEY:] ERROR"
+
+// Digest - trust classification of one signature digest algorithm.
+// The compiled-in default 'state' can be overridden (downgraded) at
+// runtime via APT::Hashes::<name>::{Untrusted,Weak} config options.
+struct Digest {
+ enum class State {
+ Untrusted,
+ Weak,
+ Trusted,
+ } state;
+ char name[32];
+
+ // Effective trust state: config override wins over the built-in default.
+ State getState() const {
+ std::string optionUntrusted;
+ std::string optionWeak;
+ strprintf(optionUntrusted, "APT::Hashes::%s::Untrusted", name);
+ strprintf(optionWeak, "APT::Hashes::%s::Weak", name);
+ if (_config->FindB(optionUntrusted, false) == true)
+ return State::Untrusted;
+ if (_config->FindB(optionWeak, false) == true)
+ return State::Weak;
+
+ return state;
+ }
+};
+
+// Table indexed by the numeric digest-algorithm id gpgv reports in the
+// VALIDSIG status line (looked up via FindDigest); the numbering matches
+// the OpenPGP hash-algorithm ids (MD5=1, SHA1=2, ..., SHA256=8).
+// Index 0 doubles as the fallback for unknown/unparsable ids.
+static constexpr Digest Digests[] = {
+ {Digest::State::Untrusted, "Invalid digest"},
+ {Digest::State::Untrusted, "MD5"},
+ {Digest::State::Untrusted, "SHA1"},
+ {Digest::State::Untrusted, "RIPE-MD/160"},
+ {Digest::State::Trusted, "Reserved digest"},
+ {Digest::State::Trusted, "Reserved digest"},
+ {Digest::State::Trusted, "Reserved digest"},
+ {Digest::State::Trusted, "Reserved digest"},
+ {Digest::State::Trusted, "SHA256"},
+ {Digest::State::Trusted, "SHA384"},
+ {Digest::State::Trusted, "SHA512"},
+ {Digest::State::Trusted, "SHA224"},
+};
+
+// Map a gpgv digest-algorithm id (decimal string) to its Digest entry;
+// out-of-range or unparsable input (atoi -> 0) yields Digests[0],
+// the untrusted "Invalid digest" sentinel.
+static Digest FindDigest(std::string const & Digest)
+{
+ int id = atoi(Digest.c_str());
+ if (id >= 0 && static_cast<unsigned>(id) < _count(Digests)) {
+ return Digests[id];
+ } else {
+ return Digests[0];
+ }
+}
+
+// Signer - a key id paired with a note (here: the name of the weak
+// digest algorithm it signed with) for user-facing warnings.
+struct Signer {
+ std::string key;
+ std::string note;
+};
+// Compare the long keyid (last 16 hex digits) of a 40-char VALIDSIG
+// fingerprint with the keyid in a "GOODSIG <longid> ..." entry.
+static bool IsTheSameKey(std::string const &validsig, std::string const &goodsig) {
+ // VALIDSIG reports a keyid (40 = 24 + 16), GOODSIG is a longid (16) only
+ return validsig.compare(24, 16, goodsig, strlen("GOODSIG "), 16) == 0;
+}
+
+// GPGVMethod - acquire method that verifies signature files by running
+// apt-key/gpgv and classifying the status-fd output (see URIAcquire).
+class GPGVMethod : public aptMethod
+{
+ private:
+ // Runs the verifier on 'file'/'outfile' and fills the signer buckets;
+ // returns "" on success or a human-readable error message.
+ string VerifyGetSigners(const char *file, const char *outfile,
+ std::string const &key,
+ vector<string> &GoodSigners,
+ vector<string> &BadSigners,
+ vector<string> &WorthlessSigners,
+ vector<Signer> &SoonWorthlessSigners,
+ vector<string> &NoPubKeySigners);
+ protected:
+ virtual bool URIAcquire(std::string const &Message, FetchItem *Itm) APT_OVERRIDE;
+ public:
+ GPGVMethod() : aptMethod("gpgv","1.0",SingleInstance | SendConfig) {};
+};
+// Record a status line of the form "[GNUPG:] TAG KEYID ...": keeps
+// "TAG KEYID" and truncates everything after the hex key id. The buffer
+// is modified in place (NUL-terminated early) before being copied.
+static void PushEntryWithKeyID(std::vector<std::string> &Signers, char * const buffer, bool const Debug)
+{
+ // sizeof(GNUPGPREFIX) includes the NUL, so this also skips the space
+ char * const msg = buffer + sizeof(GNUPGPREFIX);
+ char *p = msg;
+ // skip the message
+ while (*p && !isspace(*p))
+ ++p;
+ // skip the separator whitespace
+ ++p;
+ // skip the hexdigit fingerprint
+ while (*p && isxdigit(*p))
+ ++p;
+ // cut the rest from the message
+ *p = '\0';
+ if (Debug == true)
+ std::clog << "Got " << msg << " !" << std::endl;
+ Signers.push_back(msg);
+}
+// Record a status line keeping the full payload (tag, key id and user
+// id); only the trailing newline/whitespace is stripped.
+static void PushEntryWithUID(std::vector<std::string> &Signers, char * const buffer, bool const Debug)
+{
+ std::string msg = buffer + sizeof(GNUPGPREFIX);
+ auto const nuke = msg.find_last_not_of("\n\t\r");
+ if (nuke != std::string::npos)
+ msg.erase(nuke + 1);
+ if (Debug == true)
+ std::clog << "Got " << msg << " !" << std::endl;
+ Signers.push_back(msg);
+}
+// Fork apt-key/gpgv over 'file' against 'outfile', parse the status-fd
+// stream from the pipe, and sort each reported signature into the
+// Good/Bad/Worthless/SoonWorthless/NoPubKey buckets. Returns "" on
+// success, otherwise a (translated) error message for the caller.
+// 'key' is either a keyring path (starts with '/') or a comma-separated
+// fingerprint list (Signed-By) that the good signature must match.
+string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile,
+ std::string const &key,
+ vector<string> &GoodSigners,
+ vector<string> &BadSigners,
+ vector<string> &WorthlessSigners,
+ vector<Signer> &SoonWorthlessSigners,
+ vector<string> &NoPubKeySigners)
+{
+ bool const Debug = DebugEnabled();
+
+ if (Debug == true)
+ std::clog << "inside VerifyGetSigners" << std::endl;
+
+ int fd[2];
+ // '/'-prefixed key means keyring file; anything else is a key-id list
+ bool const keyIsID = (key.empty() == false && key[0] != '/');
+
+ if (pipe(fd) < 0)
+ return "Couldn't create pipe";
+
+ pid_t pid = fork();
+ if (pid < 0)
+ return string("Couldn't spawn new process") + strerror(errno);
+ else if (pid == 0)
+ // child: exec the verifier with status-fd 3 wired to our pipe
+ ExecGPGV(outfile, file, 3, fd, (keyIsID ? "" : key));
+ close(fd[1]);
+
+ FILE *pipein = fdopen(fd[0], "r");
+
+ // Loop over the output of apt-key (which really is gnupg), and check the signatures.
+ std::vector<std::string> ValidSigners;
+ std::vector<std::string> ErrSigners;
+ size_t buffersize = 0;
+ char *buffer = NULL;
+ bool gotNODATA = false;
+ while (1)
+ {
+ if (getline(&buffer, &buffersize, pipein) == -1)
+ break;
+ if (Debug == true)
+ std::clog << "Read: " << buffer << std::endl;
+
+ // Push the data into three separate vectors, which
+ // we later concatenate. They're kept separate so
+ // if we improve the apt method communication stuff later
+ // it will be better.
+ if (strncmp(buffer, GNUPGBADSIG, sizeof(GNUPGBADSIG)-1) == 0)
+ PushEntryWithUID(BadSigners, buffer, Debug);
+ else if (strncmp(buffer, GNUPGERRSIG, sizeof(GNUPGERRSIG)-1) == 0)
+ PushEntryWithKeyID(ErrSigners, buffer, Debug);
+ else if (strncmp(buffer, GNUPGNOPUBKEY, sizeof(GNUPGNOPUBKEY)-1) == 0)
+ {
+ PushEntryWithKeyID(NoPubKeySigners, buffer, Debug);
+ // the NO_PUBKEY line explains the matching ERRSIG, so drop the latter
+ ErrSigners.erase(std::remove_if(ErrSigners.begin(), ErrSigners.end(), [&](std::string const &errsig) {
+ return errsig.compare(strlen("ERRSIG "), 16, buffer, sizeof(GNUPGNOPUBKEY), 16) == 0; }), ErrSigners.end());
+ }
+ else if (strncmp(buffer, GNUPGNODATA, sizeof(GNUPGNODATA)-1) == 0)
+ gotNODATA = true;
+ else if (strncmp(buffer, GNUPGEXPKEYSIG, sizeof(GNUPGEXPKEYSIG)-1) == 0)
+ PushEntryWithUID(WorthlessSigners, buffer, Debug);
+ else if (strncmp(buffer, GNUPGEXPSIG, sizeof(GNUPGEXPSIG)-1) == 0)
+ PushEntryWithUID(WorthlessSigners, buffer, Debug);
+ else if (strncmp(buffer, GNUPGREVKEYSIG, sizeof(GNUPGREVKEYSIG)-1) == 0)
+ PushEntryWithUID(WorthlessSigners, buffer, Debug);
+ else if (strncmp(buffer, GNUPGGOODSIG, sizeof(GNUPGGOODSIG)-1) == 0)
+ PushEntryWithKeyID(GoodSigners, buffer, Debug);
+ else if (strncmp(buffer, GNUPGVALIDSIG, sizeof(GNUPGVALIDSIG)-1) == 0)
+ {
+ // VALIDSIG: token 0 is the fingerprint, token 7 the digest algo id
+ std::istringstream iss(buffer + sizeof(GNUPGVALIDSIG));
+ vector<string> tokens{std::istream_iterator<string>{iss},
+ std::istream_iterator<string>{}};
+ auto const sig = tokens[0];
+ // Reject weak digest algorithms
+ Digest digest = FindDigest(tokens[7]);
+ switch (digest.getState()) {
+ case Digest::State::Weak:
+ // Treat them like an expired key: For that a message about expiry
+ // is emitted, a VALIDSIG, but no GOODSIG.
+ SoonWorthlessSigners.push_back({sig, digest.name});
+ if (Debug == true)
+ std::clog << "Got weak VALIDSIG, key ID: " << sig << std::endl;
+ break;
+ case Digest::State::Untrusted:
+ // Treat them like an expired key: For that a message about expiry
+ // is emitted, a VALIDSIG, but no GOODSIG.
+ WorthlessSigners.push_back(sig);
+ GoodSigners.erase(std::remove_if(GoodSigners.begin(), GoodSigners.end(), [&](std::string const &goodsig) {
+ return IsTheSameKey(sig, goodsig); }), GoodSigners.end());
+ if (Debug == true)
+ std::clog << "Got untrusted VALIDSIG, key ID: " << sig << std::endl;
+ break;
+
+ case Digest::State::Trusted:
+ if (Debug == true)
+ std::clog << "Got trusted VALIDSIG, key ID: " << sig << std::endl;
+ break;
+ }
+
+ ValidSigners.push_back(sig);
+ }
+ else if (strncmp(buffer, APTKEYWARNING, sizeof(APTKEYWARNING)-1) == 0)
+ Warning("%s", buffer + sizeof(APTKEYWARNING));
+ else if (strncmp(buffer, APTKEYERROR, sizeof(APTKEYERROR)-1) == 0)
+ _error->Error("%s", buffer + sizeof(APTKEYERROR));
+ }
+ fclose(pipein);
+ free(buffer);
+ // remaining ERRSIGs (without a NO_PUBKEY explanation) count as worthless
+ std::move(ErrSigners.begin(), ErrSigners.end(), std::back_inserter(WorthlessSigners));
+
+ // apt-key has a --keyid parameter, but this requires gpg, so we call it without it
+ // and instead check after the fact which keyids where used for verification
+ if (keyIsID == true)
+ {
+ if (Debug == true)
+ std::clog << "GoodSigs needs to be limited to keyid " << key << std::endl;
+ bool foundGood = false;
+ for (auto const &k: VectorizeString(key, ','))
+ {
+ if (std::find(ValidSigners.begin(), ValidSigners.end(), k) == ValidSigners.end())
+ continue;
+ // we look for GOODSIG here as well as an expired sig is a valid sig as well (but not a good one)
+ std::string const goodfingerprint = "GOODSIG " + k;
+ std::string const goodlongkeyid = "GOODSIG " + k.substr(24, 16);
+ foundGood = std::find(GoodSigners.begin(), GoodSigners.end(), goodfingerprint) != GoodSigners.end();
+ if (Debug == true)
+ std::clog << "Key " << k << " is valid sig, is " << goodfingerprint << " also a good one? " << (foundGood ? "yes" : "no") << std::endl;
+ std::string goodsig;
+ if (foundGood == false)
+ {
+ foundGood = std::find(GoodSigners.begin(), GoodSigners.end(), goodlongkeyid) != GoodSigners.end();
+ if (Debug == true)
+ std::clog << "Key " << k << " is valid sig, is " << goodlongkeyid << " also a good one? " << (foundGood ? "yes" : "no") << std::endl;
+ goodsig = goodlongkeyid;
+ }
+ else
+ goodsig = goodfingerprint;
+ if (foundGood == false)
+ continue;
+ // demote every other good signer to NoPubKey; only 'goodsig' stays good
+ std::copy(GoodSigners.begin(), GoodSigners.end(), std::back_insert_iterator<std::vector<std::string> >(NoPubKeySigners));
+ GoodSigners.clear();
+ GoodSigners.push_back(goodsig);
+ NoPubKeySigners.erase(
+ std::remove(NoPubKeySigners.begin(),
+ std::remove(NoPubKeySigners.begin(), NoPubKeySigners.end(), goodfingerprint),
+ goodlongkeyid),
+ NoPubKeySigners.end()
+ );
+ break;
+ }
+ if (foundGood == false)
+ {
+ // none of the allowed keys signed: nothing remains "good"
+ std::copy(GoodSigners.begin(), GoodSigners.end(), std::back_insert_iterator<std::vector<std::string> >(NoPubKeySigners));
+ GoodSigners.clear();
+ }
+ }
+
+ int status;
+ waitpid(pid, &status, 0);
+ if (Debug == true)
+ {
+ ioprintf(std::clog, "gpgv exited with status %i\n", WEXITSTATUS(status));
+ }
+
+ if (Debug)
+ {
+ std::cerr << "Summary:" << std::endl << " Good: ";
+ std::copy(GoodSigners.begin(), GoodSigners.end(), std::ostream_iterator<std::string>(std::cerr, ", "));
+ std::cerr << std::endl << " Bad: ";
+ std::copy(BadSigners.begin(), BadSigners.end(), std::ostream_iterator<std::string>(std::cerr, ", "));
+ std::cerr << std::endl << " Worthless: ";
+ std::copy(WorthlessSigners.begin(), WorthlessSigners.end(), std::ostream_iterator<std::string>(std::cerr, ", "));
+ std::cerr << std::endl << " SoonWorthless: ";
+ std::for_each(SoonWorthlessSigners.begin(), SoonWorthlessSigners.end(), [](Signer const &sig) { std::cerr << sig.key << ", "; });
+ std::cerr << std::endl << " NoPubKey: ";
+ std::copy(NoPubKeySigners.begin(), NoPubKeySigners.end(), std::ostream_iterator<std::string>(std::cerr, ", "));
+ std::cerr << std::endl << " NODATA: " << (gotNODATA ? "yes" : "no") << std::endl;
+ }
+
+ // exit status 112 is emitted for a clearsigned file without valid content
+ if (WEXITSTATUS(status) == 112)
+ {
+ // acquire system checks for "NODATA" to generate GPG errors (the others are only warnings)
+ std::string errmsg;
+ //TRANSLATORS: %s is a single techy word like 'NODATA'
+ strprintf(errmsg, _("Clearsigned file isn't valid, got '%s' (does the network require authentication?)"), "NODATA");
+ return errmsg;
+ }
+ else if (gotNODATA)
+ {
+ // acquire system checks for "NODATA" to generate GPG errors (the others are only warnings)
+ std::string errmsg;
+ //TRANSLATORS: %s is a single techy word like 'NODATA'
+ strprintf(errmsg, _("Signed file isn't valid, got '%s' (does the network require authentication?)"), "NODATA");
+ return errmsg;
+ }
+ else if (WEXITSTATUS(status) == 0)
+ {
+ if (keyIsID)
+ {
+ // gpgv will report success, but we want to enforce a certain keyring
+ // so if we haven't found the key the valid we found is in fact invalid
+ if (GoodSigners.empty())
+ return _("At least one invalid signature was encountered.");
+ }
+ else
+ {
+ if (GoodSigners.empty())
+ return _("Internal error: Good signature, but could not determine key fingerprint?!");
+ }
+ return "";
+ }
+ else if (WEXITSTATUS(status) == 1)
+ return _("At least one invalid signature was encountered.");
+ else if (WEXITSTATUS(status) == 111)
+ return _("Could not execute 'apt-key' to verify signature (is gnupg installed?)");
+ else
+ return _("Unknown error executing apt-key");
+}
+
+// Method entry point for one gpgv:// item: verify the signature file
+// named by the URI against Itm->DestFile, turn the signer buckets into
+// user-facing warnings/errors, and report the signers via GPGVOutput.
+// Fails (returns false) only for bad signatures or no good signature.
+bool GPGVMethod::URIAcquire(std::string const &Message, FetchItem *Itm)
+{
+ URI const Get = Itm->Uri;
+ string const Path = Get.Host + Get.Path; // To account for relative paths
+ // optional Signed-By restriction (keyring path or fingerprint list)
+ std::string const key = LookupTag(Message, "Signed-By");
+ vector<string> GoodSigners;
+ vector<string> BadSigners;
+ // a worthless signature is a expired or revoked one
+ vector<string> WorthlessSigners;
+ vector<Signer> SoonWorthlessSigners;
+ vector<string> NoPubKeySigners;
+
+ FetchResult Res;
+ Res.Filename = Itm->DestFile;
+ URIStart(Res);
+
+ // Run apt-key on file, extract contents and get the key ID of the signer
+ string const msg = VerifyGetSigners(Path.c_str(), Itm->DestFile.c_str(), key,
+ GoodSigners, BadSigners, WorthlessSigners,
+ SoonWorthlessSigners, NoPubKeySigners);
+ if (_error->PendingError())
+ return false;
+
+ // Check if all good signers are soon worthless and warn in that case
+ if (std::all_of(GoodSigners.begin(), GoodSigners.end(), [&](std::string const &good) {
+ return std::any_of(SoonWorthlessSigners.begin(), SoonWorthlessSigners.end(), [&](Signer const &weak) {
+ return IsTheSameKey(weak.key, good);
+ });
+ }))
+ {
+ for (auto const & Signer : SoonWorthlessSigners)
+ // TRANSLATORS: The second %s is the reason and is untranslated for repository owners.
+ Warning(_("Signature by key %s uses weak digest algorithm (%s)"), Signer.key.c_str(), Signer.note.c_str());
+ }
+
+ if (GoodSigners.empty() || !BadSigners.empty() || !NoPubKeySigners.empty())
+ {
+ string errmsg;
+ // In this case, something bad probably happened, so we just go
+ // with what the other method gave us for an error message.
+ if (BadSigners.empty() && WorthlessSigners.empty() && NoPubKeySigners.empty())
+ errmsg = msg;
+ else
+ {
+ if (!BadSigners.empty())
+ {
+ errmsg += _("The following signatures were invalid:\n");
+ for (vector<string>::iterator I = BadSigners.begin();
+ I != BadSigners.end(); ++I)
+ errmsg += (*I + "\n");
+ }
+ if (!WorthlessSigners.empty())
+ {
+ errmsg += _("The following signatures were invalid:\n");
+ for (vector<string>::iterator I = WorthlessSigners.begin();
+ I != WorthlessSigners.end(); ++I)
+ errmsg += (*I + "\n");
+ }
+ if (!NoPubKeySigners.empty())
+ {
+ errmsg += _("The following signatures couldn't be verified because the public key is not available:\n");
+ for (vector<string>::iterator I = NoPubKeySigners.begin();
+ I != NoPubKeySigners.end(); ++I)
+ errmsg += (*I + "\n");
+ }
+ }
+ // this is only fatal if we have no good sigs or if we have at
+ // least one bad signature. good signatures and NoPubKey signatures
+ // happen easily when a file is signed with multiple signatures
+ if(GoodSigners.empty() or !BadSigners.empty())
+ return _error->Error("%s", errmsg.c_str());
+ }
+
+ // Just pass the raw output up, because passing it as a real data
+ // structure is too difficult with the method stuff. We keep it
+ // as three separate vectors for future extensibility.
+ Res.GPGVOutput = GoodSigners;
+ std::move(BadSigners.begin(), BadSigners.end(), std::back_inserter(Res.GPGVOutput));
+ std::move(NoPubKeySigners.begin(), NoPubKeySigners.end(), std::back_inserter(Res.GPGVOutput));
+ URIDone(Res);
+
+ if (DebugEnabled())
+ std::clog << "apt-key succeeded\n";
+
+ return true;
+}
+
+
+// Entry point: hand control to the aptMethod event loop.
+int main()
+{
+ return GPGVMethod().Run();
+}
diff --git a/methods/http.cc b/methods/http.cc
new file mode 100644
index 000000000..9f5959548
--- /dev/null
+++ b/methods/http.cc
@@ -0,0 +1,1014 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: http.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $
+/* ######################################################################
+
+ HTTP Acquire Method - This is the HTTP acquire method for APT.
+
+ It uses HTTP/1.1 and many of the fancy options there-in, such as
+ pipelining, range, if-range and so on.
+
+ It is based on a doubly buffered select loop. A groupe of requests are
+ fed into a single output buffer that is constantly fed out the
+ socket. This provides ideal pipelining as in many cases all of the
+ requests will fit into a single packet. The input socket is buffered
+ the same way and fed into the fd for the file (may be a pipe in future).
+
+ This double buffering provides fairly substantial transfer rates,
+ compared to wget the http method is about 4% faster. Most importantly,
+ when HTTP is compared with FTP as a protocol the speed difference is
+ huge. In tests over the internet from two sites to llug (via ATM) this
+ program got 230k/s sustained http transfer rates. FTP on the other
+ hand topped out at 170k/s. That combined with the time to setup the
+ FTP connection makes HTTP a vastly superior protocol.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <config.h>
+
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/hashes.h>
+#include <apt-pkg/netrc.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/proxy.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <sys/select.h>
+#include <cstring>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <arpa/inet.h>
+#include <iostream>
+#include <sstream>
+
+#include "config.h"
+#include "connect.h"
+#include "http.h"
+
+#include <apti18n.h>
+ /*}}}*/
+using namespace std;
+
+unsigned long long CircleBuf::BwReadLimit=0;
+unsigned long long CircleBuf::BwTickReadData=0;
+struct timeval CircleBuf::BwReadTick={0,0};
+const unsigned int CircleBuf::BW_HZ=10;
+
+// CircleBuf::CircleBuf - Circular input buffer /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+CircleBuf::CircleBuf(HttpMethod const * const Owner, unsigned long long Size)
+ : Size(Size), Hash(NULL), TotalWriten(0)
+{
+ Buf = new unsigned char[Size];
+ Reset();
+
+ // Dl-Limit is configured in KiB/s; the byte/s limit is a class-wide
+ // static, so it is shared by every CircleBuf (In and Out alike).
+ CircleBuf::BwReadLimit = Owner->ConfigFindI("Dl-Limit", 0) * 1024;
+}
+ /*}}}*/
+// CircleBuf::Reset - Reset to the default state /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+// Return the buffer to its pristine state: empty, no byte limit
+// (MaxGet = max value), no pending string queue and no hasher.
+void CircleBuf::Reset()
+{
+ InP = 0;
+ OutP = 0;
+ StrPos = 0;
+ TotalWriten = 0;
+ MaxGet = (unsigned long long)-1;
+ OutQueue = string();
+ if (Hash != NULL)
+ {
+ delete Hash;
+ Hash = NULL;
+ }
+}
+ /*}}}*/
+// CircleBuf::Read - Read from a FD into the circular buffer /*{{{*/
+// ---------------------------------------------------------------------
+/* This fills up the buffer with as much data as is in the FD, assuming it
+ is non-blocking.. */
+// Fill the ring from non-blocking Fd until it is full, the fd would
+// block (EAGAIN), or the per-tick bandwidth budget is spent. Returns
+// false on EOF or a hard read error, true otherwise.
+bool CircleBuf::Read(int Fd)
+{
+ while (1)
+ {
+ // Woops, buffer is full
+ if (InP - OutP == Size)
+ return true;
+
+ // what's left to read in this tick
+ unsigned long long const BwReadMax = CircleBuf::BwReadLimit/BW_HZ;
+
+ if(CircleBuf::BwReadLimit) {
+ struct timeval now;
+ gettimeofday(&now,0);
+
+ // microseconds elapsed since the current tick started
+ unsigned long long d = (now.tv_sec-CircleBuf::BwReadTick.tv_sec)*1000000 +
+ now.tv_usec-CircleBuf::BwReadTick.tv_usec;
+ if(d > 1000000/BW_HZ) {
+ // a new tick: reset the budget
+ CircleBuf::BwReadTick = now;
+ CircleBuf::BwTickReadData = 0;
+ }
+
+ // budget exhausted: sleep out the rest of the tick
+ if(CircleBuf::BwTickReadData >= BwReadMax) {
+ usleep(1000000/BW_HZ);
+ return true;
+ }
+ }
+
+ // Write the buffer segment
+ ssize_t Res;
+ if(CircleBuf::BwReadLimit) {
+ Res = read(Fd,Buf + (InP%Size),
+ BwReadMax > LeftRead() ? LeftRead() : BwReadMax);
+ } else
+ Res = read(Fd,Buf + (InP%Size),LeftRead());
+
+ if(Res > 0 && BwReadLimit > 0)
+ CircleBuf::BwTickReadData += Res;
+
+ if (Res == 0)
+ return false;
+ if (Res < 0)
+ {
+ if (errno == EAGAIN)
+ return true;
+ return false;
+ }
+
+ // first byte ever read starts the transfer-rate clock (see Stats)
+ if (InP == 0)
+ gettimeofday(&Start,0);
+ InP += Res;
+ }
+}
+ /*}}}*/
+// CircleBuf::Read - Put the string into the buffer /*{{{*/
+// ---------------------------------------------------------------------
+/* This will hold the string in and fill the buffer with it as it empties */
+// Queue a string for output; as much as currently fits is copied into
+// the ring immediately, the rest drains via later FillOut() calls.
+bool CircleBuf::Read(string const &Data)
+{
+ OutQueue.append(Data);
+ FillOut();
+ return true;
+}
+ /*}}}*/
+// CircleBuf::FillOut - Fill the buffer from the output queue /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+// Move bytes from the pending OutQueue string into the ring, stopping
+// when the ring is full or the queue is drained (queue then resets).
+void CircleBuf::FillOut()
+{
+ if (OutQueue.empty() == true)
+ return;
+ while (1)
+ {
+ // Woops, buffer is full
+ if (InP - OutP == Size)
+ return;
+
+ // Write the buffer segment
+ unsigned long long Sz = LeftRead();
+ if (OutQueue.length() - StrPos < Sz)
+ Sz = OutQueue.length() - StrPos;
+ memcpy(Buf + (InP%Size),OutQueue.c_str() + StrPos,Sz);
+
+ // Advance
+ StrPos += Sz;
+ InP += Sz;
+ if (OutQueue.length() == StrPos)
+ {
+ StrPos = 0;
+ OutQueue = "";
+ return;
+ }
+ }
+}
+ /*}}}*/
+// CircleBuf::Write - Write from the buffer into a FD /*{{{*/
+// ---------------------------------------------------------------------
+/* This empties the buffer into the FD. */
+// Drain the ring into Fd until empty, the MaxGet byte limit is hit, or
+// the fd would block. Bytes actually written are fed to the hasher (if
+// set) and counted in TotalWriten. Returns false on a hard error.
+bool CircleBuf::Write(int Fd)
+{
+ while (1)
+ {
+ FillOut();
+
+ // Woops, buffer is empty
+ if (OutP == InP)
+ return true;
+
+ if (OutP == MaxGet)
+ return true;
+
+ // Write the buffer segment
+ ssize_t Res;
+ Res = write(Fd,Buf + (OutP%Size),LeftWrite());
+
+ if (Res == 0)
+ return false;
+ if (Res < 0)
+ {
+ if (errno == EAGAIN)
+ return true;
+
+ return false;
+ }
+
+ TotalWriten += Res;
+
+ // hash only what actually went out, in output order
+ if (Hash != NULL)
+ Hash->Add(Buf + (OutP%Size),Res);
+
+ OutP += Res;
+ }
+}
+ /*}}}*/
+// CircleBuf::WriteTillEl - Write from the buffer to a string /*{{{*/
+// ---------------------------------------------------------------------
+/* This copies till the first empty line */
+// Copy buffered bytes into Data up to and including the first newline
+// (Single == true) or the first empty line, i.e. LF directly followed
+// by [CR]LF (Single == false, used for HTTP header blocks). Returns
+// false if no such terminator is buffered yet.
+bool CircleBuf::WriteTillEl(string &Data,bool Single)
+{
+ // We cheat and assume it is unneeded to have more than one buffer load
+ for (unsigned long long I = OutP; I < InP; I++)
+ {
+ if (Buf[I%Size] != '\n')
+ continue;
+ ++I;
+
+ if (Single == false)
+ {
+ // tolerate a CR before the second LF (CRLF line endings)
+ if (I < InP && Buf[I%Size] == '\r')
+ ++I;
+ if (I >= InP || Buf[I%Size] != '\n')
+ continue;
+ ++I;
+ }
+
+ Data = "";
+ while (OutP < I)
+ {
+ unsigned long long Sz = LeftWrite();
+ if (Sz == 0)
+ return false;
+ if (I - OutP < Sz)
+ Sz = I - OutP;
+ Data += string((char *)(Buf + (OutP%Size)),Sz);
+ OutP += Sz;
+ }
+ return true;
+ }
+ return false;
+}
+ /*}}}*/
+// CircleBuf::Stats - Print out stats information /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+// Transfer-rate reporting; the actual output is disabled (kept only as
+// commented-out debug code), so this is currently a no-op beyond the
+// gettimeofday call.
+void CircleBuf::Stats()
+{
+ if (InP == 0)
+ return;
+
+ struct timeval Stop;
+ gettimeofday(&Stop,0);
+/* float Diff = Stop.tv_sec - Start.tv_sec +
+ (float)(Stop.tv_usec - Start.tv_usec)/1000000;
+ clog << "Got " << InP << " in " << Diff << " at " << InP/Diff << endl;*/
+}
+ /*}}}*/
+// Release the ring storage and any attached hasher.
+CircleBuf::~CircleBuf()
+{
+ delete [] Buf;
+ delete Hash;
+}
+
+// HttpServerState::HttpServerState - Constructor /*{{{*/
+// Set up the per-server state with a 64KiB input and 4KiB output ring;
+// the socket timeout is taken from the method configuration.
+HttpServerState::HttpServerState(URI Srv,HttpMethod *Owner) : ServerState(Srv, Owner), In(Owner, 64*1024), Out(Owner, 4*1024)
+{
+ TimeOut = Owner->ConfigFindI("Timeout", TimeOut);
+ Reset();
+}
+ /*}}}*/
+// HttpServerState::Open - Open a connection to the server /*{{{*/
+// ---------------------------------------------------------------------
+/* This opens a connection to the server. */
+// One blocking-with-timeout exchange with the SOCKS proxy: waits until
+// ServerFd is ready, then writes (ReadWrite == true) or reads
+// (ReadWrite == false) exactly Size bytes at ToFrom. 'type' labels the
+// handshake step in error messages. Returns false with _error set.
+static bool TalkToSocksProxy(int const ServerFd, std::string const &Proxy,
+ char const * const type, bool const ReadWrite, uint8_t * const ToFrom,
+ unsigned int const Size, unsigned int const Timeout)
+{
+ if (WaitFd(ServerFd, ReadWrite, Timeout) == false)
+ return _error->Error("Waiting for the SOCKS proxy %s to %s timed out", URI::SiteOnly(Proxy).c_str(), type);
+ if (ReadWrite == false)
+ {
+ if (FileFd::Read(ServerFd, ToFrom, Size) == false)
+ return _error->Error("Reading the %s from SOCKS proxy %s failed", type, URI::SiteOnly(Proxy).c_str());
+ }
+ else
+ {
+ if (FileFd::Write(ServerFd, ToFrom, Size) == false)
+ return _error->Error("Writing the %s to SOCKS proxy %s failed", type, URI::SiteOnly(Proxy).c_str());
+ }
+ return true;
+}
+// Establish (or reuse) the connection to the origin server, resolving
+// the proxy configuration first: per-host Proxy::<host> beats the
++// generic Proxy option beats the http_proxy environment variable, and
+// no_proxy can veto all of them. A socks5h:// proxy triggers a full
+// SOCKS5 handshake (RFC 1928, user/pass auth per RFC 1929) here; an
+// http:// proxy or direct connection is just a TCP connect.
+bool HttpServerState::Open()
+{
+ // Use the already open connection if possible.
+ if (ServerFd != -1)
+ return true;
+
+ Close();
+ In.Reset();
+ Out.Reset();
+ Persistent = true;
+
+ // Determine the proxy setting
+ AutoDetectProxy(ServerName);
+ string SpecificProxy = Owner->ConfigFind("Proxy::" + ServerName.Host, "");
+ if (!SpecificProxy.empty())
+ {
+ // the magic value DIRECT forces a proxy-less connection
+ if (SpecificProxy == "DIRECT")
+ Proxy = "";
+ else
+ Proxy = SpecificProxy;
+ }
+ else
+ {
+ string DefProxy = Owner->ConfigFind("Proxy", "");
+ if (!DefProxy.empty())
+ {
+ Proxy = DefProxy;
+ }
+ else
+ {
+ char* result = getenv("http_proxy");
+ Proxy = result ? result : "";
+ }
+ }
+
+ // Parse no_proxy, a , separated list of domains
+ if (getenv("no_proxy") != 0)
+ {
+ if (CheckDomainList(ServerName.Host,getenv("no_proxy")) == true)
+ Proxy = "";
+ }
+
+ if (Proxy.empty() == false)
+ Owner->AddProxyAuth(Proxy, ServerName);
+
+ if (Proxy.Access == "socks5h")
+ {
+ if (Connect(Proxy.Host, Proxy.Port, "socks", 1080, ServerFd, TimeOut, Owner) == false)
+ return false;
+
+ /* We implement a very basic SOCKS5 client here complying mostly to RFC1928 expect
+ * for not offering GSSAPI auth which is a must (we only do no or user/pass auth).
+ * We also expect the SOCKS5 server to do hostname lookup (aka socks5h) */
+ std::string const ProxyInfo = URI::SiteOnly(Proxy);
+ Owner->Status(_("Connecting to %s (%s)"),"SOCKS5h proxy",ProxyInfo.c_str());
+ auto const Timeout = Owner->ConfigFindI("TimeOut", 120);
+ #define APT_WriteOrFail(TYPE, DATA, LENGTH) if (TalkToSocksProxy(ServerFd, ProxyInfo, TYPE, true, DATA, LENGTH, Timeout) == false) return false
+ #define APT_ReadOrFail(TYPE, DATA, LENGTH) if (TalkToSocksProxy(ServerFd, ProxyInfo, TYPE, false, DATA, LENGTH, Timeout) == false) return false
+ // SOCKS5 length fields are single bytes, so 255 is the hard cap
+ if (ServerName.Host.length() > 255)
+ return _error->Error("Can't use SOCKS5h as hostname %s is too long!", ServerName.Host.c_str());
+ if (Proxy.User.length() > 255 || Proxy.Password.length() > 255)
+ return _error->Error("Can't use user&pass auth as they are too long (%lu and %lu) for the SOCKS5!", Proxy.User.length(), Proxy.Password.length());
+ if (Proxy.User.empty())
+ {
+ // offer method 0x00 (no auth) only
+ uint8_t greeting[] = { 0x05, 0x01, 0x00 };
+ APT_WriteOrFail("greet-1", greeting, sizeof(greeting));
+ }
+ else
+ {
+ // offer methods 0x00 (no auth) and 0x02 (user/pass)
+ uint8_t greeting[] = { 0x05, 0x02, 0x00, 0x02 };
+ APT_WriteOrFail("greet-2", greeting, sizeof(greeting));
+ }
+ uint8_t greeting[2];
+ APT_ReadOrFail("greet back", greeting, sizeof(greeting));
+ if (greeting[0] != 0x05)
+ return _error->Error("SOCKS proxy %s greets back with wrong version: %d", ProxyInfo.c_str(), greeting[0]);
+ if (greeting[1] == 0x00)
+ ; // no auth has no method-dependent sub-negotiations
+ else if (greeting[1] == 0x02)
+ {
+ if (Proxy.User.empty())
+ return _error->Error("SOCKS proxy %s negotiated user&pass auth, but we had not offered it!", ProxyInfo.c_str());
+ // user&pass auth sub-negotiations are defined by RFC1929
+ std::vector<uint8_t> auth = {{ 0x01, static_cast<uint8_t>(Proxy.User.length()) }};
+ std::copy(Proxy.User.begin(), Proxy.User.end(), std::back_inserter(auth));
+ auth.push_back(static_cast<uint8_t>(Proxy.Password.length()));
+ std::copy(Proxy.Password.begin(), Proxy.Password.end(), std::back_inserter(auth));
+ APT_WriteOrFail("user&pass auth", auth.data(), auth.size());
+ uint8_t authstatus[2];
+ APT_ReadOrFail("auth report", authstatus, sizeof(authstatus));
+ if (authstatus[0] != 0x01)
+ return _error->Error("SOCKS proxy %s auth status response with wrong version: %d", ProxyInfo.c_str(), authstatus[0]);
+ if (authstatus[1] != 0x00)
+ return _error->Error("SOCKS proxy %s reported authorization failure: username or password incorrect? (%d)", ProxyInfo.c_str(), authstatus[1]);
+ }
+ else
+ return _error->Error("SOCKS proxy %s greets back having not found a common authorization method: %d", ProxyInfo.c_str(), greeting[1]);
+ // union lets us read/write the port as two network-order bytes
+ union { uint16_t * i; uint8_t * b; } portu;
+ uint16_t port = htons(static_cast<uint16_t>(ServerName.Port == 0 ? 80 : ServerName.Port));
+ portu.i = &port;
+ // CONNECT request with address type 0x03 (domain name): the proxy
+ // resolves the hostname for us -- that is the 'h' in socks5h
+ std::vector<uint8_t> request = {{ 0x05, 0x01, 0x00, 0x03, static_cast<uint8_t>(ServerName.Host.length()) }};
+ std::copy(ServerName.Host.begin(), ServerName.Host.end(), std::back_inserter(request));
+ request.push_back(portu.b[0]);
+ request.push_back(portu.b[1]);
+ APT_WriteOrFail("request", request.data(), request.size());
+ uint8_t response[4];
+ APT_ReadOrFail("first part of response", response, sizeof(response));
+ if (response[0] != 0x05)
+ return _error->Error("SOCKS proxy %s response with wrong version: %d", ProxyInfo.c_str(), response[0]);
+ if (response[2] != 0x00)
+ return _error->Error("SOCKS proxy %s has unexpected non-zero reserved field value: %d", ProxyInfo.c_str(), response[2]);
+ // the bound address is variable-length, keyed by the address type byte
+ std::string bindaddr;
+ if (response[3] == 0x01) // IPv4 address
+ {
+ uint8_t ip4port[6];
+ APT_ReadOrFail("IPv4+Port of response", ip4port, sizeof(ip4port));
+ portu.b[0] = ip4port[4];
+ portu.b[1] = ip4port[5];
+ port = ntohs(*portu.i);
+ strprintf(bindaddr, "%d.%d.%d.%d:%d", ip4port[0], ip4port[1], ip4port[2], ip4port[3], port);
+ }
+ else if (response[3] == 0x03) // hostname
+ {
+ uint8_t namelength;
+ APT_ReadOrFail("hostname length of response", &namelength, 1);
+ // NOTE(review): variable-length array is a compiler extension in C++
+ uint8_t hostname[namelength + 2];
+ APT_ReadOrFail("hostname of response", hostname, sizeof(hostname));
+ portu.b[0] = hostname[namelength];
+ portu.b[1] = hostname[namelength + 1];
+ port = ntohs(*portu.i);
+ hostname[namelength] = '\0';
+ strprintf(bindaddr, "%s:%d", hostname, port);
+ }
+ else if (response[3] == 0x04) // IPv6 address
+ {
+ uint8_t ip6port[18];
+ APT_ReadOrFail("IPv6+port of response", ip6port, sizeof(ip6port));
+ portu.b[0] = ip6port[16];
+ portu.b[1] = ip6port[17];
+ port = ntohs(*portu.i);
+ strprintf(bindaddr, "[%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X]:%d",
+ ip6port[0], ip6port[1], ip6port[2], ip6port[3], ip6port[4], ip6port[5], ip6port[6], ip6port[7],
+ ip6port[8], ip6port[9], ip6port[10], ip6port[11], ip6port[12], ip6port[13], ip6port[14], ip6port[15],
+ port);
+ }
+ else
+ return _error->Error("SOCKS proxy %s destination address is of unknown type: %d",
+ ProxyInfo.c_str(), response[3]);
+ if (response[1] != 0x00)
+ {
+ char const * errstr = nullptr;
+ auto errcode = response[1];
+ // Tor error reporting can be a bit arcane, lets try to detect & fix it up
+ if (bindaddr == "0.0.0.0:0")
+ {
+ auto const lastdot = ServerName.Host.rfind('.');
+ if (lastdot == std::string::npos || ServerName.Host.substr(lastdot) != ".onion")
+ ;
+ else if (errcode == 0x01)
+ {
+ auto const prevdot = ServerName.Host.rfind('.', lastdot - 1);
+ if (lastdot == 16 && prevdot == std::string::npos)
+ ; // valid .onion address
+ else if (prevdot != std::string::npos && (lastdot - prevdot) == 17)
+ ; // valid .onion address with subdomain(s)
+ else
+ {
+ errstr = "Invalid hostname: onion service name must be 16 characters long";
+ Owner->SetFailReason("SOCKS");
+ }
+ }
+ // in all likelihood the service is either down or the address has
+ // a typo and so "Host unreachable" is the better understood error
+ // compared to the technically correct "TLL expired".
+ else if (errcode == 0x06)
+ errcode = 0x04;
+ }
+ if (errstr == nullptr)
+ {
+ // standard RFC1928 reply codes
+ switch (errcode)
+ {
+ case 0x01: errstr = "general SOCKS server failure"; Owner->SetFailReason("SOCKS"); break;
+ case 0x02: errstr = "connection not allowed by ruleset"; Owner->SetFailReason("SOCKS"); break;
+ case 0x03: errstr = "Network unreachable"; Owner->SetFailReason("ConnectionTimedOut"); break;
+ case 0x04: errstr = "Host unreachable"; Owner->SetFailReason("ConnectionTimedOut"); break;
+ case 0x05: errstr = "Connection refused"; Owner->SetFailReason("ConnectionRefused"); break;
+ case 0x06: errstr = "TTL expired"; Owner->SetFailReason("Timeout"); break;
+ case 0x07: errstr = "Command not supported"; Owner->SetFailReason("SOCKS"); break;
+ case 0x08: errstr = "Address type not supported"; Owner->SetFailReason("SOCKS"); break;
+ default: errstr = "Unknown error"; Owner->SetFailReason("SOCKS"); break;
+ }
+ }
+ return _error->Error("SOCKS proxy %s could not connect to %s (%s) due to: %s (%d)",
+ ProxyInfo.c_str(), ServerName.Host.c_str(), bindaddr.c_str(), errstr, response[1]);
+ }
+ else if (Owner->DebugEnabled())
+ ioprintf(std::clog, "http: SOCKS proxy %s connection established to %s (%s)\n",
+ ProxyInfo.c_str(), ServerName.Host.c_str(), bindaddr.c_str());
+
+ if (WaitFd(ServerFd, true, Timeout) == false)
+ return _error->Error("SOCKS proxy %s reported connection to %s (%s), but timed out",
+ ProxyInfo.c_str(), ServerName.Host.c_str(), bindaddr.c_str());
+ #undef APT_ReadOrFail
+ #undef APT_WriteOrFail
+ }
+ else
+ {
+ // Determine what host and port to use based on the proxy settings
+ int Port = 0;
+ string Host;
+ if (Proxy.empty() == true || Proxy.Host.empty() == true)
+ {
+ if (ServerName.Port != 0)
+ Port = ServerName.Port;
+ Host = ServerName.Host;
+ }
+ else if (Proxy.Access != "http")
+ return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str());
+ else
+ {
+ if (Proxy.Port != 0)
+ Port = Proxy.Port;
+ Host = Proxy.Host;
+ }
+ return Connect(Host,Port,"http",80,ServerFd,TimeOut,Owner);
+ }
+ return true;
+}
+ /*}}}*/
+// HttpServerState::Close - Close a connection to the server /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool HttpServerState::Close()
+{
+   // Guard against double-close: the destructor calls Close() even if the
+   // connection was never opened or was already torn down, and close(-1)
+   // would only make a pointless syscall failing with EBADF.
+   if (ServerFd != -1)
+      close(ServerFd);
+   ServerFd = -1;
+   return true;
+}
+ /*}}}*/
+// HttpServerState::RunData - Transfer the data from the socket	/*{{{*/
+// ---------------------------------------------------------------------
+/* Reads the response body of the current request from the server socket
+   into Req.File.  Handles both chunked transfer encoding and plain
+   (length- or connection-delimited) bodies.  Returns true once the
+   complete body has been received and flushed to the file. */
+bool HttpServerState::RunData(RequestState &Req)
+{
+   Req.State = RequestState::Data;
+
+   // Chunked transfer encoding is fun..
+   if (Req.Encoding == RequestState::Chunked)
+   {
+      while (1)
+      {
+	 // Grab the block size line (pumping the socket until we have it)
+	 bool Last = true;
+	 string Data;
+	 In.Limit(-1);
+	 do
+	 {
+	    if (In.WriteTillEl(Data,true) == true)
+	       break;
+	 }
+	 while ((Last = Go(false, Req)) == true);
+
+	 if (Last == false)
+	    return false;
+
+	 // See if we are done; the chunk size line is hexadecimal
+	 unsigned long long Len = strtoull(Data.c_str(),0,16);
+	 if (Len == 0)
+	 {
+	    In.Limit(-1);
+
+	    // We have to remove the entity trailer; a line of <= 2 chars
+	    // (bare CRLF) marks the end of the trailer block
+	    Last = true;
+	    do
+	    {
+	       if (In.WriteTillEl(Data,true) == true && Data.length() <= 2)
+		  break;
+	    }
+	    while ((Last = Go(false, Req)) == true);
+	    if (Last == false)
+	       return false;
+	    return !_error->PendingError();
+	 }
+
+	 // Transfer the block: limit the buffer to exactly Len bytes
+	 In.Limit(Len);
+	 while (Go(true, Req) == true)
+	    if (In.IsLimit() == true)
+	       break;
+
+	 // Error: connection died before the chunk was complete
+	 if (In.IsLimit() == false)
+	    return false;
+
+	 // The server sends an extra new line before the next block specifier..
+	 In.Limit(-1);
+	 Last = true;
+	 do
+	 {
+	    if (In.WriteTillEl(Data,true) == true)
+	       break;
+	 }
+	 while ((Last = Go(false, Req)) == true);
+	 if (Last == false)
+	    return false;
+      }
+   }
+   else
+   {
+      /* Closes encoding is used when the server did not specify a size, the
+         loss of the connection means we are done */
+      // JunkSize: body we want to discard; DownloadSize: announced length;
+      // otherwise an unlimited read until the connection closes
+      if (Req.JunkSize != 0)
+	 In.Limit(Req.JunkSize);
+      else if (Req.DownloadSize != 0)
+	 In.Limit(Req.DownloadSize);
+      else if (Persistent == false)
+	 In.Limit(-1);
+
+      // Just transfer the whole block.
+      do
+      {
+	 if (In.IsLimit() == false)
+	    continue;
+
+	 In.Limit(-1);
+	 return !_error->PendingError();
+      }
+      while (Go(true, Req) == true);
+   }
+
+   return Flush(&Req.File) && !_error->PendingError();
+}
+									/*}}}*/
+bool HttpServerState::RunDataToDevNull(RequestState &Req)		/*{{{*/
+{
+   // Drain the pending response body into /dev/null so a persistent
+   // connection can be reused for the next request.  A connection we
+   // are about to throw away needs no draining at all.
+   if (Persistent == true)
+   {
+      Req.File.Open("/dev/null", FileFd::WriteOnly);
+      return RunData(Req);
+   }
+   return true;
+}
+									/*}}}*/
+bool HttpServerState::ReadHeaderLines(std::string &Data)		/*{{{*/
+{
+   // Hand back the next complete header line from the inbound buffer.
+   bool const haveLine = In.WriteTillEl(Data);
+   return haveLine;
+}
+									/*}}}*/
+bool HttpServerState::LoadNextResponse(bool const ToFile, RequestState &Req)/*{{{*/
+{
+   // One pump of the select loop; data lands in the ring buffers (and
+   // in the file, when ToFile is set).
+   bool const ok = Go(ToFile, Req);
+   return ok;
+}
+									/*}}}*/
+bool HttpServerState::WriteResponse(const std::string &Data)		/*{{{*/
+{
+   // Queue the request text in the outbound ring buffer; Go() pushes
+   // it onto the socket once it becomes writable.
+   bool const queued = Out.Read(Data);
+   return queued;
+}
+									/*}}}*/
+APT_PURE bool HttpServerState::IsOpen()					/*{{{*/
+{
+   // Open as long as we still hold a valid socket descriptor.
+   return ServerFd != -1;
+}
+									/*}}}*/
+bool HttpServerState::InitHashes(HashStringList const &ExpectedHashes)	/*{{{*/
+{
+   // Replace any hasher left over from a previous request with a fresh
+   // one primed with the digests the caller expects.
+   delete In.Hash;
+   In.Hash = new Hashes(ExpectedHashes);
+   return true;
+}
+									/*}}}*/
+void HttpServerState::Reset()						/*{{{*/
+{
+   // Reset the shared per-server state first, then forget our socket.
+   ServerState::Reset();
+   ServerFd = -1;
+}
+									/*}}}*/
+
+APT_PURE Hashes * HttpServerState::GetHashes()				/*{{{*/
+{
+   // The inbound buffer owns the hasher; expose it to the generic code.
+   return In.Hash;
+}
+									/*}}}*/
+// HttpServerState::Die - The server has closed the connection.	/*{{{*/
+// ---------------------------------------------------------------------
+/* Called when reading from or writing to the server failed.  Salvages
+   whatever is still buffered into the target file, then decides whether
+   the close was the normal end of a connection-delimited body (returns
+   true) or a real error (returns false / sets _error). */
+bool HttpServerState::Die(RequestState &Req)
+{
+   // preserve errno across the buffer flushing below
+   unsigned int LErrno = errno;
+
+   // Dump the buffer to the file
+   if (Req.State == RequestState::Data)
+   {
+      if (Req.File.IsOpen() == false)
+	 return true;
+      // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
+      // can't be set
+      if (Req.File.Name() != "/dev/null")
+	 SetNonBlock(Req.File.Fd(),false);
+      while (In.WriteSpace() == true)
+      {
+	 if (In.Write(Req.File.Fd()) == false)
+	    return _error->Errno("write",_("Error writing to the file"));
+
+	 // Done
+	 if (In.IsLimit() == true)
+	    return true;
+      }
+   }
+
+   // See if this is because the server finished the data stream
+   if (In.IsLimit() == false && Req.State != RequestState::Header &&
+       Persistent == true)
+   {
+      Close();
+      if (LErrno == 0)
+	 return _error->Error(_("Error reading from server. Remote end closed connection"));
+      errno = LErrno;
+      return _error->Errno("read",_("Error reading from server"));
+   }
+   else
+   {
+      In.Limit(-1);
+
+      // Nothing left in the buffer
+      if (In.WriteSpace() == false)
+	 return false;
+
+      // We may have got multiple responses back in one packet..
+      Close();
+      return true;
+   }
+   // note: no trailing return -- both branches above already return;
+   // the previous unreachable "return false" has been dropped
+}
+									/*}}}*/
+// HttpServerState::Flush - Dump the buffer into the file		/*{{{*/
+// ---------------------------------------------------------------------
+/* This takes the current input buffer from the Server FD and writes it
+   into the file.  Returns true once the body is complete (limit reached,
+   buffer drained, or the connection is non-persistent anyway). */
+bool HttpServerState::Flush(FileFd * const File)
+{
+   if (File != nullptr)
+   {
+      // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
+      // can't be set
+      if (File->Name() != "/dev/null")
+	 SetNonBlock(File->Fd(),false);
+      // Nothing buffered: everything was already written out.
+      if (In.WriteSpace() == false)
+	 return true;
+
+      // Drain whatever is still sitting in the ring buffer.
+      while (In.WriteSpace() == true)
+      {
+	 if (In.Write(File->Fd()) == false)
+	    return _error->Errno("write",_("Error writing to file"));
+	 if (In.IsLimit() == true)
+	    return true;
+      }
+
+      // Hitting the limit means the body is complete; a non-persistent
+      // connection counts as done once the buffer has been emptied.
+      if (In.IsLimit() == true || Persistent == false)
+	 return true;
+   }
+   return false;
+}
+									/*}}}*/
+// HttpServerState::Go - Run a single loop				/*{{{*/
+// ---------------------------------------------------------------------
+/* This runs the select loop over the server FDs, Output file FDs and
+   stdin.  One call performs at most one read/write per fd; callers loop
+   over it until their condition (header line, limit, ...) is met. */
+bool HttpServerState::Go(bool ToFile, RequestState &Req)
+{
+   // Server has closed the connection and there is nothing buffered
+   // left to deliver -- no progress is possible
+   if (ServerFd == -1 && (In.WriteSpace() == false ||
+			  ToFile == false))
+      return false;
+
+   fd_set rfds,wfds;
+   FD_ZERO(&rfds);
+   FD_ZERO(&wfds);
+
+   /* Add the server. We only send more requests if the connection will
+      be persisting */
+   if (Out.WriteSpace() == true && ServerFd != -1
+       && Persistent == true)
+      FD_SET(ServerFd,&wfds);
+   if (In.ReadSpace() == true && ServerFd != -1)
+      FD_SET(ServerFd,&rfds);
+
+   // Add the file
+   int FileFD = -1;
+   if (Req.File.IsOpen())
+      FileFD = Req.File.Fd();
+
+   if (In.WriteSpace() == true && ToFile == true && FileFD != -1)
+      FD_SET(FileFD,&wfds);
+
+   // Add stdin, so we notice when APT sends us new commands
+   if (Owner->ConfigFindB("DependOnSTDIN", true) == true)
+      FD_SET(STDIN_FILENO,&rfds);
+
+   // Figure out the max fd
+   int MaxFd = FileFD;
+   if (MaxFd < ServerFd)
+      MaxFd = ServerFd;
+
+   // Select
+   struct timeval tv;
+   tv.tv_sec = TimeOut;
+   tv.tv_usec = 0;
+   int Res = 0;
+   if ((Res = select(MaxFd+1,&rfds,&wfds,0,&tv)) < 0)
+   {
+      // a signal interrupting select is not an error, just go around again
+      if (errno == EINTR)
+	 return true;
+      return _error->Errno("select",_("Select failed"));
+   }
+
+   if (Res == 0)
+   {
+      _error->Error(_("Connection timed out"));
+      return Die(Req);
+   }
+
+   // Handle server IO
+   if (ServerFd != -1 && FD_ISSET(ServerFd,&rfds))
+   {
+      errno = 0;
+      if (In.Read(ServerFd) == false)
+	 return Die(Req);
+   }
+
+   if (ServerFd != -1 && FD_ISSET(ServerFd,&wfds))
+   {
+      errno = 0;
+      if (Out.Write(ServerFd) == false)
+	 return Die(Req);
+   }
+
+   // Send data to the file
+   if (FileFD != -1 && FD_ISSET(FileFD,&wfds))
+   {
+      if (In.Write(FileFD) == false)
+	 return _error->Errno("write",_("Error writing to output file"));
+   }
+
+   // enforce the caller-supplied maximum transfer size
+   if (Req.MaximumSize > 0 && Req.File.IsOpen() && Req.File.Failed() == false && Req.File.Tell() > Req.MaximumSize)
+   {
+      Owner->SetFailReason("MaximumSizeExceeded");
+      return _error->Error("Writing more data than expected (%llu > %llu)",
+			   Req.File.Tell(), Req.MaximumSize);
+   }
+
+   // Handle commands from APT
+   if (FD_ISSET(STDIN_FILENO,&rfds))
+   {
+      if (Owner->Run(true) != -1)
+	 exit(100);
+   }
+
+   return true;
+}
+									/*}}}*/
+
+// HttpMethod::SendReq - Send the HTTP request				/*{{{*/
+// ---------------------------------------------------------------------
+/* This places the http request in the outbound buffer */
+void HttpMethod::SendReq(FetchItem *Itm)
+{
+   URI Uri = Itm->Uri;
+   {
+      // a "foo+http" binary serves foo:// URIs over the http transport
+      auto const plus = Binary.find('+');
+      if (plus != std::string::npos)
+	 Uri.Access = Binary.substr(plus + 1);
+   }
+
+   // The HTTP server expects a hostname with a trailing :port
+   std::stringstream Req;
+   string ProperHost;
+
+   // IPv6 literals must be bracketed in the Host header
+   if (Uri.Host.find(':') != string::npos)
+      ProperHost = '[' + Uri.Host + ']';
+   else
+      ProperHost = Uri.Host;
+
+   /* RFC 2616 §5.1.2 requires absolute URIs for requests to proxies,
+      but while its a must for all servers to accept absolute URIs,
+      it is assumed clients will sent an absolute path for non-proxies */
+   std::string requesturi;
+   if (Server->Proxy.Access != "http" || Server->Proxy.empty() == true || Server->Proxy.Host.empty())
+      requesturi = Uri.Path;
+   else
+      requesturi = Uri;
+
+   // The "+" is encoded as a workaround for a amazon S3 bug
+   // see LP bugs #1003633 and #1086997.
+   requesturi = QuoteString(requesturi, "+~ ");
+
+   /* Build the request. No keep-alive is included as it is the default
+      in 1.1, can cause problems with proxies, and we are an HTTP/1.1
+      client anyway.
+      C.f. https://tools.ietf.org/wg/httpbis/trac/ticket/158 */
+   Req << "GET " << requesturi << " HTTP/1.1\r\n";
+   if (Uri.Port != 0)
+      Req << "Host: " << ProperHost << ":" << std::to_string(Uri.Port) << "\r\n";
+   else
+      Req << "Host: " << ProperHost << "\r\n";
+
+   // generate a cache control header (if needed)
+   if (ConfigFindB("No-Cache",false) == true)
+      Req << "Cache-Control: no-cache\r\n"
+	  << "Pragma: no-cache\r\n";
+   else if (Itm->IndexFile == true)
+      Req << "Cache-Control: max-age=" << std::to_string(ConfigFindI("Max-Age", 0)) << "\r\n";
+   else if (ConfigFindB("No-Store", false) == true)
+      Req << "Cache-Control: no-store\r\n";
+
+   // If we ask for uncompressed files servers might respond with content-
+   // negotiation which lets us end up with compressed files we do not support,
+   // see 657029, 657560 and co, so if we have no extension on the request
+   // ask for text only. As a sidenote: If there is nothing to negotate servers
+   // seem to be nice and ignore it.
+   if (ConfigFindB("SendAccept", true) == true)
+   {
+      size_t const filepos = Itm->Uri.find_last_of('/');
+      string const file = Itm->Uri.substr(filepos + 1);
+      if (flExtension(file) == file)
+	 Req << "Accept: text/*\r\n";
+   }
+
+   // Check for a partial file and send if-queries accordingly
+   struct stat SBuf;
+   if (Server->RangesAllowed && stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
+      Req << "Range: bytes=" << std::to_string(SBuf.st_size) << "-\r\n"
+	  << "If-Range: " << TimeRFC1123(SBuf.st_mtime, false) << "\r\n";
+   else if (Itm->LastModified != 0)
+      Req << "If-Modified-Since: " << TimeRFC1123(Itm->LastModified, false).c_str() << "\r\n";
+
+   if (Server->Proxy.Access == "http" &&
+       (Server->Proxy.User.empty() == false || Server->Proxy.Password.empty() == false))
+      Req << "Proxy-Authorization: Basic "
+	  << Base64Encode(Server->Proxy.User + ":" + Server->Proxy.Password) << "\r\n";
+
+   // pick up credentials from the netrc file if the URI carries none itself
+   maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc"));
+   if (Uri.User.empty() == false || Uri.Password.empty() == false)
+      Req << "Authorization: Basic "
+	  << Base64Encode(Uri.User + ":" + Uri.Password) << "\r\n";
+
+   Req << "User-Agent: " << ConfigFind("User-Agent",
+		"Debian APT-HTTP/1.3 (" PACKAGE_VERSION ")") << "\r\n";
+
+   Req << "\r\n";
+
+   if (Debug == true)
+      cerr << Req.str() << endl;
+
+   // hand the finished request to the server state for transmission
+   Server->WriteResponse(Req.str());
+}
+									/*}}}*/
+std::unique_ptr<ServerState> HttpMethod::CreateServerState(URI const &uri)/*{{{*/
+{
+   // Plain HTTP connections are tracked by a HttpServerState.
+   std::unique_ptr<ServerState> state(new HttpServerState(uri, this));
+   return state;
+}
+									/*}}}*/
+void HttpMethod::RotateDNS()						/*{{{*/
+{
+   // Delegate to the global round-robin DNS rotation helper.
+   ::RotateDNS();
+}
+									/*}}}*/
+// HttpMethod::DealWithHeaders - React to the parsed response header	/*{{{*/
+/* Let BaseHttpMethod handle status codes and redirects first; when it
+   decides the body should be stored, open the destination file, record
+   the state the signal handler needs for cleanup and initialize the
+   hashers (including hashing any already-present partial data). */
+BaseHttpMethod::DealWithHeadersResult HttpMethod::DealWithHeaders(FetchResult &Res, RequestState &Req)
+{
+   auto ret = BaseHttpMethod::DealWithHeaders(Res, Req);
+   if (ret != BaseHttpMethod::FILE_IS_OPEN)
+      return ret;
+   if (Req.File.Open(Queue->DestFile, FileFd::WriteAny) == false)
+      return ERROR_NOT_FROM_SERVER;
+
+   FailFile = Queue->DestFile;
+   FailFile.c_str();   // Make sure we don't do a malloc in the signal handler
+   FailFd = Req.File.Fd();
+   FailTime = Req.Date;
+
+   if (Server->InitHashes(Queue->ExpectedHashes) == false || Req.AddPartialFileToHashes(Req.File) == false)
+   {
+      _error->Errno("read",_("Problem hashing file"));
+      return ERROR_NOT_FROM_SERVER;
+   }
+   if (Req.StartPos > 0)
+      Res.ResumePoint = Req.StartPos;
+
+   // non-blocking so the Go() select loop never stalls on the file
+   SetNonBlock(Req.File.Fd(),true);
+   return FILE_IS_OPEN;
+}
+									/*}}}*/
+HttpMethod::HttpMethod(std::string &&pProg) : BaseHttpMethod(pProg.c_str(), "1.2", Pipeline | SendConfig)/*{{{*/
+{
+   // Register the names this binary answers to: the generic "http"
+   // (unless we are it already) plus any "foo" from a "foo+http" alias.
+   auto registerName = std::inserter(methodNames, methodNames.begin());
+   if (Binary != "http")
+      registerName = "http";
+   std::string::size_type const plusPos = Binary.find('+');
+   if (plusPos != std::string::npos)
+      registerName = Binary.substr(0, plusPos);
+}
+									/*}}}*/
diff --git a/methods/http.h b/methods/http.h
new file mode 100644
index 000000000..c79a6454e
--- /dev/null
+++ b/methods/http.h
@@ -0,0 +1,142 @@
+// -*- mode: cpp; mode: fold -*-
+// Description								/*{{{*/
+// $Id: http.h,v 1.12 2002/04/18 05:09:38 jgg Exp $
+/* ######################################################################
+
+   HTTP Acquire Method - This is the HTTP acquire method for APT.
+
+   ##################################################################### */
+									/*}}}*/
+
+#ifndef APT_HTTP_H
+#define APT_HTTP_H
+
+#include <apt-pkg/strutl.h>
+
+#include <string>
+#include <sys/time.h>
+#include <iostream>
+
+#include "basehttp.h"
+
+using std::cout;
+using std::endl;
+
+class FileFd;
+class HttpMethod;
+class Hashes;
+
+/** \brief Bounded ring buffer used to shuffle bytes between fds.
+ *
+ * InP and OutP are monotonically growing byte counters: InP - OutP is
+ * the current fill level and (counter % Size) the physical offset
+ * inside Buf.  MaxGet caps how far OutP may advance (used for
+ * Content-Length / chunk borders via Limit()), and data leaving the
+ * buffer is fed through Hash when one is attached. */
+class CircleBuf
+{
+   unsigned char *Buf;		// backing storage of Size bytes
+   unsigned long long Size;	// capacity of Buf
+   unsigned long long InP;	// total bytes ever read into the buffer
+   unsigned long long OutP;	// total bytes ever written out of it
+   std::string OutQueue;	// string data queued via Read(string) -- presumably merged in by FillOut(); confirm in http.cc
+   unsigned long long StrPos;	// consumed prefix of OutQueue
+   unsigned long long MaxGet;	// absolute OutP value writes must not pass
+   struct timeval Start;	// construction time, used by Stats()
+
+   // process-wide bandwidth limiting state shared by all instances
+   static unsigned long long BwReadLimit;	// configured limit (0 = off)
+   static unsigned long long BwTickReadData;	// bytes read in the current tick
+   static struct timeval BwReadTick;		// timestamp of the current tick
+   static const unsigned int BW_HZ;		// ticks per second
+
+   // Contiguous room for a single read at InP: free space, clipped at
+   // the physical end of the array (the wrap-around needs another call).
+   unsigned long long LeftRead() const
+   {
+      unsigned long long Sz = Size - (InP - OutP);
+      if (Sz > Size - (InP%Size))
+	 Sz = Size - (InP%Size);
+      return Sz;
+   }
+   // Contiguous data available for a single write at OutP, additionally
+   // clipped to the MaxGet limit.
+   unsigned long long LeftWrite() const
+   {
+      unsigned long long Sz = InP - OutP;
+      if (InP > MaxGet)
+	 Sz = MaxGet - OutP;
+      if (Sz > Size - (OutP%Size))
+	 Sz = Size - (OutP%Size);
+      return Sz;
+   }
+   void FillOut();	// move queued OutQueue data into the ring
+
+   public:
+   Hashes *Hash;	// optional hasher fed with outgoing data (set via InitHashes)
+   // total amount of data that got written so far
+   unsigned long long TotalWriten;
+
+   // Read data in
+   bool Read(int Fd);
+   bool Read(std::string const &Data);
+
+   // Write data out
+   bool Write(int Fd);
+   bool WriteTillEl(std::string &Data,bool Single = false);
+
+   // Control the write limit; -1 means "unlimited" (MaxGet = max value)
+   void Limit(long long Max) {if (Max == -1) MaxGet = 0-1; else MaxGet = OutP + Max;}
+   bool IsLimit() const {return MaxGet == OutP;};
+   void Print() const {cout << MaxGet << ',' << OutP << endl;};
+
+   // Test for free space in the buffer
+   bool ReadSpace() const {return Size - (InP - OutP) > 0;};
+   bool WriteSpace() const {return InP - OutP > 0;};
+
+   void Reset();
+   // Dump everything
+   void Stats();
+
+   CircleBuf(HttpMethod const * const Owner, unsigned long long Size);
+   ~CircleBuf();
+};
+
+/** \brief ServerState implementation for the plain-HTTP method.
+ *
+ * Owns the socket to the server plus two ring buffers: In carries data
+ * FROM the server (through the hasher into the target file), Out
+ * carries the request text TO the server. */
+struct HttpServerState: public ServerState
+{
+   // This is the connection itself. Output is data FROM the server
+   CircleBuf In;	// server -> us (response bytes)
+   CircleBuf Out;	// us -> server (request bytes)
+   int ServerFd;	// connected socket, -1 while closed
+
+   protected:
+   virtual bool ReadHeaderLines(std::string &Data) APT_OVERRIDE;
+   virtual bool LoadNextResponse(bool const ToFile, RequestState &Req) APT_OVERRIDE;
+   virtual bool WriteResponse(std::string const &Data) APT_OVERRIDE;
+
+   public:
+   virtual void Reset() APT_OVERRIDE;
+
+   virtual bool RunData(RequestState &Req) APT_OVERRIDE;
+   virtual bool RunDataToDevNull(RequestState &Req) APT_OVERRIDE;
+
+   virtual bool Open() APT_OVERRIDE;
+   virtual bool IsOpen() APT_OVERRIDE;
+   virtual bool Close() APT_OVERRIDE;
+   virtual bool InitHashes(HashStringList const &ExpectedHashes) APT_OVERRIDE;
+   virtual Hashes * GetHashes() APT_OVERRIDE;
+   virtual bool Die(RequestState &Req) APT_OVERRIDE;
+   virtual bool Flush(FileFd * const File) APT_OVERRIDE;
+   virtual bool Go(bool ToFile, RequestState &Req) APT_OVERRIDE;
+
+   HttpServerState(URI Srv, HttpMethod *Owner);
+   virtual ~HttpServerState() {Close();};
+};
+
+/** \brief Acquire method implementation for http:// (and foo+http://) URIs. */
+class HttpMethod : public BaseHttpMethod
+{
+   public:
+   virtual void SendReq(FetchItem *Itm) APT_OVERRIDE;
+
+   virtual std::unique_ptr<ServerState> CreateServerState(URI const &uri) APT_OVERRIDE;
+   virtual void RotateDNS() APT_OVERRIDE;
+   virtual DealWithHeadersResult DealWithHeaders(FetchResult &Res, RequestState &Req) APT_OVERRIDE;
+
+   protected:
+   std::string AutoDetectProxyCmd;	// NOTE(review): not referenced in this file -- presumably used by the base/sibling method; confirm
+
+   public:
+   // HttpServerState pokes at our config helpers directly
+   friend struct HttpServerState;
+
+   explicit HttpMethod(std::string &&pProg);
+};
+
+#endif
diff --git a/methods/http_main.cc b/methods/http_main.cc
new file mode 100644
index 000000000..1e56044b7
--- /dev/null
+++ b/methods/http_main.cc
@@ -0,0 +1,17 @@
+#include <config.h>
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/error.h>
+#include <signal.h>
+
+#include "http.h"
+
+int main(int, const char *argv[])
+{
+   // ignore SIGPIPE, this can happen on write() if the socket
+   // closes the connection (this is dealt with via ServerDie())
+   signal(SIGPIPE, SIG_IGN);
+   // Derive the method name from how we were invoked: a plain name
+   // other than "http" is treated as the alias "name+http".
+   std::string binaryName = flNotDir(argv[0]);
+   if (binaryName != "http" && binaryName.find('+') == std::string::npos)
+      binaryName.append("+http");
+   return HttpMethod(std::move(binaryName)).Loop();
+}
diff --git a/methods/https.cc b/methods/https.cc
new file mode 100644
index 000000000..d71ef0bf0
--- /dev/null
+++ b/methods/https.cc
@@ -0,0 +1,548 @@
+//-*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: http.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $
+/* ######################################################################
+
+ HTTPS Acquire Method - This is the HTTPS acquire method for APT.
+
+ It uses libcurl
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <config.h>
+
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/hashes.h>
+#include <apt-pkg/netrc.h>
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/macros.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/proxy.h>
+
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <stdlib.h>
+
+#include <array>
+#include <iostream>
+#include <sstream>
+
+
+#include "https.h"
+
+#include <apti18n.h>
+ /*}}}*/
+using namespace std;
+
+/** \brief Bundle of everything the static curl callbacks need.
+ *
+ * curl forwards only a single user pointer to parse_header() and
+ * write_data(), so the method object, the result record, the queue item
+ * and the per-request state are packed together here.  All pointers are
+ * non-owning and must outlive the curl_easy_perform() call. */
+struct APT_HIDDEN CURLUserPointer {
+   HttpsMethod * const https;			// method instance driving the transfer
+   HttpsMethod::FetchResult * const Res;	// result record reported back to APT
+   HttpsMethod::FetchItem const * const Itm;	// item currently being fetched
+   RequestState * const Req;			// header/file state for this request
+   CURLUserPointer(HttpsMethod * const https, HttpsMethod::FetchResult * const Res,
+	 HttpsMethod::FetchItem const * const Itm, RequestState * const Req) : https(https), Res(Res), Itm(Itm), Req(Req) {}
+};
+
+// HttpsMethod::parse_header - curl CURLOPT_HEADERFUNCTION callback
+// ---------------------------------------------------------------------
+/* Called by curl once per response header line, including the blank
+   line terminating the header block.  Individual lines are fed to
+   RequestState::HeaderLine(); the terminating blank line opens the
+   destination file, sorts out resume/416 handling and announces the
+   transfer to APT.  Per the curl contract, returning any value other
+   than size*nmemb aborts the transfer. */
+size_t
+HttpsMethod::parse_header(void *buffer, size_t size, size_t nmemb, void *userp)
+{
+   size_t len = size * nmemb;
+   CURLUserPointer *me = static_cast<CURLUserPointer *>(userp);
+   // guard: "--len" below would underflow on an empty callback and
+   // index far past the end of 'line'
+   if (len == 0)
+      return len;
+   std::string line((char*) buffer, len);
+   // strip trailing whitespace (the CRLF line terminator and friends)
+   for (--len; len > 0; --len)
+      if (isspace_ascii(line[len]) == 0)
+      {
+	 ++len;
+	 break;
+      }
+   line.erase(len);
+
+   if (line.empty() == true)
+   {
+      // end of the header block: open the target and decide how to continue
+      if (me->Req->File.Open(me->Itm->DestFile, FileFd::WriteAny) == false)
+	 return 0; // abort: 0 can never equal the (non-zero) size*nmemb
+
+      me->Req->JunkSize = 0;
+      if (me->Req->Result != 416 && me->Req->StartPos != 0)
+	 ; // resuming a partial file: keep the StartPos we negotiated
+      else if (me->Req->Result == 416)
+      {
+	 // 416 Range Not Satisfiable: maybe the file is already complete
+	 bool partialHit = false;
+	 if (me->Itm->ExpectedHashes.usable() == true)
+	 {
+	    Hashes resultHashes(me->Itm->ExpectedHashes);
+	    FileFd file(me->Itm->DestFile, FileFd::ReadOnly);
+	    me->Req->TotalFileSize = file.FileSize();
+	    me->Req->Date = file.ModificationTime();
+	    resultHashes.AddFD(file);
+	    HashStringList const hashList = resultHashes.GetHashStringList();
+	    partialHit = (me->Itm->ExpectedHashes == hashList);
+	 }
+	 else if (me->Req->Result == 416 && me->Req->TotalFileSize == me->Req->File.FileSize())
+	    partialHit = true;
+
+	 if (partialHit == true)
+	 {
+	    // we have the complete file already: fake a 200 and junk the body
+	    me->Req->Result = 200;
+	    me->Req->StartPos = me->Req->TotalFileSize;
+	    // the actual size is not important for https as curl will deal with it
+	    // by itself and e.g. doesn't bother us with transport-encoding…
+	    me->Req->JunkSize = std::numeric_limits<unsigned long long>::max();
+	 }
+	 else
+	    me->Req->StartPos = 0;
+      }
+      else
+	 me->Req->StartPos = 0;
+
+      me->Res->LastModified = me->Req->Date;
+      me->Res->Size = me->Req->TotalFileSize;
+      me->Res->ResumePoint = me->Req->StartPos;
+
+      // we expect valid data, so tell our caller we get the file now
+      if (me->Req->Result >= 200 && me->Req->Result < 300)
+      {
+	 if (me->Res->Size != 0 && me->Res->Size > me->Res->ResumePoint)
+	    me->https->URIStart(*me->Res);
+	 if (me->Req->AddPartialFileToHashes(me->Req->File) == false)
+	    return 0;
+      }
+      else
+	 me->Req->JunkSize = std::numeric_limits<decltype(me->Req->JunkSize)>::max();
+   }
+   else if (me->Req->HeaderLine(line) == false)
+      return 0;
+
+   return size*nmemb;
+}
+
+// HttpsMethod::write_data - curl CURLOPT_WRITEFUNCTION callback
+// ---------------------------------------------------------------------
+/* Called by curl with each chunk of body data.  Junk responses (error
+   pages, already-complete files) are swallowed; real data is written to
+   the destination file, checked against MaximumSize and hashed.
+   Returning anything other than the given buffer size aborts the
+   transfer. */
+size_t
+HttpsMethod::write_data(void *buffer, size_t size, size_t nmemb, void *userp)
+{
+   CURLUserPointer *me = static_cast<CURLUserPointer *>(userp);
+   size_t buffer_size = size * nmemb;
+   // we don't need to count the junk here, just drop anything we get as
+   // we don't always know how long it would be, e.g. in chunked encoding.
+   if (me->Req->JunkSize != 0)
+      return buffer_size;
+
+   if(me->Req->File.Write(buffer, buffer_size) != true)
+      return 0;
+
+   if(me->https->Queue->MaximumSize > 0)
+   {
+      unsigned long long const TotalWritten = me->Req->File.Tell();
+      if (TotalWritten > me->https->Queue->MaximumSize)
+      {
+	 me->https->SetFailReason("MaximumSizeExceeded");
+	 _error->Error("Writing more data than expected (%llu > %llu)",
+	       TotalWritten, me->https->Queue->MaximumSize);
+	 return 0;
+      }
+   }
+
+   // feed the hasher so the download can be verified afterwards
+   if (me->https->Server->GetHashes()->Add((unsigned char const * const)buffer, buffer_size) == false)
+      return 0;
+
+   return buffer_size;
+}
+
+// HttpsServerState::HttpsServerState - Constructor			/*{{{*/
+/* Only the hash state and timeout are used from this class by the
+   curl-based method; the actual socket handling lives inside libcurl.
+   Hash starts as nullptr (file style, instead of NULL) until
+   InitHashes() is called. */
+HttpsServerState::HttpsServerState(URI Srv,HttpsMethod * Owner) : ServerState(Srv, Owner), Hash(nullptr)
+{
+   TimeOut = Owner->ConfigFindI("Timeout", TimeOut);
+   Reset();
+}
+									/*}}}*/
+bool HttpsServerState::InitHashes(HashStringList const &ExpectedHashes)	/*{{{*/
+{
+   // Start a fresh hasher for the next download, dropping the old one.
+   delete Hash;
+   Hash = new Hashes(ExpectedHashes);
+   return true;
+}
+									/*}}}*/
+APT_PURE Hashes * HttpsServerState::GetHashes()				/*{{{*/
+{
+   // May still be the constructor's null value until InitHashes() ran.
+   return Hash;
+}
+									/*}}}*/
+
+// HttpsMethod::SetupProxy - Program curl's proxy settings		/*{{{*/
+/* Decides which proxy (if any) to use for the current Queue item,
+   honouring apt.conf settings, DIRECT, no_proxy and the environment,
+   and configures curl accordingly.  Returns false for a proxy scheme
+   curl cannot handle. */
+bool HttpsMethod::SetupProxy()
+{
+   URI ServerName = Queue->Uri;
+
+   // Determine the proxy setting
+   AutoDetectProxy(ServerName);
+
+   // Curl should never read proxy settings from the environment, as
+   // we determine which proxy to use. Do this for consistency among
+   // methods and prevent an environment variable overriding a
+   // no-proxy ("DIRECT") setting in apt.conf.
+   curl_easy_setopt(curl, CURLOPT_PROXY, "");
+
+   // Determine the proxy setting - try https first, fallback to http and use env at last
+   string UseProxy = ConfigFind("Proxy::" + ServerName.Host, "");
+   if (UseProxy.empty() == true)
+      UseProxy = ConfigFind("Proxy", "");
+   // User wants to use NO proxy, so nothing to setup
+   if (UseProxy == "DIRECT")
+      return true;
+
+   // Parse no_proxy, a comma (,) separated list of domains we don't want to use
+   // a proxy for so we stop right here if it is in the list
+   if (getenv("no_proxy") != 0 && CheckDomainList(ServerName.Host,getenv("no_proxy")) == true)
+      return true;
+
+   if (UseProxy.empty() == true)
+   {
+      const char* result = nullptr;
+      if (std::find(methodNames.begin(), methodNames.end(), "https") != methodNames.end())
+	 result = getenv("https_proxy");
+      // FIXME: Fall back to http_proxy is to remain compatible with
+      // existing setups and behaviour of apt.conf. This should be
+      // deprecated in the future (including apt.conf). Most other
+      // programs do not fall back to http proxy settings and neither
+      // should Apt.
+      if (result == nullptr && std::find(methodNames.begin(), methodNames.end(), "http") != methodNames.end())
+	 result = getenv("http_proxy");
+      UseProxy = result == nullptr ? "" : result;
+   }
+
+   // Determine what host and port to use based on the proxy settings
+   if (UseProxy.empty() == false)
+   {
+      Proxy = UseProxy;
+      AddProxyAuth(Proxy, ServerName);
+
+      // map the proxy URI scheme onto curl's proxy types
+      if (Proxy.Access == "socks5h")
+	 curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5_HOSTNAME);
+      else if (Proxy.Access == "socks5")
+	 curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5);
+      else if (Proxy.Access == "socks4a")
+	 curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4A);
+      else if (Proxy.Access == "socks")
+	 curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4);
+      else if (Proxy.Access == "http" || Proxy.Access == "https")
+	 curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_HTTP);
+      else
+	 return false;
+
+      // NOTE(review): "!= 1" looks like a typo for "!= 0" (unset port);
+      // as written a proxy listening on port 1 would never have its
+      // port configured -- confirm intent before changing.
+      if (Proxy.Port != 1)
+	 curl_easy_setopt(curl, CURLOPT_PROXYPORT, Proxy.Port);
+      curl_easy_setopt(curl, CURLOPT_PROXY, Proxy.Host.c_str());
+      if (Proxy.User.empty() == false || Proxy.Password.empty() == false)
+      {
+	 curl_easy_setopt(curl, CURLOPT_PROXYUSERNAME, Proxy.User.c_str());
+	 curl_easy_setopt(curl, CURLOPT_PROXYPASSWORD, Proxy.Password.c_str());
+      }
+   }
+   return true;
+}									/*}}}*/
+// HttpsMethod::Fetch - Fetch an item /*{{{*/
+// ---------------------------------------------------------------------
+/* This adds an item to the pipeline. We keep the pipeline at a fixed
+ depth. */
+bool HttpsMethod::Fetch(FetchItem *Itm)
+{
+ struct stat SBuf;
+ struct curl_slist *headers=NULL;
+ char curl_errorstr[CURL_ERROR_SIZE];
+ URI Uri = Itm->Uri;
+ setPostfixForMethodNames(Uri.Host.c_str());
+ AllowRedirect = ConfigFindB("AllowRedirect", true);
+ Debug = DebugEnabled();
+
+ // TODO:
+ // - http::Pipeline-Depth
+ // - error checking/reporting
+ // - more debug options? (CURLOPT_DEBUGFUNCTION?)
+ {
+ auto const plus = Binary.find('+');
+ if (plus != std::string::npos)
+ Uri.Access = Binary.substr(plus + 1);
+ }
+
+ curl_easy_reset(curl);
+ if (SetupProxy() == false)
+ return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str());
+
+ maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc"));
+ if (Server == nullptr || Server->Comp(Itm->Uri) == false)
+ Server = CreateServerState(Itm->Uri);
+
+ // The "+" is encoded as a workaround for a amazon S3 bug
+ // see LP bugs #1003633 and #1086997. (taken from http method)
+ Uri.Path = QuoteString(Uri.Path, "+~ ");
+
+ FetchResult Res;
+ RequestState Req(this, Server.get());
+ CURLUserPointer userp(this, &Res, Itm, &Req);
+ // callbacks
+ curl_easy_setopt(curl, CURLOPT_URL, static_cast<string>(Uri).c_str());
+ curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, parse_header);
+ curl_easy_setopt(curl, CURLOPT_WRITEHEADER, &userp);
+ curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
+ curl_easy_setopt(curl, CURLOPT_WRITEDATA, &userp);
+ // options
+ curl_easy_setopt(curl, CURLOPT_NOPROGRESS, true);
+ curl_easy_setopt(curl, CURLOPT_FILETIME, true);
+ curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 0);
+
+ if (std::find(methodNames.begin(), methodNames.end(), "https") != methodNames.end())
+ {
+ curl_easy_setopt(curl, CURLOPT_PROTOCOLS, CURLPROTO_HTTPS);
+ curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTPS);
+
+ // File containing the list of trusted CA.
+ std::string const cainfo = ConfigFind("CaInfo", "");
+ if(cainfo.empty() == false)
+ curl_easy_setopt(curl, CURLOPT_CAINFO, cainfo.c_str());
+ // Check server certificate against previous CA list ...
+ curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, ConfigFindB("Verify-Peer", true) ? 1 : 0);
+ // ... and hostname against cert CN or subjectAltName
+ curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, ConfigFindB("Verify-Host", true) ? 2 : 0);
+ // Also enforce issuer of server certificate using its cert
+ std::string const issuercert = ConfigFind("IssuerCert", "");
+ if(issuercert.empty() == false)
+ curl_easy_setopt(curl, CURLOPT_ISSUERCERT, issuercert.c_str());
+ // For client authentication, certificate file ...
+ std::string const pem = ConfigFind("SslCert", "");
+ if(pem.empty() == false)
+ curl_easy_setopt(curl, CURLOPT_SSLCERT, pem.c_str());
+ // ... and associated key.
+ std::string const key = ConfigFind("SslKey", "");
+ if(key.empty() == false)
+ curl_easy_setopt(curl, CURLOPT_SSLKEY, key.c_str());
+ // Allow forcing SSL version to SSLv3 or TLSv1
+ long final_version = CURL_SSLVERSION_DEFAULT;
+ std::string const sslversion = ConfigFind("SslForceVersion", "");
+ if(sslversion == "TLSv1")
+ final_version = CURL_SSLVERSION_TLSv1;
+ else if(sslversion == "TLSv1.0")
+ final_version = CURL_SSLVERSION_TLSv1_0;
+ else if(sslversion == "TLSv1.1")
+ final_version = CURL_SSLVERSION_TLSv1_1;
+ else if(sslversion == "TLSv1.2")
+ final_version = CURL_SSLVERSION_TLSv1_2;
+ else if(sslversion == "SSLv3")
+ final_version = CURL_SSLVERSION_SSLv3;
+ curl_easy_setopt(curl, CURLOPT_SSLVERSION, final_version);
+ // CRL file
+ std::string const crlfile = ConfigFind("CrlFile", "");
+ if(crlfile.empty() == false)
+ curl_easy_setopt(curl, CURLOPT_CRLFILE, crlfile.c_str());
+ }
+ else
+ {
+ curl_easy_setopt(curl, CURLOPT_PROTOCOLS, CURLPROTO_HTTP);
+ curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTP);
+ }
+ // cache-control
+ if(ConfigFindB("No-Cache", false) == false)
+ {
+ // cache enabled
+ if (ConfigFindB("No-Store", false) == true)
+ headers = curl_slist_append(headers,"Cache-Control: no-store");
+ std::string ss;
+ strprintf(ss, "Cache-Control: max-age=%u", ConfigFindI("Max-Age", 0));
+ headers = curl_slist_append(headers, ss.c_str());
+ } else {
+ // cache disabled by user
+ headers = curl_slist_append(headers, "Cache-Control: no-cache");
+ headers = curl_slist_append(headers, "Pragma: no-cache");
+ }
+ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
+ // speed limit
+ int const dlLimit = ConfigFindI("Dl-Limit", 0) * 1024;
+ if (dlLimit > 0)
+ curl_easy_setopt(curl, CURLOPT_MAX_RECV_SPEED_LARGE, dlLimit);
+
+ // set header
+ curl_easy_setopt(curl, CURLOPT_USERAGENT, ConfigFind("User-Agent", "Debian APT-CURL/1.0 (" PACKAGE_VERSION ")").c_str());
+
+ // set timeout
+ int const timeout = ConfigFindI("Timeout", 120);
+ curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, timeout);
+ //set really low lowspeed timeout (see #497983)
+ curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT, DL_MIN_SPEED);
+ curl_easy_setopt(curl, CURLOPT_LOW_SPEED_TIME, timeout);
+
+ if(_config->FindB("Acquire::ForceIPv4", false) == true)
+ curl_easy_setopt(curl, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4);
+ else if(_config->FindB("Acquire::ForceIPv6", false) == true)
+ curl_easy_setopt(curl, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V6);
+
+ // debug
+ if (Debug == true)
+ curl_easy_setopt(curl, CURLOPT_VERBOSE, true);
+
+ // error handling
+ curl_errorstr[0] = '\0';
+ curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_errorstr);
+
+ // If we ask for uncompressed files servers might respond with content-
+ // negotiation which lets us end up with compressed files we do not support,
+ // see 657029, 657560 and co, so if we have no extension on the request
+   // ask for text only. As a sidenote: If there is nothing to negotiate servers
+ // seem to be nice and ignore it.
+ if (ConfigFindB("SendAccept", true))
+ {
+ size_t const filepos = Itm->Uri.find_last_of('/');
+ string const file = Itm->Uri.substr(filepos + 1);
+ if (flExtension(file) == file)
+ headers = curl_slist_append(headers, "Accept: text/*");
+ }
+
+ // if we have the file send an if-range query with a range header
+ if (Server->RangesAllowed && stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
+ {
+ std::string Buf;
+ strprintf(Buf, "Range: bytes=%lli-", (long long) SBuf.st_size);
+ headers = curl_slist_append(headers, Buf.c_str());
+ strprintf(Buf, "If-Range: %s", TimeRFC1123(SBuf.st_mtime, false).c_str());
+ headers = curl_slist_append(headers, Buf.c_str());
+ }
+ else if(Itm->LastModified > 0)
+ {
+ curl_easy_setopt(curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_IFMODSINCE);
+ curl_easy_setopt(curl, CURLOPT_TIMEVALUE, Itm->LastModified);
+ }
+
+ if (Server->InitHashes(Itm->ExpectedHashes) == false)
+ return false;
+
+ // keep apt updated
+ Res.Filename = Itm->DestFile;
+
+ // get it!
+ CURLcode success = curl_easy_perform(curl);
+
+ // If the server returns 200 OK but the If-Modified-Since condition is not
+ // met, CURLINFO_CONDITION_UNMET will be set to 1
+ long curl_condition_unmet = 0;
+ curl_easy_getinfo(curl, CURLINFO_CONDITION_UNMET, &curl_condition_unmet);
+ if (curl_condition_unmet == 1)
+ Req.Result = 304;
+
+ Req.File.Close();
+ curl_slist_free_all(headers);
+
+ // cleanup
+ if (success != CURLE_OK)
+ {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wswitch"
+ switch (success)
+ {
+ case CURLE_COULDNT_RESOLVE_PROXY:
+ case CURLE_COULDNT_RESOLVE_HOST:
+ SetFailReason("ResolveFailure");
+ break;
+ case CURLE_COULDNT_CONNECT:
+ SetFailReason("ConnectionRefused");
+ break;
+ case CURLE_OPERATION_TIMEDOUT:
+ SetFailReason("Timeout");
+ break;
+ }
+#pragma GCC diagnostic pop
+ // only take curls technical errors if we haven't our own
+ // (e.g. for the maximum size limit we have and curls can be confusing)
+ if (_error->PendingError() == false)
+ _error->Error("%s", curl_errorstr);
+ else
+ _error->Warning("curl: %s", curl_errorstr);
+ return false;
+ }
+
+ switch (DealWithHeaders(Res, Req))
+ {
+ case BaseHttpMethod::IMS_HIT:
+ URIDone(Res);
+ break;
+
+ case BaseHttpMethod::ERROR_WITH_CONTENT_PAGE:
+ // unlink, no need keep 401/404 page content in partial/
+ RemoveFile(Binary.c_str(), Req.File.Name());
+ case BaseHttpMethod::ERROR_UNRECOVERABLE:
+ case BaseHttpMethod::ERROR_NOT_FROM_SERVER:
+ return false;
+
+ case BaseHttpMethod::TRY_AGAIN_OR_REDIRECT:
+ Redirect(NextURI);
+ break;
+
+ case BaseHttpMethod::FILE_IS_OPEN:
+ struct stat resultStat;
+ if (unlikely(stat(Req.File.Name().c_str(), &resultStat) != 0))
+ {
+ _error->Errno("stat", "Unable to access file %s", Req.File.Name().c_str());
+ return false;
+ }
+ Res.Size = resultStat.st_size;
+
+ // Timestamp
+ curl_easy_getinfo(curl, CURLINFO_FILETIME, &Res.LastModified);
+ if (Res.LastModified != -1)
+ {
+ struct timeval times[2];
+ times[0].tv_sec = Res.LastModified;
+ times[1].tv_sec = Res.LastModified;
+ times[0].tv_usec = times[1].tv_usec = 0;
+ utimes(Req.File.Name().c_str(), times);
+ }
+ else
+ Res.LastModified = resultStat.st_mtime;
+
+ // take hashes
+ Res.TakeHashes(*(Server->GetHashes()));
+
+ // keep apt updated
+ URIDone(Res);
+ break;
+ }
+ return true;
+}
+ /*}}}*/
+// HttpsMethod::CreateServerState - Construct a curl-backed server state /*{{{*/
+// ---------------------------------------------------------------------
+/* The returned HttpsServerState is mostly inert: the actual transfer is
+   performed by libcurl in Fetch(), so this state object only carries the
+   hash machinery (see https.h). */
+std::unique_ptr<ServerState> HttpsMethod::CreateServerState(URI const &uri)/*{{{*/
+{
+   return std::unique_ptr<ServerState>(new HttpsServerState(uri, this));
+}
+ /*}}}*/
+// HttpsMethod::HttpsMethod - Constructor				/*{{{*/
+// ---------------------------------------------------------------------
+/* Registers the configuration names this method answers to and
+   initializes libcurl. A binary named "foo+https" also reads the
+   config of "foo" (and always that of "http"); SSL support is only
+   initialized when "https" is among the registered names. */
+HttpsMethod::HttpsMethod(std::string &&pProg) : BaseHttpMethod(std::move(pProg),"1.2",Pipeline | SendConfig)/*{{{*/
+{
+   auto addName = std::inserter(methodNames, methodNames.begin());
+   // all https-style methods also fall back to plain http configuration
+   addName = "http";
+   auto const plus = Binary.find('+');
+   if (plus != std::string::npos)
+   {
+      // "foo+https" answers to "foo+https", "https" (via base class) and "foo"
+      addName = Binary.substr(plus + 1);
+      auto base = Binary.substr(0, plus);
+      if (base != "https")
+         addName = base;
+   }
+   if (std::find(methodNames.begin(), methodNames.end(), "https") != methodNames.end())
+      curl_global_init(CURL_GLOBAL_SSL);
+   else
+      curl_global_init(CURL_GLOBAL_NOTHING);
+   curl = curl_easy_init();
+}
+ /*}}}*/
+// HttpsMethod::~HttpsMethod - Release the curl easy handle		/*{{{*/
+HttpsMethod::~HttpsMethod()						/*{{{*/
+{
+   curl_easy_cleanup(curl);
+}
+ /*}}}*/
+// main - Entry point: derive the method name from argv[0]		/*{{{*/
+// ---------------------------------------------------------------------
+/* A plain binary name like "foo" (that is not "https" itself) is
+   normalized to "foo+https" so the constructor registers the right
+   configuration aliases. */
+int main(int, const char *argv[])					/*{{{*/
+{
+   std::string Binary = flNotDir(argv[0]);
+   if (Binary.find('+') == std::string::npos && Binary != "https")
+      Binary.append("+https");
+   return HttpsMethod(std::move(Binary)).Run();
+}
+ /*}}}*/
diff --git a/methods/https.h b/methods/https.h
new file mode 100644
index 000000000..fbbf34501
--- /dev/null
+++ b/methods/https.h
@@ -0,0 +1,88 @@
+// -*- mode: cpp; mode: fold -*-
+// Description								/*{{{*/
+// $Id: http.h,v 1.12 2002/04/18 05:09:38 jgg Exp $
+/* ######################################################################
+
+ HTTP Acquire Method - This is the HTTP acquire method for APT.
+
+ ##################################################################### */
+ /*}}}*/
+
+#ifndef APT_HTTPS_H
+#define APT_HTTPS_H
+
+#include <curl/curl.h>
+#include <iostream>
+#include <stddef.h>
+#include <string>
+#include <memory>
+
+#include "basehttp.h"
+
+using std::cout;
+using std::endl;
+
+class Hashes;
+class HttpsMethod;
+class FileFd;
+
+/** \brief ServerState implementation for the https method.
+ *
+ *  Nearly all of the ServerState interface is stubbed to return false:
+ *  the protocol handling is delegated entirely to libcurl inside
+ *  HttpsMethod::Fetch(), so this object only manages the Hashes
+ *  instance used to checksum the downloaded data.
+ */
+class HttpsServerState : public ServerState
+{
+   // hash set for the current download, managed via InitHashes()/GetHashes()
+   Hashes * Hash;
+
+   protected:
+   virtual bool ReadHeaderLines(std::string &/*Data*/) APT_OVERRIDE { return false; }
+   virtual bool LoadNextResponse(bool const /*ToFile*/, RequestState &/*Req*/) APT_OVERRIDE { return false; }
+
+   public:
+   virtual bool WriteResponse(std::string const &/*Data*/) APT_OVERRIDE { return false; }
+
+   /** \brief Transfer the data from the socket (unused: curl does this) */
+   virtual bool RunData(RequestState &) APT_OVERRIDE { return false; }
+   virtual bool RunDataToDevNull(RequestState &) APT_OVERRIDE { return false; }
+
+   virtual bool Open() APT_OVERRIDE { return false; }
+   virtual bool IsOpen() APT_OVERRIDE { return false; }
+   virtual bool Close() APT_OVERRIDE { return false; }
+   virtual bool InitHashes(HashStringList const &ExpectedHashes) APT_OVERRIDE;
+   virtual Hashes * GetHashes() APT_OVERRIDE;
+   virtual bool Die(RequestState &/*Req*/) APT_OVERRIDE { return false; }
+   virtual bool Flush(FileFd * const /*File*/) APT_OVERRIDE { return false; }
+   virtual bool Go(bool /*ToFile*/, RequestState &/*Req*/) APT_OVERRIDE { return false; }
+
+   HttpsServerState(URI Srv, HttpsMethod *Owner);
+   virtual ~HttpsServerState() {Close();};
+};
+
+/** \brief Acquire method implementing http/https via libcurl. */
+class HttpsMethod : public BaseHttpMethod
+{
+   // minimum speed in bytes/sec; transfers slower than this for the
+   // configured Timeout are aborted (see Debian bug #497983)
+   static const int DL_MIN_SPEED = 10;
+
+   virtual bool Fetch(FetchItem *) APT_OVERRIDE;
+
+   // libcurl callbacks: response header parsing, data sink and
+   // progress reporting
+   static size_t parse_header(void *buffer, size_t size, size_t nmemb, void *userp);
+   static size_t write_data(void *buffer, size_t size, size_t nmemb, void *userp);
+   static int progress_callback(void *clientp, double dltotal, double dlnow,
+				double ultotal, double ulnow);
+   bool SetupProxy();
+   CURL *curl;	// single easy handle, reused across requests
+
+   // Used by BaseHttpMethod, unused by https: the curl-based Fetch()
+   // bypasses the SendReq/Run machinery, so these must never be reached
+   virtual void SendReq(FetchItem *) APT_OVERRIDE { exit(42); }
+   virtual void RotateDNS() APT_OVERRIDE { exit(42); }
+
+   public:
+
+   virtual std::unique_ptr<ServerState> CreateServerState(URI const &uri) APT_OVERRIDE;
+   using pkgAcqMethod::FetchResult;
+   using pkgAcqMethod::FetchItem;
+
+   explicit HttpsMethod(std::string &&pProg);
+   virtual ~HttpsMethod();
+};
+
+#include <apt-pkg/strutl.h>
+URI Proxy;
+
+#endif
diff --git a/methods/mirror.cc b/methods/mirror.cc
new file mode 100644
index 000000000..71faaf591
--- /dev/null
+++ b/methods/mirror.cc
@@ -0,0 +1,470 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: mirror.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $
+/* ######################################################################
+
+ Mirror Acquire Method - This is the Mirror acquire method for APT.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <config.h>
+
+#include <apt-pkg/aptconfiguration.h>
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/acquire-item.h>
+#include <apt-pkg/acquire.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/sourcelist.h>
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/metaindex.h>
+#include <apt-pkg/strutl.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <algorithm>
+#include <iostream>
+#include <fstream>
+#include <sys/stat.h>
+#include <sys/utsname.h>
+#include <dirent.h>
+
+using namespace std;
+
+#include<sstream>
+
+#include "mirror.h"
+#include "http.h"
+#include <apti18n.h>
+ /*}}}*/
+
+/* Done:
+ * - works with http (only!)
+ * - always picks the first mirror from the list
+ * - call out to problem reporting script
+ * - supports "deb mirror://host/path/to/mirror-list/// dist component"
+ * - uses pkgAcqMethod::FailReason() to have a string representation
+ * of the failure that is also sent to LP
+ *
+ * TODO:
+ * - deal with running as non-root because we can't write to the lists
+ dir then -> use the cached mirror file
+ * - better method to download than having a pkgAcquire interface here
+ * and better error handling there!
+ * - support more than http
+ * - testing :)
+ */
+
+// MirrorMethod ctor: no mirror list downloaded yet; debugging stays
+// off until Configuration() runs
+MirrorMethod::MirrorMethod()
+   : HttpMethod("mirror"), DownloadedMirrorFile(false), Debug(false)
+{
+}
+
+// MirrorMethod::Configuration - Handle a configuration message		/*{{{*/
+// ---------------------------------------------------------------------
+/* Let the base class parse the message, then cache the debug flag. */
+bool MirrorMethod::Configuration(string Message)
+{
+   if (pkgAcqMethod::Configuration(Message) == false)
+      return false;
+   Debug = DebugEnabled();
+
+   return true;
+}
+									/*}}}*/
+
+// MirrorMethod::Clean - Expire stale cached mirror lists
+// ---------------------------------------------------------------------
+/* Removes every file in Dir whose name does not correspond to a
+   mirror:// entry still present in sources.list. Works by chdir'ing
+   into Dir so RemoveFile() operates on plain d_name entries, and
+   chdir's back afterwards. */
+bool MirrorMethod::Clean(string Dir)
+{
+   vector<metaIndex *>::const_iterator I;
+
+   if(Debug)
+      clog << "MirrorMethod::Clean(): " << Dir << endl;
+
+   // refuse a clearly bogus directory
+   if(Dir == "/")
+      return _error->Error("will not clean: '/'");
+
+   // read sources.list
+   pkgSourceList list;
+   list.ReadMainList();
+
+   DIR *D = opendir(Dir.c_str());
+   if (D == 0)
+      return _error->Errno("opendir",_("Unable to read %s"),Dir.c_str());
+
+   string StartDir = SafeGetCWD();
+   if (chdir(Dir.c_str()) != 0)
+   {
+      closedir(D);
+      return _error->Errno("chdir",_("Unable to change to %s"),Dir.c_str());
+   }
+
+   // NOTE: this dirent* 'Dir' shadows the string parameter 'Dir' above;
+   // inside the loop 'Dir' refers to the directory entry
+   for (struct dirent *Dir = readdir(D); Dir != 0; Dir = readdir(D))
+   {
+      // Skip some files..
+      if (strcmp(Dir->d_name,"lock") == 0 ||
+	  strcmp(Dir->d_name,"partial") == 0 ||
+	  strcmp(Dir->d_name,"lost+found") == 0 ||
+	  strcmp(Dir->d_name,".") == 0 ||
+	  strcmp(Dir->d_name,"..") == 0)
+	 continue;
+
+      // see if we have that uri
+      for(I=list.begin(); I != list.end(); ++I)
+      {
+	 string uri = (*I)->GetURI();
+	 if(uri.compare(0, strlen("mirror://"), "mirror://") != 0)
+	    continue;
+	 // strip the trailing slash before hashing into a file name
+	 string BaseUri = uri.substr(0,uri.size()-1);
+	 if (URItoFileName(BaseUri) == Dir->d_name)
+	    break;
+      }
+      // nothing found, nuke it
+      if (I == list.end())
+	 RemoveFile("mirror", Dir->d_name);
+   }
+
+   closedir(D);
+   if (chdir(StartDir.c_str()) != 0)
+      return _error->Errno("chdir",_("Unable to change to %s"),StartDir.c_str());
+   return true;
+}
+
+
+// MirrorMethod::DownloadMirrorFile - Fetch the list of mirrors
+// ---------------------------------------------------------------------
+/* Rewrites BaseUri from mirror:// to http:// and downloads the mirror
+   list into MirrorFile, appending the configured architectures (and the
+   dist, if known) as query parameters so the server can tailor the
+   list. The parameter is unused; the target is derived from BaseUri. */
+bool MirrorMethod::DownloadMirrorFile(string /*mirror_uri_str*/)
+{
+   // not that great to use pkgAcquire here, but we do not have
+   // any other way right now
+   string fetch = BaseUri;
+   fetch.replace(0,strlen("mirror://"),"http://");
+
+#if 0 // no need for this, the getArchitectures() will also include the main
+      // arch
+   // append main architecture
+   fetch += "?arch=" + _config->Find("Apt::Architecture");
+#endif
+
+   // append all architectures: the first one opens the query string
+   // with '?', the rest are chained with '&'
+   std::vector<std::string> vec = APT::Configuration::getArchitectures();
+   for (std::vector<std::string>::const_iterator I = vec.begin();
+	I != vec.end(); ++I)
+      if (I == vec.begin())
+	 fetch += "?arch=" + (*I);
+      else
+	 fetch += "&arch=" + (*I);
+
+   // append the dist as a query string
+   // NOTE(review): if the architecture list were ever empty this would
+   // append "&dist=" without a preceding '?' - presumably vec is never
+   // empty in practice
+   if (Dist != "")
+      fetch += "&dist=" + Dist;
+
+   if(Debug)
+      clog << "MirrorMethod::DownloadMirrorFile(): '" << fetch << "'"
+	   << " to " << MirrorFile << endl;
+
+   pkgAcquire Fetcher;
+   new pkgAcqFile(&Fetcher, fetch, "", 0, "", "", "", MirrorFile);
+   bool res = (Fetcher.Run() == pkgAcquire::Continue);
+   if(res) {
+      DownloadedMirrorFile = true;
+      // make the cached list readable for non-root apt invocations
+      chmod(MirrorFile.c_str(), 0644);
+   }
+   Fetcher.Shutdown();
+
+   if(Debug)
+      clog << "MirrorMethod::DownloadMirrorFile() success: " << res << endl;
+
+   return res;
+}
+
+// Randomizes the lines in the mirror file; this is used so that
+// we spread the load on the mirrors evenly
+bool MirrorMethod::RandomizeMirrorFile(string mirror_file)
+{
+   vector<string> content;
+   string line;
+
+   if (!FileExists(mirror_file))
+      return false;
+
+   // read (NOTE: the eof-controlled loop appends one trailing empty
+   // line; InitMirrors() filters empty lines out again later)
+   ifstream in(mirror_file.c_str());
+   while ( !in.eof() ) {
+      getline(in, line);
+      content.push_back(line);
+   }
+
+   // we want the file to be random for each different machine, but also
+   // "stable" on the same machine. this is to avoid running into out-of-sync
+   // issues (i.e. Release/Release.gpg different on each mirror)
+   struct utsname buf;
+   int seed=1;
+   if(uname(&buf) == 0) {
+      // NOTE(review): 'int i=0,seed=1' declares a *second* seed that
+      // shadows the outer one, so the hostname hash computed here is
+      // discarded and srand() below always receives 1 - every machine
+      // shuffles identically. Dropping the inner ',seed=1' would fix it.
+      for(int i=0,seed=1; buf.nodename[i] != 0; ++i) {
+	 seed = seed * 31 + buf.nodename[i];
+      }
+   }
+   srand( seed );
+   random_shuffle(content.begin(), content.end());
+
+   // write the shuffled lines back (popped in reverse order)
+   ofstream out(mirror_file.c_str());
+   while ( !content.empty()) {
+      line = content.back();
+      content.pop_back();
+      out << line << "\n";
+   }
+
+   return true;
+}
+
+/* Convert the Queue->Uri back to the mirror:// base uri; this is needed
+ * as queue->uri may point at any of the http mirrors (if TryNextMirror()
+ * was run) and apt must be told about the canonical mirror:// form.
+ */
+void MirrorMethod::CurrentQueueUriToMirror()
+{
+   // already in mirror:// style so nothing to do
+   if(Queue->Uri.find("mirror://") == 0)
+      return;
+
+   // find which mirror the uri currently points at and map it back
+   for (vector<string>::const_iterator mirror = AllMirrors.begin();
+	mirror != AllMirrors.end(); ++mirror)
+   {
+      if (Queue->Uri.find(*mirror) == 0)
+      {
+	 Queue->Uri.replace(0, mirror->length(), BaseUri);
+	 return;
+      }
+   }
+   _error->Error("Internal error: Failed to convert %s back to %s",
+		 Queue->Uri.c_str(), BaseUri.c_str());
+}
+
+// MirrorMethod::TryNextMirror - Rotate Queue->Uri to the next mirror
+// ---------------------------------------------------------------------
+/* Returns false when the current mirror is the last one in the list
+   (or the uri does not match any known mirror). */
+bool MirrorMethod::TryNextMirror()
+{
+   // find current mirror and select next one
+   for (vector<string>::const_iterator mirror = AllMirrors.begin();
+	mirror != AllMirrors.end(); ++mirror)
+   {
+      if (Queue->Uri.find(*mirror) != 0)
+	 continue;
+
+      vector<string>::const_iterator nextmirror = mirror + 1;
+      if (nextmirror == AllMirrors.end())
+	 break;
+      Queue->Uri.replace(0, mirror->length(), *nextmirror);
+      if (Debug)
+	 clog << "TryNextMirror: " << Queue->Uri << endl;
+
+      // inform parent
+      UsedMirror = *nextmirror;
+      Log("Switching mirror");
+      return true;
+   }
+
+   if (Debug)
+      clog << "TryNextMirror could not find another mirror to try" << endl;
+
+   return false;
+}
+
+// MirrorMethod::InitMirrors - Load the mirror list from MirrorFile
+// ---------------------------------------------------------------------
+/* Fills AllMirrors with every http:// line from the (possibly cached)
+   mirror file, skipping comments and empty lines, and selects the first
+   entry as the active Mirror. Errors out if no usable entry exists. */
+bool MirrorMethod::InitMirrors()
+{
+   // if we do not have a MirrorFile, fallback
+   if(!FileExists(MirrorFile))
+   {
+      // FIXME: fallback to a default mirror here instead
+      // and provide a config option to define that default
+      return _error->Error(_("No mirror file '%s' found "), MirrorFile.c_str());
+   }
+
+   if (access(MirrorFile.c_str(), R_OK) != 0)
+   {
+      // FIXME: fallback to a default mirror here instead
+      // and provide a config option to define that default
+      return _error->Error(_("Can not read mirror file '%s'"), MirrorFile.c_str());
+   }
+
+   // FIXME: make the mirror selection more clever, do not
+   // just use the first one!
+   // BUT: we can not make this random, the mirror has to be
+   // stable across session, because otherwise we can
+   // get into sync issues (got indexfiles from mirror A,
+   // but packages from mirror B - one might be out of date etc)
+   ifstream in(MirrorFile.c_str());
+   string s;
+   while (!in.eof())
+   {
+      getline(in, s);
+
+      // ignore lines that start with #
+      if (s.find("#") == 0)
+	 continue;
+      // ignore empty lines
+      if (s.size() == 0)
+	 continue;
+      // ignore non http lines
+      if (s.compare(0, strlen("http://"), "http://") != 0)
+	 continue;
+
+      AllMirrors.push_back(s);
+   }
+   if (AllMirrors.empty()) {
+      return _error->Error(_("No entry found in mirror file '%s'"), MirrorFile.c_str());
+   }
+   Mirror = AllMirrors[0];
+   UsedMirror = Mirror;
+   return true;
+}
+
+// MirrorMethod::GetMirrorFileName - Derive cache file name for a uri
+// ---------------------------------------------------------------------
+/* Scans sources.list for the mirror:// entry that prefixes the given
+   uri, records BaseUri and Dist as a side effect and returns the path
+   of the cached mirror list under Dir::State::mirrors. */
+string MirrorMethod::GetMirrorFileName(string mirror_uri_str)
+{
+   /*
+    - a mirror_uri_str looks like this:
+    mirror://people.ubuntu.com/~mvo/apt/mirror/mirrors/dists/feisty/Release.gpg
+
+    - the matching source.list entry
+    deb mirror://people.ubuntu.com/~mvo/apt/mirror/mirrors feisty main
+
+    - we actually want to go after:
+    http://people.ubuntu.com/~mvo/apt/mirror/mirrors
+
+    And we need to save the BaseUri for later:
+    - mirror://people.ubuntu.com/~mvo/apt/mirror/mirrors
+
+    FIXME: what if we have two similar prefixes?
+      mirror://people.ubuntu.com/~mvo/mirror
+      mirror://people.ubuntu.com/~mvo/mirror2
+    then mirror_uri_str looks like:
+      mirror://people.ubuntu.com/~mvo/apt/mirror/dists/feisty/Release.gpg
+      mirror://people.ubuntu.com/~mvo/apt/mirror2/dists/feisty/Release.gpg
+    we search sources.list and find:
+      mirror://people.ubuntu.com/~mvo/apt/mirror
+    in both cases! So we need to apply some domain knowledge here :( and
+    check for /dists/ or /Release.gpg as suffixes
+    */
+   string name;
+   if(Debug)
+      std::cerr << "GetMirrorFileName: " << mirror_uri_str << std::endl;
+
+   // read sources.list and find match; note the loop does not break on
+   // the first hit, so the LAST matching entry wins
+   vector<metaIndex *>::const_iterator I;
+   pkgSourceList list;
+   list.ReadMainList();
+   for(I=list.begin(); I != list.end(); ++I)
+   {
+      string uristr = (*I)->GetURI();
+      if(Debug)
+	 std::cerr << "Checking: " << uristr << std::endl;
+      if(uristr.substr(0,strlen("mirror://")) != string("mirror://"))
+	 continue;
+      // find matching uri in sources.list
+      if(mirror_uri_str.substr(0,uristr.size()) == uristr)
+      {
+	 if(Debug)
+	    std::cerr << "found BaseURI: " << uristr << std::endl;
+	 // strip the trailing slash
+	 BaseUri = uristr.substr(0,uristr.size()-1);
+	 Dist = (*I)->GetDist();
+      }
+   }
+   // get new file
+   name = _config->FindDir("Dir::State::mirrors") + URItoFileName(BaseUri);
+
+   if(Debug)
+   {
+      cerr << "base-uri: " << BaseUri << endl;
+      cerr << "mirror-file: " << name << endl;
+   }
+   return name;
+}
+
+// MirrorMethod::Fetch - Fetch an item					/*{{{*/
+// ---------------------------------------------------------------------
+/* Lazily downloads and initializes the mirror list, rewrites the
+   mirror:// uri to the selected http mirror and hands the item to the
+   underlying http method, which keeps the pipeline at a fixed depth. */
+bool MirrorMethod::Fetch(FetchItem *Itm)
+{
+   if(Debug)
+      clog << "MirrorMethod::Fetch()" << endl;
+
+   // the http method uses Fetch(0) as a way to update the pipeline,
+   // just let it do its work in this case - Fetch() with a valid
+   // Itm will always run before the first Fetch(0)
+   if(Itm == NULL)
+      return HttpMethod::Fetch(Itm);
+
+   // if we don't have the name of the mirror file on disk yet,
+   // calculate it now (can be derived from the uri)
+   if(MirrorFile.empty())
+      MirrorFile = GetMirrorFileName(Itm->Uri);
+
+   // download mirror file once (if we are after index files)
+   if(Itm->IndexFile && !DownloadedMirrorFile)
+   {
+      Clean(_config->FindDir("Dir::State::mirrors"));
+      if (DownloadMirrorFile(Itm->Uri))
+	 RandomizeMirrorFile(MirrorFile);
+   }
+
+   if(AllMirrors.empty()) {
+      if(!InitMirrors()) {
+	 // no valid mirror selected, something went wrong downloading
+	 // from the master mirror site most likely and there is
+	 // no old mirror file available
+	 return false;
+      }
+   }
+
+   // rewrite mirror://... to the currently selected http mirror
+   // NOTE(review): 'find() != npos' looks like it was meant to be a
+   // prefix test (== 0); the replace() below always operates on offset 0
+   if(Itm->Uri.find("mirror://") != string::npos)
+      Itm->Uri.replace(0,BaseUri.size(), Mirror);
+
+   if(Debug)
+      clog << "Fetch: " << Itm->Uri << endl << endl;
+
+   // now run the real fetcher
+   return HttpMethod::Fetch(Itm);
+}
+
+// MirrorMethod::Fail - Handle a fetch failure
+// ---------------------------------------------------------------------
+/* First tries to switch to the next mirror; only when all mirrors are
+   exhausted (or the failure is an ignorable one) is the failure
+   reported to apt, with the uri converted back to mirror:// form. */
+void MirrorMethod::Fail(string Err,bool Transient)
+{
+   // FIXME: TryNextMirror is not ideal for indexfile as we may
+   // run into auth issues
+
+   if (Debug)
+      clog << "Failure to get " << Queue->Uri << endl;
+
+   // try the next mirror on fail (if it is not an expected failure,
+   // e.g. translations are ok to ignore)
+   if (!Queue->FailIgnore && TryNextMirror())
+      return;
+
+   // all mirrors failed, so bail out
+   string s;
+   strprintf(s, _("[Mirror: %s]"), Mirror.c_str());
+   SetIP(s);
+
+   CurrentQueueUriToMirror();
+   pkgAcqMethod::Fail(Err, Transient);
+}
+
+// report transfer start to apt with the uri mapped back to mirror:// form
+void MirrorMethod::URIStart(FetchResult &Res)
+{
+   CurrentQueueUriToMirror();
+   pkgAcqMethod::URIStart(Res);
+}
+
+// report transfer completion to apt with the uri mapped back to mirror:// form
+void MirrorMethod::URIDone(FetchResult &Res,FetchResult *Alt)
+{
+   CurrentQueueUriToMirror();
+   pkgAcqMethod::URIDone(Res, Alt);
+}
+
+
+// main - Entry point for the mirror method
+int main()
+{
+   return MirrorMethod().Loop();
+}
+
+
diff --git a/methods/mirror.h b/methods/mirror.h
new file mode 100644
index 000000000..6ebe08e6b
--- /dev/null
+++ b/methods/mirror.h
@@ -0,0 +1,57 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+/* ######################################################################
+
+ MIRROR Acquire Method - This is the MIRROR acquire method for APT.
+
+ ##################################################################### */
+ /*}}}*/
+
+#ifndef APT_MIRROR_H
+#define APT_MIRROR_H
+
+#include <iostream>
+#include <string>
+#include <vector>
+
+using std::cout;
+using std::cerr;
+using std::endl;
+
+#include "http.h"
+
+/** \brief http method variant that transparently selects a mirror.
+ *
+ *  Translates mirror://host/path uris into a concrete http:// mirror
+ *  taken from a downloaded (and cached) mirror list and rotates to the
+ *  next mirror on failure; everything reported back to apt is converted
+ *  to the canonical mirror:// form first. */
+class MirrorMethod : public HttpMethod
+{
+   FetchResult Res;
+   // we simply transform between BaseUri and Mirror
+   std::string BaseUri;    // the original mirror://... url
+   std::string Mirror;     // the selected mirror uri (http://...)
+   std::vector<std::string> AllMirrors; // all available mirrors
+   std::string MirrorFile; // the file that contains the list of mirrors
+   bool DownloadedMirrorFile; // already downloaded this session
+   std::string Dist;       // the target distribution (e.g. sid, oneiric)
+
+   bool Debug;
+
+   protected:
+   bool DownloadMirrorFile(std::string uri);
+   bool RandomizeMirrorFile(std::string file);
+   std::string GetMirrorFileName(std::string uri);
+   bool InitMirrors();
+   bool TryNextMirror();
+   void CurrentQueueUriToMirror();
+   bool Clean(std::string dir);
+
+   // we need to overwrite those to transform the url back
+   virtual void Fail(std::string Why, bool Transient = false) APT_OVERRIDE;
+   virtual void URIStart(FetchResult &Res) APT_OVERRIDE;
+   virtual void URIDone(FetchResult &Res,FetchResult *Alt = 0) APT_OVERRIDE;
+   virtual bool Configuration(std::string Message) APT_OVERRIDE;
+
+   public:
+   MirrorMethod();
+   virtual bool Fetch(FetchItem *Itm) APT_OVERRIDE;
+};
+
+
+#endif
diff --git a/methods/rfc2553emu.cc b/methods/rfc2553emu.cc
new file mode 100644
index 000000000..372882769
--- /dev/null
+++ b/methods/rfc2553emu.cc
@@ -0,0 +1,245 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: rfc2553emu.cc,v 1.8 2001/02/20 07:03:18 jgg Exp $
+/* ######################################################################
+
+ RFC 2553 Emulation - Provides emulation for RFC 2553 getaddrinfo,
+ freeaddrinfo and getnameinfo
+
+ This is really C code, it just has a .cc extensions to play nicer with
+ the rest of APT.
+
+ Originally written by Jason Gunthorpe <jgg@debian.org> and placed into
+ the Public Domain, do with it what you will.
+
+ ##################################################################### */
+ /*}}}*/
+#include <config.h>
+
+#include <stdlib.h>
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <string.h>
+#include <stdio.h>
+#include "rfc2553emu.h"
+
+#ifndef HAVE_GETADDRINFO
+// getaddrinfo - Resolve a hostname					/*{{{*/
+// ---------------------------------------------------------------------
+/* IPv4-only emulation. The service may be given numerically or as a
+   name looked up in the services database; the result is a calloc'd
+   linked list of addrinfo structures to be released with
+   freeaddrinfo(). For AI_PASSIVE requests no hostname lookup is done
+   and a single INADDR_ANY entry is produced. */
+int getaddrinfo(const char *nodename, const char *servname,
+		const struct addrinfo *hints,
+		struct addrinfo **res)
+{
+   struct addrinfo **Result = res;	// tail pointer of the list being built
+   hostent *Addr;
+   unsigned int Port;			// network byte order
+   int Proto;
+   const char *End;
+   char **CurAddr;
+
+   // Try to convert the service as a number
+   Port = htons(strtol(servname,(char **)&End,0));
+   Proto = SOCK_STREAM;
+
+   if (hints != 0 && hints->ai_socktype != 0)
+      Proto = hints->ai_socktype;
+
+   // Not a number, must be a name.
+   if (End != servname + strlen(servname))
+   {
+      struct servent *Srv = 0;
+
+      // Do a lookup in the service database
+      if (hints == 0 || hints->ai_socktype == SOCK_STREAM)
+	 Srv = getservbyname(servname,"tcp");
+      if (hints != 0 && hints->ai_socktype == SOCK_DGRAM)
+	 Srv = getservbyname(servname,"udp");
+      if (Srv == 0)
+	 return EAI_NONAME;
+
+      // Get the right protocol
+      Port = Srv->s_port;
+      if (strcmp(Srv->s_proto,"tcp") == 0)
+	 Proto = SOCK_STREAM;
+      else
+      {
+	 if (strcmp(Srv->s_proto,"udp") == 0)
+	    Proto = SOCK_DGRAM;
+	 else
+	    return EAI_NONAME;
+      }
+
+      // the database entry must agree with the requested socket type
+      if (hints != 0 && hints->ai_socktype != Proto &&
+	  hints->ai_socktype != 0)
+	 return EAI_SERVICE;
+   }
+
+   // Hostname lookup, only if this is not a listening socket
+   if (hints != 0 && (hints->ai_flags & AI_PASSIVE) != AI_PASSIVE)
+   {
+      Addr = gethostbyname(nodename);
+      if (Addr == 0)
+      {
+	 if (h_errno == TRY_AGAIN)
+	    return EAI_AGAIN;
+	 if (h_errno == NO_RECOVERY)
+	    return EAI_FAIL;
+	 return EAI_NONAME;
+      }
+
+      // No A records
+      if (Addr->h_addr_list[0] == 0)
+	 return EAI_NONAME;
+
+      CurAddr = Addr->h_addr_list;
+   }
+   else
+      CurAddr = (char **)&End; // fake non-NULL entry so the loop below
+			       // runs exactly once for the passive case
+
+   // Start constructing the linked list
+   *res = 0;
+   for (; *CurAddr != 0; CurAddr++)
+   {
+      // New result structure
+      *Result = (struct addrinfo *)calloc(sizeof(**Result),1);
+      if (*Result == 0)
+      {
+	 freeaddrinfo(*res);
+	 return EAI_MEMORY;
+      }
+      if (*res == 0)
+	 *res = *Result;
+
+      (*Result)->ai_family = AF_INET;
+      (*Result)->ai_socktype = Proto;
+
+      // If we have the IPPROTO defines we can set the protocol field
+      #ifdef IPPROTO_TCP
+      if (Proto == SOCK_STREAM)
+	 (*Result)->ai_protocol = IPPROTO_TCP;
+      if (Proto == SOCK_DGRAM)
+	 (*Result)->ai_protocol = IPPROTO_UDP;
+      #endif
+
+      // Allocate space for the address
+      (*Result)->ai_addrlen = sizeof(struct sockaddr_in);
+      (*Result)->ai_addr = (struct sockaddr *)calloc(sizeof(sockaddr_in),1);
+      if ((*Result)->ai_addr == 0)
+      {
+	 freeaddrinfo(*res);
+	 return EAI_MEMORY;
+      }
+
+      // Set the address
+      ((struct sockaddr_in *)(*Result)->ai_addr)->sin_family = AF_INET;
+      ((struct sockaddr_in *)(*Result)->ai_addr)->sin_port = Port;
+
+      if (hints != 0 && (hints->ai_flags & AI_PASSIVE) != AI_PASSIVE)
+	 ((struct sockaddr_in *)(*Result)->ai_addr)->sin_addr = *(in_addr *)(*CurAddr);
+      else
+      {
+	 // Already zeroed by calloc (INADDR_ANY); stop after one entry.
+	 break;
+      }
+
+      Result = &(*Result)->ai_next;
+   }
+
+   return 0;
+}
+ /*}}}*/
+// freeaddrinfo - Free the result of getaddrinfo			/*{{{*/
+// ---------------------------------------------------------------------
+/* Releases every node of the linked list built by getaddrinfo above,
+   including the sockaddr blob attached to each node. The next pointer
+   must be saved before the node itself is released: the previous code
+   advanced ai first and then freed the *next* node, which leaked the
+   head node and read freed memory on the following iteration. */
+void freeaddrinfo(struct addrinfo *ai)
+{
+   while (ai != 0)
+   {
+      struct addrinfo *Tmp = ai;
+      ai = ai->ai_next;
+      free(Tmp->ai_addr);
+      free(Tmp);
+   }
+}
+									/*}}}*/
+#endif // HAVE_GETADDRINFO
+
+#ifndef HAVE_GETNAMEINFO
+// getnameinfo - Convert a sockaddr to a string				/*{{{*/
+// ---------------------------------------------------------------------
+/* IPv4-only emulation: resolves the address (and optionally the port)
+   to names via the resolver/services database, falling back to the
+   numeric representation unless NI_NAMEREQD forbids that. */
+int getnameinfo(const struct sockaddr *sa, socklen_t salen,
+		char *host, size_t hostlen,
+		char *serv, size_t servlen,
+		int flags)
+{
+   struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+   // This routine only supports internet addresses
+   if (sa->sa_family != AF_INET)
+      return EAI_ADDRFAMILY;
+
+   if (host != 0)
+   {
+      // Try to resolve the hostname
+      if ((flags & NI_NUMERICHOST) != NI_NUMERICHOST)
+      {
+	 struct hostent *Ent = gethostbyaddr((char *)&sin->sin_addr,sizeof(sin->sin_addr),
+					     AF_INET);
+	 if (Ent != 0)
+	    // NOTE(review): strncpy does not NUL-terminate when the name
+	    // fills the buffer - callers should pass NI_MAXHOST
+	    strncpy(host,Ent->h_name,hostlen);
+	 else
+	 {
+	    if ((flags & NI_NAMEREQD) == NI_NAMEREQD)
+	    {
+	       if (h_errno == TRY_AGAIN)
+		  return EAI_AGAIN;
+	       if (h_errno == NO_RECOVERY)
+		  return EAI_FAIL;
+	       return EAI_NONAME;
+	    }
+
+	    // resolution failed but is not required: fall back to numeric
+	    flags |= NI_NUMERICHOST;
+	 }
+      }
+
+      // Resolve as a plain numeric
+      if ((flags & NI_NUMERICHOST) == NI_NUMERICHOST)
+      {
+	 strncpy(host,inet_ntoa(sin->sin_addr),hostlen);
+      }
+   }
+
+   if (serv != 0)
+   {
+      // Try to resolve the service name
+      if ((flags & NI_NUMERICSERV) != NI_NUMERICSERV)
+      {
+	 struct servent *Ent;
+	 // NOTE(review): POSIX getservbyport() expects the port in
+	 // network byte order, but it is passed through ntohs() here -
+	 // verify on the intended platforms
+	 if ((flags & NI_DATAGRAM) == NI_DATAGRAM)
+	    Ent = getservbyport(ntohs(sin->sin_port),"udp");
+	 else
+	    Ent = getservbyport(ntohs(sin->sin_port),"tcp");
+
+	 if (Ent != 0)
+	    strncpy(serv,Ent->s_name,servlen);
+	 else
+	 {
+	    if ((flags & NI_NAMEREQD) == NI_NAMEREQD)
+	       return EAI_NONAME;
+
+	    flags |= NI_NUMERICSERV;
+	 }
+      }
+
+      // Resolve as a plain numeric
+      if ((flags & NI_NUMERICSERV) == NI_NUMERICSERV)
+      {
+	 snprintf(serv,servlen,"%u",ntohs(sin->sin_port));
+      }
+   }
+
+   return 0;
+}
+ /*}}}*/
+#endif // HAVE_GETNAMEINFO
diff --git a/methods/rfc2553emu.h b/methods/rfc2553emu.h
new file mode 100644
index 000000000..ad7ddf48a
--- /dev/null
+++ b/methods/rfc2553emu.h
@@ -0,0 +1,113 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: rfc2553emu.h,v 1.4 2000/06/18 06:04:45 jgg Exp $
+/* ######################################################################
+
+ RFC 2553 Emulation - Provides emulation for RFC 2553 getaddrinfo,
+ freeaddrinfo and getnameinfo
+
+ These functions are necessary to write portable protocol independent
+ networking. They transparently support IPv4, IPv6 and probably many
+ other protocols too. This implementation is needed when the host does
+ not support these standards. It implements a simple wrapper that
+ basically supports only IPv4.
+
+ Perfect emulation is not provided, but it is passable..
+
+ Originally written by Jason Gunthorpe <jgg@debian.org> and placed into
+ the Public Domain, do with it what you will.
+
+ ##################################################################### */
+ /*}}}*/
+#ifndef RFC2553EMU_H
+#define RFC2553EMU_H
+
+#include <netdb.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+
+// Autosense getaddrinfo
+#if defined(AI_PASSIVE) && defined(EAI_NONAME)
+#define HAVE_GETADDRINFO
+#endif
+
+// Autosense getnameinfo
+#if defined(NI_NUMERICHOST)
+#define HAVE_GETNAMEINFO
+#endif
+
+// getaddrinfo support?
+#ifndef HAVE_GETADDRINFO
+  // Renamed to avoid type clashing.. (for debugging)
+ struct addrinfo_emu
+ {
+ int ai_flags; /* AI_PASSIVE, AI_CANONNAME, AI_NUMERICHOST */
+ int ai_family; /* PF_xxx */
+ int ai_socktype; /* SOCK_xxx */
+ int ai_protocol; /* 0 or IPPROTO_xxx for IPv4 and IPv6 */
+ size_t ai_addrlen; /* length of ai_addr */
+ char *ai_canonname; /* canonical name for nodename */
+ struct sockaddr *ai_addr; /* binary address */
+ struct addrinfo_emu *ai_next; /* next structure in linked list */
+ };
+ #define addrinfo addrinfo_emu
+
+ int getaddrinfo(const char *nodename, const char *servname,
+ const struct addrinfo *hints,
+ struct addrinfo **res);
+ void freeaddrinfo(struct addrinfo *ai);
+
+ #ifndef AI_PASSIVE
+ #define AI_PASSIVE (1<<1)
+ #endif
+
+ #ifndef EAI_NONAME
+ #define EAI_NONAME -1
+ #define EAI_AGAIN -2
+ #define EAI_FAIL -3
+ #define EAI_NODATA -4
+ #define EAI_FAMILY -5
+ #define EAI_SOCKTYPE -6
+ #define EAI_SERVICE -7
+ #define EAI_ADDRFAMILY -8
+ #define EAI_SYSTEM -10
+ #define EAI_MEMORY -11
+ #endif
+
+ /* If we don't have getaddrinfo then we probably don't have
+ sockaddr_storage either (same RFC) so we definitely will not be
+ doing any IPv6 stuff. Do not use the members of this structure to
+ retain portability, cast to a sockaddr. */
+ #define sockaddr_storage sockaddr_in
+#endif
+
+// getnameinfo support (glibc2.0 has getaddrinfo only)
+#ifndef HAVE_GETNAMEINFO
+
+ int getnameinfo(const struct sockaddr *sa, socklen_t salen,
+ char *host, size_t hostlen,
+ char *serv, size_t servlen,
+ int flags);
+
+ #ifndef NI_MAXHOST
+ #define NI_MAXHOST 1025
+ #define NI_MAXSERV 32
+ #endif
+
+ #ifndef NI_NUMERICHOST
+ #define NI_NUMERICHOST (1<<0)
+ #define NI_NUMERICSERV (1<<1)
+// #define NI_NOFQDN (1<<2)
+ #define NI_NAMEREQD (1<<3)
+ #define NI_DATAGRAM (1<<4)
+ #endif
+
+ #define sockaddr_storage sockaddr_in
+#endif
+
+// Glibc 2.0.7 misses this one
+#ifndef AI_NUMERICHOST
+#define AI_NUMERICHOST 0
+#endif
+
+#endif
diff --git a/methods/rred.cc b/methods/rred.cc
new file mode 100644
index 000000000..2e5008d46
--- /dev/null
+++ b/methods/rred.cc
@@ -0,0 +1,785 @@
+// Copyright (c) 2014 Anthony Towns
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation; either version 2 of the License, or
+// (at your option) any later version.
+
+#include <config.h>
+
+#include <apt-pkg/init.h>
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/hashes.h>
+#include <apt-pkg/configuration.h>
+#include "aptmethod.h"
+
+#include <stddef.h>
+#include <iostream>
+#include <string>
+#include <list>
+#include <vector>
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+
+#include <apti18n.h>
+
+#define BLOCK_SIZE (512*1024)
+
+class MemBlock {
+ char *start;
+ size_t size;
+ char *free;
+ MemBlock *next;
+
+ explicit MemBlock(size_t size) : size(size), next(NULL)
+ {
+ free = start = new char[size];
+ }
+
+ size_t avail(void) { return size - (free - start); }
+
+ public:
+
+ MemBlock(void) {
+ free = start = new char[BLOCK_SIZE];
+ size = BLOCK_SIZE;
+ next = NULL;
+ }
+
+ ~MemBlock() {
+ delete [] start;
+ delete next;
+ }
+
+ void clear(void) {
+ free = start;
+ if (next)
+ next->clear();
+ }
+
+ char *add_easy(char *src, size_t len, char *last)
+ {
+ if (last) {
+ for (MemBlock *k = this; k; k = k->next) {
+ if (k->free == last) {
+ if (len <= k->avail()) {
+ char *n = k->add(src, len);
+ assert(last == n);
+ if (last == n)
+ return NULL;
+ return n;
+ } else {
+ break;
+ }
+ } else if (last >= start && last < free) {
+ break;
+ }
+ }
+ }
+ return add(src, len);
+ }
+
+ char *add(char *src, size_t len) {
+ if (len > avail()) {
+ if (!next) {
+ if (len > BLOCK_SIZE) {
+ next = new MemBlock(len);
+ } else {
+ next = new MemBlock;
+ }
+ }
+ return next->add(src, len);
+ }
+ char *dst = free;
+ free += len;
+ memcpy(dst, src, len);
+ return dst;
+ }
+};
+
+struct Change {
+ /* Ordering:
+ *
+ * 1. write out <offset> lines unchanged
+ * 2. skip <del_cnt> lines from source
+ * 3. write out <add_cnt> lines (<add>/<add_len>)
+ */
+ size_t offset;
+ size_t del_cnt;
+ size_t add_cnt; /* lines */
+ size_t add_len; /* bytes */
+ char *add;
+
+ explicit Change(size_t off)
+ {
+ offset = off;
+ del_cnt = add_cnt = add_len = 0;
+ add = NULL;
+ }
+
+ /* actually, don't write <lines> lines from <add> */
+ void skip_lines(size_t lines)
+ {
+ while (lines > 0) {
+ char *s = (char*) memchr(add, '\n', add_len);
+ assert(s != NULL);
+ s++;
+ add_len -= (s - add);
+ add_cnt--;
+ lines--;
+ if (add_len == 0) {
+ add = NULL;
+ assert(add_cnt == 0);
+ assert(lines == 0);
+ } else {
+ add = s;
+ assert(add_cnt > 0);
+ }
+ }
+ }
+};
+
+class FileChanges {
+ std::list<struct Change> changes;
+ std::list<struct Change>::iterator where;
+ size_t pos; // line number is as far left of iterator as possible
+
+ bool pos_is_okay(void) const
+ {
+#ifdef POSDEBUG
+ size_t cpos = 0;
+ std::list<struct Change>::const_iterator x;
+ for (x = changes.begin(); x != where; ++x) {
+ assert(x != changes.end());
+ cpos += x->offset + x->add_cnt;
+ }
+ return cpos == pos;
+#else
+ return true;
+#endif
+ }
+
+ public:
+ FileChanges() {
+ where = changes.end();
+ pos = 0;
+ }
+
+ std::list<struct Change>::iterator begin(void) { return changes.begin(); }
+ std::list<struct Change>::iterator end(void) { return changes.end(); }
+
+ std::list<struct Change>::reverse_iterator rbegin(void) { return changes.rbegin(); }
+ std::list<struct Change>::reverse_iterator rend(void) { return changes.rend(); }
+
+ void add_change(Change c) {
+ assert(pos_is_okay());
+ go_to_change_for(c.offset);
+ assert(pos + where->offset == c.offset);
+ if (c.del_cnt > 0)
+ delete_lines(c.del_cnt);
+ assert(pos + where->offset == c.offset);
+ if (c.add_len > 0) {
+ assert(pos_is_okay());
+ if (where->add_len > 0)
+ new_change();
+ assert(where->add_len == 0 && where->add_cnt == 0);
+
+ where->add_len = c.add_len;
+ where->add_cnt = c.add_cnt;
+ where->add = c.add;
+ }
+ assert(pos_is_okay());
+ merge();
+ assert(pos_is_okay());
+ }
+
+ private:
+ void merge(void)
+ {
+ while (where->offset == 0 && where != changes.begin()) {
+ left();
+ }
+ std::list<struct Change>::iterator next = where;
+ ++next;
+
+ while (next != changes.end() && next->offset == 0) {
+ where->del_cnt += next->del_cnt;
+ next->del_cnt = 0;
+ if (next->add == NULL) {
+ next = changes.erase(next);
+ } else if (where->add == NULL) {
+ where->add = next->add;
+ where->add_len = next->add_len;
+ where->add_cnt = next->add_cnt;
+ next = changes.erase(next);
+ } else {
+ ++next;
+ }
+ }
+ }
+
+ void go_to_change_for(size_t line)
+ {
+ while(where != changes.end()) {
+ if (line < pos) {
+ left();
+ continue;
+ }
+ if (pos + where->offset + where->add_cnt <= line) {
+ right();
+ continue;
+ }
+ // line is somewhere in this slot
+ if (line < pos + where->offset) {
+ break;
+ } else if (line == pos + where->offset) {
+ return;
+ } else {
+ split(line - pos);
+ right();
+ return;
+ }
+ }
+ /* it goes before this patch */
+ insert(line-pos);
+ }
+
+ void new_change(void) { insert(where->offset); }
+
+ void insert(size_t offset)
+ {
+ assert(pos_is_okay());
+ assert(where == changes.end() || offset <= where->offset);
+ if (where != changes.end())
+ where->offset -= offset;
+ changes.insert(where, Change(offset));
+ --where;
+ assert(pos_is_okay());
+ }
+
+ void split(size_t offset)
+ {
+ assert(pos_is_okay());
+
+ assert(where->offset < offset);
+ assert(offset < where->offset + where->add_cnt);
+
+ size_t keep_lines = offset - where->offset;
+
+ Change before(*where);
+
+ where->del_cnt = 0;
+ where->offset = 0;
+ where->skip_lines(keep_lines);
+
+ before.add_cnt = keep_lines;
+ before.add_len -= where->add_len;
+
+ changes.insert(where, before);
+ --where;
+ assert(pos_is_okay());
+ }
+
+ void delete_lines(size_t cnt)
+ {
+ std::list<struct Change>::iterator x = where;
+ assert(pos_is_okay());
+ while (cnt > 0)
+ {
+ size_t del;
+ del = x->add_cnt;
+ if (del > cnt)
+ del = cnt;
+ x->skip_lines(del);
+ cnt -= del;
+
+ ++x;
+ if (x == changes.end()) {
+ del = cnt;
+ } else {
+ del = x->offset;
+ if (del > cnt)
+ del = cnt;
+ x->offset -= del;
+ }
+ where->del_cnt += del;
+ cnt -= del;
+ }
+ assert(pos_is_okay());
+ }
+
+ void left(void) {
+ assert(pos_is_okay());
+ --where;
+ pos -= where->offset + where->add_cnt;
+ assert(pos_is_okay());
+ }
+
+ void right(void) {
+ assert(pos_is_okay());
+ pos += where->offset + where->add_cnt;
+ ++where;
+ assert(pos_is_okay());
+ }
+};
+
+class Patch {
+ FileChanges filechanges;
+ MemBlock add_text;
+
+ static bool retry_fwrite(char *b, size_t l, FileFd &f, Hashes * const start_hash, Hashes * const end_hash = nullptr) APT_NONNULL(1)
+ {
+ if (f.Write(b, l) == false)
+ return false;
+ if (start_hash)
+ start_hash->Add((unsigned char*)b, l);
+ if (end_hash)
+ end_hash->Add((unsigned char*)b, l);
+ return true;
+ }
+
+ static void dump_rest(FileFd &o, FileFd &i,
+ Hashes * const start_hash, Hashes * const end_hash)
+ {
+ char buffer[BLOCK_SIZE];
+ unsigned long long l = 0;
+ while (i.Read(buffer, sizeof(buffer), &l)) {
+ if (l ==0 || !retry_fwrite(buffer, l, o, start_hash, end_hash))
+ break;
+ }
+ }
+
+ static void dump_lines(FileFd &o, FileFd &i, size_t n,
+ Hashes * const start_hash, Hashes * const end_hash)
+ {
+ char buffer[BLOCK_SIZE];
+ while (n > 0) {
+ if (i.ReadLine(buffer, sizeof(buffer)) == NULL)
+ buffer[0] = '\0';
+ size_t const l = strlen(buffer);
+ if (l == 0 || buffer[l-1] == '\n')
+ n--;
+ retry_fwrite(buffer, l, o, start_hash, end_hash);
+ }
+ }
+
+ static void skip_lines(FileFd &i, int n, Hashes * const start_hash)
+ {
+ char buffer[BLOCK_SIZE];
+ while (n > 0) {
+ if (i.ReadLine(buffer, sizeof(buffer)) == NULL)
+ buffer[0] = '\0';
+ size_t const l = strlen(buffer);
+ if (l == 0 || buffer[l-1] == '\n')
+ n--;
+ if (start_hash)
+ start_hash->Add((unsigned char*)buffer, l);
+ }
+ }
+
+ static void dump_mem(FileFd &o, char *p, size_t s, Hashes *hash) APT_NONNULL(2) {
+ retry_fwrite(p, s, o, nullptr, hash);
+ }
+
+ public:
+
+ bool read_diff(FileFd &f, Hashes * const h)
+ {
+ char buffer[BLOCK_SIZE];
+ bool cmdwanted = true;
+
+ Change ch(std::numeric_limits<size_t>::max());
+ if (f.ReadLine(buffer, sizeof(buffer)) == NULL)
+ return _error->Error("Reading first line of patchfile %s failed", f.Name().c_str());
+ do {
+ if (h != NULL)
+ h->Add(buffer);
+ if (cmdwanted) {
+ char *m, *c;
+ size_t s, e;
+ errno = 0;
+ s = strtoul(buffer, &m, 10);
+ if (unlikely(m == buffer || s == std::numeric_limits<unsigned long>::max() || errno != 0))
+ return _error->Error("Parsing patchfile %s failed: Expected an effected line start", f.Name().c_str());
+ else if (*m == ',') {
+ ++m;
+ e = strtol(m, &c, 10);
+ if (unlikely(m == c || e == std::numeric_limits<unsigned long>::max() || errno != 0))
+ return _error->Error("Parsing patchfile %s failed: Expected an effected line end", f.Name().c_str());
+ if (unlikely(e < s))
+ return _error->Error("Parsing patchfile %s failed: Effected lines end %lu is before start %lu", f.Name().c_str(), e, s);
+ } else {
+ e = s;
+ c = m;
+ }
+ if (s > ch.offset)
+ return _error->Error("Parsing patchfile %s failed: Effected line is after previous effected line", f.Name().c_str());
+ switch(*c) {
+ case 'a':
+ cmdwanted = false;
+ ch.add = NULL;
+ ch.add_cnt = 0;
+ ch.add_len = 0;
+ ch.offset = s;
+ ch.del_cnt = 0;
+ break;
+ case 'c':
+ if (unlikely(s == 0))
+ return _error->Error("Parsing patchfile %s failed: Change command can't effect line zero", f.Name().c_str());
+ cmdwanted = false;
+ ch.add = NULL;
+ ch.add_cnt = 0;
+ ch.add_len = 0;
+ ch.offset = s - 1;
+ ch.del_cnt = e - s + 1;
+ break;
+ case 'd':
+ if (unlikely(s == 0))
+ return _error->Error("Parsing patchfile %s failed: Delete command can't effect line zero", f.Name().c_str());
+ ch.offset = s - 1;
+ ch.del_cnt = e - s + 1;
+ ch.add = NULL;
+ ch.add_cnt = 0;
+ ch.add_len = 0;
+ filechanges.add_change(ch);
+ break;
+ default:
+ return _error->Error("Parsing patchfile %s failed: Unknown command", f.Name().c_str());
+ }
+ } else { /* !cmdwanted */
+ if (strcmp(buffer, ".\n") == 0) {
+ cmdwanted = true;
+ filechanges.add_change(ch);
+ } else {
+ char *last = NULL;
+ char *add;
+ size_t l;
+ if (ch.add)
+ last = ch.add + ch.add_len;
+ l = strlen(buffer);
+ add = add_text.add_easy(buffer, l, last);
+ if (!add) {
+ ch.add_len += l;
+ ch.add_cnt++;
+ } else {
+ if (ch.add) {
+ filechanges.add_change(ch);
+ ch.del_cnt = 0;
+ }
+ ch.offset += ch.add_cnt;
+ ch.add = add;
+ ch.add_len = l;
+ ch.add_cnt = 1;
+ }
+ }
+ }
+ } while(f.ReadLine(buffer, sizeof(buffer)));
+ return true;
+ }
+
+ void write_diff(FileFd &f)
+ {
+ unsigned long long line = 0;
+ std::list<struct Change>::reverse_iterator ch;
+ for (ch = filechanges.rbegin(); ch != filechanges.rend(); ++ch) {
+ line += ch->offset + ch->del_cnt;
+ }
+
+ for (ch = filechanges.rbegin(); ch != filechanges.rend(); ++ch) {
+ std::list<struct Change>::reverse_iterator mg_i, mg_e = ch;
+ while (ch->del_cnt == 0 && ch->offset == 0)
+ {
+ ++ch;
+ if (unlikely(ch == filechanges.rend()))
+ return;
+ }
+ line -= ch->del_cnt;
+ std::string buf;
+ if (ch->add_cnt > 0) {
+ if (ch->del_cnt == 0) {
+ strprintf(buf, "%llua\n", line);
+ } else if (ch->del_cnt == 1) {
+ strprintf(buf, "%lluc\n", line+1);
+ } else {
+ strprintf(buf, "%llu,%lluc\n", line+1, line+ch->del_cnt);
+ }
+ f.Write(buf.c_str(), buf.length());
+
+ mg_i = ch;
+ do {
+ dump_mem(f, mg_i->add, mg_i->add_len, NULL);
+ } while (mg_i-- != mg_e);
+
+ buf = ".\n";
+ f.Write(buf.c_str(), buf.length());
+ } else if (ch->del_cnt == 1) {
+ strprintf(buf, "%llud\n", line+1);
+ f.Write(buf.c_str(), buf.length());
+ } else if (ch->del_cnt > 1) {
+ strprintf(buf, "%llu,%llud\n", line+1, line+ch->del_cnt);
+ f.Write(buf.c_str(), buf.length());
+ }
+ line -= ch->offset;
+ }
+ }
+
+ void apply_against_file(FileFd &out, FileFd &in,
+ Hashes * const start_hash = nullptr, Hashes * const end_hash = nullptr)
+ {
+ std::list<struct Change>::iterator ch;
+ for (ch = filechanges.begin(); ch != filechanges.end(); ++ch) {
+ dump_lines(out, in, ch->offset, start_hash, end_hash);
+ skip_lines(in, ch->del_cnt, start_hash);
+ if (ch->add_len != 0)
+ dump_mem(out, ch->add, ch->add_len, end_hash);
+ }
+ dump_rest(out, in, start_hash, end_hash);
+ out.Flush();
+ }
+};
+
+class RredMethod : public aptMethod {
+ private:
+ bool Debug;
+
+ struct PDiffFile {
+ std::string FileName;
+ HashStringList ExpectedHashes;
+ PDiffFile(std::string const &FileName, HashStringList const &ExpectedHashes) :
+ FileName(FileName), ExpectedHashes(ExpectedHashes) {}
+ };
+
+ HashStringList ReadExpectedHashesForPatch(unsigned int const patch, std::string const &Message)
+ {
+ HashStringList ExpectedHashes;
+ for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
+ {
+ std::string tagname;
+ strprintf(tagname, "Patch-%d-%s-Hash", patch, *type);
+ std::string const hashsum = LookupTag(Message, tagname.c_str());
+ if (hashsum.empty() == false)
+ ExpectedHashes.push_back(HashString(*type, hashsum));
+ }
+ return ExpectedHashes;
+ }
+
+ protected:
+ virtual bool URIAcquire(std::string const &Message, FetchItem *Itm) APT_OVERRIDE {
+ Debug = DebugEnabled();
+ URI Get = Itm->Uri;
+ std::string Path = Get.Host + Get.Path; // rred:/path - no host
+
+ FetchResult Res;
+ Res.Filename = Itm->DestFile;
+ if (Itm->Uri.empty())
+ {
+ Path = Itm->DestFile;
+ Itm->DestFile.append(".result");
+ } else
+ URIStart(Res);
+
+ std::vector<PDiffFile> patchfiles;
+ Patch patch;
+
+ HashStringList StartHashes;
+ for (char const * const * type = HashString::SupportedHashes(); *type != nullptr; ++type)
+ {
+ std::string tagname;
+ strprintf(tagname, "Start-%s-Hash", *type);
+ std::string const hashsum = LookupTag(Message, tagname.c_str());
+ if (hashsum.empty() == false)
+ StartHashes.push_back(HashString(*type, hashsum));
+ }
+
+ if (FileExists(Path + ".ed") == true)
+ {
+ HashStringList const ExpectedHashes = ReadExpectedHashesForPatch(0, Message);
+ std::string const FileName = Path + ".ed";
+ if (ExpectedHashes.usable() == false)
+ return _error->Error("No hashes found for uncompressed patch: %s", FileName.c_str());
+ patchfiles.push_back(PDiffFile(FileName, ExpectedHashes));
+ }
+ else
+ {
+ _error->PushToStack();
+ std::vector<std::string> patches = GetListOfFilesInDir(flNotFile(Path), "gz", true, false);
+ _error->RevertToStack();
+
+ std::string const baseName = Path + ".ed.";
+ unsigned int seen_patches = 0;
+ for (std::vector<std::string>::const_iterator p = patches.begin();
+ p != patches.end(); ++p)
+ {
+ if (p->compare(0, baseName.length(), baseName) == 0)
+ {
+ HashStringList const ExpectedHashes = ReadExpectedHashesForPatch(seen_patches, Message);
+ if (ExpectedHashes.usable() == false)
+ return _error->Error("No hashes found for uncompressed patch %d: %s", seen_patches, p->c_str());
+ patchfiles.push_back(PDiffFile(*p, ExpectedHashes));
+ ++seen_patches;
+ }
+ }
+ }
+
+ std::string patch_name;
+ for (std::vector<PDiffFile>::iterator I = patchfiles.begin();
+ I != patchfiles.end();
+ ++I)
+ {
+ patch_name = I->FileName;
+ if (Debug == true)
+ std::clog << "Patching " << Path << " with " << patch_name
+ << std::endl;
+
+ FileFd p;
+ Hashes patch_hash(I->ExpectedHashes);
+ // all patches are compressed, even if the name doesn't reflect it
+ if (p.Open(patch_name, FileFd::ReadOnly, FileFd::Gzip) == false ||
+ patch.read_diff(p, &patch_hash) == false)
+ {
+ _error->DumpErrors(std::cerr, GlobalError::DEBUG, false);
+ return false;
+ }
+ p.Close();
+ HashStringList const hsl = patch_hash.GetHashStringList();
+ if (hsl != I->ExpectedHashes)
+ return _error->Error("Hash Sum mismatch for uncompressed patch %s", patch_name.c_str());
+ }
+
+ if (Debug == true)
+ std::clog << "Applying patches against " << Path
+ << " and writing results to " << Itm->DestFile
+ << std::endl;
+
+ FileFd inp, out;
+ if (inp.Open(Path, FileFd::ReadOnly, FileFd::Extension) == false)
+ {
+ if (Debug == true)
+ std::clog << "FAILED to open inp " << Path << std::endl;
+ return _error->Error("Failed to open inp %s", Path.c_str());
+ }
+ if (out.Open(Itm->DestFile, FileFd::WriteOnly | FileFd::Create | FileFd::Empty | FileFd::BufferedWrite, FileFd::Extension) == false)
+ {
+ if (Debug == true)
+ std::clog << "FAILED to open out " << Itm->DestFile << std::endl;
+ return _error->Error("Failed to open out %s", Itm->DestFile.c_str());
+ }
+
+ Hashes end_hash(Itm->ExpectedHashes);
+ if (StartHashes.usable())
+ {
+ Hashes start_hash(StartHashes);
+ patch.apply_against_file(out, inp, &start_hash, &end_hash);
+ if (start_hash.GetHashStringList() != StartHashes)
+ _error->Error("The input file hadn't the expected hash!");
+ }
+ else
+ patch.apply_against_file(out, inp, nullptr, &end_hash);
+
+ out.Close();
+ inp.Close();
+
+ if (_error->PendingError() == true) {
+ if (Debug == true)
+ std::clog << "FAILED to read or write files" << std::endl;
+ return false;
+ }
+
+ if (Debug == true) {
+ std::clog << "rred: finished file patching of " << Path << "." << std::endl;
+ }
+
+ struct stat bufbase, bufpatch;
+ if (stat(Path.c_str(), &bufbase) != 0 ||
+ stat(patch_name.c_str(), &bufpatch) != 0)
+ return _error->Errno("stat", _("Failed to stat %s"), Path.c_str());
+
+ struct timeval times[2];
+ times[0].tv_sec = bufbase.st_atime;
+ times[1].tv_sec = bufpatch.st_mtime;
+ times[0].tv_usec = times[1].tv_usec = 0;
+ if (utimes(Itm->DestFile.c_str(), times) != 0)
+ return _error->Errno("utimes",_("Failed to set modification time"));
+
+ if (stat(Itm->DestFile.c_str(), &bufbase) != 0)
+ return _error->Errno("stat", _("Failed to stat %s"), Itm->DestFile.c_str());
+
+ Res.LastModified = bufbase.st_mtime;
+ Res.Size = bufbase.st_size;
+ Res.TakeHashes(end_hash);
+ URIDone(Res);
+
+ return true;
+ }
+
+ public:
+ RredMethod() : aptMethod("rred", "2.0", SendConfig), Debug(false) {}
+};
+
+int main(int argc, char **argv)
+{
+ int i;
+ bool just_diff = true;
+ bool test = false;
+ Patch patch;
+
+ if (argc <= 1) {
+ return RredMethod().Run();
+ }
+
+ // Usage: rred -t input output diff ...
+ if (argc > 1 && strcmp(argv[1], "-t") == 0) {
+ // Read config files so we see compressors.
+ pkgInitConfig(*_config);
+ just_diff = false;
+ test = true;
+ i = 4;
+ } else if (argc > 1 && strcmp(argv[1], "-f") == 0) {
+ just_diff = false;
+ i = 2;
+ } else {
+ i = 1;
+ }
+
+ for (; i < argc; i++) {
+ FileFd p;
+ if (p.Open(argv[i], FileFd::ReadOnly) == false) {
+ _error->DumpErrors(std::cerr);
+ exit(1);
+ }
+ if (patch.read_diff(p, NULL) == false)
+ {
+ _error->DumpErrors(std::cerr);
+ exit(2);
+ }
+ }
+
+ if (test) {
+ FileFd out, inp;
+ std::cerr << "Patching " << argv[2] << " into " << argv[3] << "\n";
+ inp.Open(argv[2], FileFd::ReadOnly,FileFd::Extension);
+ out.Open(argv[3], FileFd::WriteOnly | FileFd::Create | FileFd::Empty | FileFd::BufferedWrite, FileFd::Extension);
+ patch.apply_against_file(out, inp);
+ out.Close();
+ } else if (just_diff) {
+ FileFd out;
+ out.OpenDescriptor(STDOUT_FILENO, FileFd::WriteOnly | FileFd::Create);
+ patch.write_diff(out);
+ out.Close();
+ } else {
+ FileFd out, inp;
+ out.OpenDescriptor(STDOUT_FILENO, FileFd::WriteOnly | FileFd::Create | FileFd::BufferedWrite);
+ inp.OpenDescriptor(STDIN_FILENO, FileFd::ReadOnly);
+ patch.apply_against_file(out, inp);
+ out.Close();
+ }
+ return 0;
+}
diff --git a/methods/rsh.cc b/methods/rsh.cc
new file mode 100644
index 000000000..7b8af6f9b
--- /dev/null
+++ b/methods/rsh.cc
@@ -0,0 +1,548 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: rsh.cc,v 1.6.2.1 2004/01/16 18:58:50 mdz Exp $
+/* ######################################################################
+
+ RSH method - Transfer files via rsh compatible program
+
+ Written by Ben Collins <bcollins@debian.org>, Copyright (c) 2000
+ Licensed under the GNU General Public License v2 [no exception clauses]
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <config.h>
+
+#include <apt-pkg/error.h>
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/hashes.h>
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/strutl.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdarg.h>
+#include "rsh.h"
+
+#include <apti18n.h>
+ /*}}}*/
+
+unsigned long TimeOut = 120;
+Configuration::Item const *RshOptions = 0;
+time_t RSHMethod::FailTime = 0;
+std::string RSHMethod::FailFile;
+int RSHMethod::FailFd = -1;
+
+// RSHConn::RSHConn - Constructor /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+RSHConn::RSHConn(std::string const &pProg, URI Srv) : Len(0), WriteFd(-1), ReadFd(-1),
+ ServerName(Srv), Prog(pProg), Process(-1) {
+ Buffer[0] = '\0';
+}
+ /*}}}*/
+// RSHConn::RSHConn - Destructor /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+RSHConn::~RSHConn()
+{
+ Close();
+}
+ /*}}}*/
+// RSHConn::Close - Forcibly terminate the connection /*{{{*/
+// ---------------------------------------------------------------------
+/* Often this is called when things have gone wrong to indicate that the
+ connection is no longer usable. */
+void RSHConn::Close()
+{
+ if (Process == -1)
+ return;
+
+ close(WriteFd);
+ close(ReadFd);
+ kill(Process,SIGINT);
+ ExecWait(Process,"",true);
+ WriteFd = -1;
+ ReadFd = -1;
+ Process = -1;
+}
+ /*}}}*/
+// RSHConn::Open - Connect to a host /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool RSHConn::Open()
+{
+ // Use the already open connection if possible.
+ if (Process != -1)
+ return true;
+
+ if (Connect(ServerName.Host,ServerName.Port,ServerName.User) == false)
+ return false;
+
+ return true;
+}
+ /*}}}*/
+// RSHConn::Connect - Fire up rsh and connect /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool RSHConn::Connect(std::string Host, unsigned int Port, std::string User)
+{
+ char *PortStr = NULL;
+ if (Port != 0)
+ {
+ if (asprintf (&PortStr, "%d", Port) == -1 || PortStr == NULL)
+ return _error->Errno("asprintf", _("Failed"));
+ }
+
+ // Create the pipes
+ int Pipes[4] = {-1,-1,-1,-1};
+ if (pipe(Pipes) != 0 || pipe(Pipes+2) != 0)
+ {
+ _error->Errno("pipe",_("Failed to create IPC pipe to subprocess"));
+ for (int I = 0; I != 4; I++)
+ close(Pipes[I]);
+ return false;
+ }
+ for (int I = 0; I != 4; I++)
+ SetCloseExec(Pipes[I],true);
+
+ Process = ExecFork();
+
+ // The child
+ if (Process == 0)
+ {
+ const char *Args[400];
+ unsigned int i = 0;
+
+ dup2(Pipes[1],STDOUT_FILENO);
+ dup2(Pipes[2],STDIN_FILENO);
+
+ // Probably should do
+ // dup2(open("/dev/null",O_RDONLY),STDERR_FILENO);
+
+ Args[i++] = Prog.c_str();
+
+ // Insert user-supplied command line options
+ Configuration::Item const *Opts = RshOptions;
+ if (Opts != 0)
+ {
+ Opts = Opts->Child;
+ for (; Opts != 0; Opts = Opts->Next)
+ {
+ if (Opts->Value.empty() == true)
+ continue;
+ Args[i++] = Opts->Value.c_str();
+ }
+ }
+
+ if (User.empty() == false) {
+ Args[i++] = "-l";
+ Args[i++] = User.c_str();
+ }
+ if (PortStr != NULL) {
+ Args[i++] = "-p";
+ Args[i++] = PortStr;
+ }
+ if (Host.empty() == false) {
+ Args[i++] = Host.c_str();
+ }
+ Args[i++] = "/bin/sh";
+ Args[i] = 0;
+ execvp(Args[0],(char **)Args);
+ exit(100);
+ }
+
+ if (PortStr != NULL)
+ free(PortStr);
+
+ ReadFd = Pipes[0];
+ WriteFd = Pipes[3];
+ SetNonBlock(Pipes[0],true);
+ SetNonBlock(Pipes[3],true);
+ close(Pipes[1]);
+ close(Pipes[2]);
+
+ return true;
+}
+bool RSHConn::Connect(std::string Host, std::string User)
+{
+ return Connect(Host, 0, User);
+}
+ /*}}}*/
+// RSHConn::ReadLine - Very simple buffered read with timeout /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool RSHConn::ReadLine(std::string &Text)
+{
+ if (Process == -1 || ReadFd == -1)
+ return false;
+
+ // Suck in a line
+ while (Len < sizeof(Buffer))
+ {
+ // Scan the buffer for a new line
+ for (unsigned int I = 0; I != Len; I++)
+ {
+ // Escape some special chars
+ if (Buffer[I] == 0)
+ Buffer[I] = '?';
+
+ // End of line?
+ if (Buffer[I] != '\n')
+ continue;
+
+ I++;
+ Text = std::string(Buffer,I);
+ memmove(Buffer,Buffer+I,Len - I);
+ Len -= I;
+ return true;
+ }
+
+ // Wait for some data..
+ if (WaitFd(ReadFd,false,TimeOut) == false)
+ {
+ Close();
+ return _error->Error(_("Connection timeout"));
+ }
+
+ // Suck it back
+ int Res = read(ReadFd,Buffer + Len,sizeof(Buffer) - Len);
+ if (Res <= 0)
+ {
+ _error->Errno("read",_("Read error"));
+ Close();
+ return false;
+ }
+ Len += Res;
+ }
+
+ return _error->Error(_("A response overflowed the buffer."));
+}
+ /*}}}*/
+// RSHConn::WriteMsg - Send a message with optional remote sync. /*{{{*/
+// ---------------------------------------------------------------------
+/* The remote sync flag appends a "|| echo" which will insert a blank
+   line once the command completes. */
+bool RSHConn::WriteMsg(std::string &Text,bool Sync,const char *Fmt,...)
+{
+ va_list args;
+ va_start(args,Fmt);
+
+ // sprintf into a buffer
+ char Tmp[1024];
+ vsnprintf(Tmp,sizeof(Tmp),Fmt,args);
+ va_end(args);
+
+ // concat to create the real msg
+ std::string Msg;
+ if (Sync == true)
+ Msg = std::string(Tmp) + " 2> /dev/null || echo\n";
+ else
+ Msg = std::string(Tmp) + " 2> /dev/null\n";
+
+ // Send it off
+ const char *S = Msg.c_str();
+ unsigned long Len = strlen(S);
+ unsigned long Start = 0;
+ while (Len != 0)
+ {
+ if (WaitFd(WriteFd,true,TimeOut) == false)
+ {
+
+ Close();
+ return _error->Error(_("Connection timeout"));
+ }
+
+ int Res = write(WriteFd,S + Start,Len);
+ if (Res <= 0)
+ {
+ _error->Errno("write",_("Write error"));
+ Close();
+ return false;
+ }
+
+ Len -= Res;
+ Start += Res;
+ }
+
+ if (Sync == true)
+ return ReadLine(Text);
+ return true;
+}
+ /*}}}*/
+// RSHConn::Size - Return the size of the file /*{{{*/
+// ---------------------------------------------------------------------
+/* Right now for successful transfer the file size must be known in
+ advance. */
+bool RSHConn::Size(const char *Path,unsigned long long &Size)
+{
+ // Query the size
+ std::string Msg;
+ Size = 0;
+
+ if (WriteMsg(Msg,true,"find %s -follow -printf '%%s\\n'",Path) == false)
+ return false;
+
+ // FIXME: Sense if the bad reply is due to a File Not Found.
+
+ char *End;
+ Size = strtoull(Msg.c_str(),&End,10);
+ if (End == Msg.c_str())
+ return _error->Error(_("File not found"));
+ return true;
+}
+ /*}}}*/
+// RSHConn::ModTime - Get the modification time in UTC /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool RSHConn::ModTime(const char *Path, time_t &Time)
+{
+ Time = time(&Time);
+ // Query the mod time
+ std::string Msg;
+
+ if (WriteMsg(Msg,true,"TZ=UTC find %s -follow -printf '%%TY%%Tm%%Td%%TH%%TM%%TS\\n'",Path) == false)
+ return false;
+
+ // Parse it
+ return FTPMDTMStrToTime(Msg.c_str(), Time);
+}
+ /*}}}*/
+// RSHConn::Get - Get a file /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool RSHConn::Get(const char *Path,FileFd &To,unsigned long long Resume,
+ Hashes &Hash,bool &Missing, unsigned long long Size)
+{
+ Missing = false;
+
+ // Round to a 2048 byte block
+ Resume = Resume - (Resume % 2048);
+
+ if (To.Truncate(Resume) == false)
+ return false;
+ if (To.Seek(0) == false)
+ return false;
+
+ if (Resume != 0) {
+ if (Hash.AddFD(To,Resume) == false) {
+ _error->Errno("read",_("Problem hashing file"));
+ return false;
+ }
+ }
+
+ // FIXME: Detect file-not openable type errors.
+ std::string Jnk;
+ if (WriteMsg(Jnk,false,"dd if=%s bs=2048 skip=%u", Path, Resume / 2048) == false)
+ return false;
+
+ // Copy loop
+ unsigned long long MyLen = Resume;
+ unsigned char Buffer[4096];
+ while (MyLen < Size)
+ {
+ // Wait for some data..
+ if (WaitFd(ReadFd,false,TimeOut) == false)
+ {
+ Close();
+ return _error->Error(_("Data socket timed out"));
+ }
+
+ // Read the data..
+ int Res = read(ReadFd,Buffer,sizeof(Buffer));
+ if (Res == 0)
+ {
+ Close();
+ return _error->Error(_("Connection closed prematurely"));
+ }
+
+ if (Res < 0)
+ {
+ if (errno == EAGAIN)
+ continue;
+ break;
+ }
+ MyLen += Res;
+
+ Hash.Add(Buffer,Res);
+ if (To.Write(Buffer,Res) == false)
+ {
+ Close();
+ return false;
+ }
+ }
+
+ return true;
+}
+ /*}}}*/
+
+// RSHMethod::RSHMethod - Constructor /*{{{*/
+RSHMethod::RSHMethod(std::string &&pProg) : aptMethod(std::move(pProg),"1.0",SendConfig)
+{
+ signal(SIGTERM,SigTerm);
+ signal(SIGINT,SigTerm);
+ Server = 0;
+ FailFd = -1;
+}
+ /*}}}*/
+// RSHMethod::Configuration - Handle a configuration message /*{{{*/
+// ---------------------------------------------------------------------
+bool RSHMethod::Configuration(std::string Message)
+{
+ // enabling privilege dropping for this method requires configuration…
+ // … which is otherwise lifted straight from root, so use it by default.
+ _config->Set(std::string("Binary::") + Binary + "::APT::Sandbox::User", "");
+
+ if (aptMethod::Configuration(Message) == false)
+ return false;
+
+ std::string const timeconf = std::string("Acquire::") + Binary + "::Timeout";
+ TimeOut = _config->FindI(timeconf, TimeOut);
+ std::string const optsconf = std::string("Acquire::") + Binary + "::Options";
+ RshOptions = _config->Tree(optsconf.c_str());
+
+ return true;
+}
+ /*}}}*/
+// RSHMethod::SigTerm - Clean up and timestamp the files on exit /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+void RSHMethod::SigTerm(int)
+{
+ if (FailFd == -1)
+ _exit(100);
+
+ // Transfer the modification times
+ struct timeval times[2];
+ times[0].tv_sec = FailTime;
+ times[1].tv_sec = FailTime;
+ times[0].tv_usec = times[1].tv_usec = 0;
+ utimes(FailFile.c_str(), times);
+ close(FailFd);
+
+ _exit(100);
+}
+ /*}}}*/
+// RSHMethod::Fetch - Fetch a URI /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool RSHMethod::Fetch(FetchItem *Itm)
+{
+ URI Get = Itm->Uri;
+ const char *File = Get.Path.c_str();
+ FetchResult Res;
+ Res.Filename = Itm->DestFile;
+ Res.IMSHit = false;
+
+ // Connect to the server
+ if (Server == 0 || Server->Comp(Get) == false) {
+ delete Server;
+ Server = new RSHConn(Binary, Get);
+ }
+
+ // Could not connect is a transient error..
+ if (Server->Open() == false) {
+ Server->Close();
+ Fail(true);
+ return true;
+ }
+
+ // Show this status mainly because of the pause while the
+ // ssh connection is still being established
+ Status(_("Connecting to %s"), Get.Host.c_str());
+
+ // Get the files information
+ unsigned long long Size;
+ if (Server->Size(File,Size) == false ||
+ Server->ModTime(File,FailTime) == false)
+ {
+ //Fail(true);
+ //_error->Error(_("File not found")); // Will be handled by Size
+ return false;
+ }
+ Res.Size = Size;
+
+ // See if it is an IMS hit
+ if (Itm->LastModified == FailTime) {
+ Res.Size = 0;
+ Res.IMSHit = true;
+ URIDone(Res);
+ return true;
+ }
+
+ // See if the file exists
+ struct stat Buf;
+ if (stat(Itm->DestFile.c_str(),&Buf) == 0) {
+ if (Size == (unsigned long long)Buf.st_size && FailTime == Buf.st_mtime) {
+ Res.Size = Buf.st_size;
+ Res.LastModified = Buf.st_mtime;
+ Res.ResumePoint = Buf.st_size;
+ URIDone(Res);
+ return true;
+ }
+
+ // Resume?
+ if (FailTime == Buf.st_mtime && Size > (unsigned long long)Buf.st_size)
+ Res.ResumePoint = Buf.st_size;
+ }
+
+ // Open the file
+ Hashes Hash(Itm->ExpectedHashes);
+ {
+ FileFd Fd(Itm->DestFile,FileFd::WriteAny);
+ if (_error->PendingError() == true)
+ return false;
+
+ URIStart(Res);
+
+ FailFile = Itm->DestFile;
+ FailFile.c_str(); // Make sure we don't do a malloc in the signal handler
+ FailFd = Fd.Fd();
+
+ bool Missing;
+ if (Server->Get(File,Fd,Res.ResumePoint,Hash,Missing,Res.Size) == false)
+ {
+ Fd.Close();
+
+ // Timestamp
+ struct timeval times[2];
+ times[0].tv_sec = FailTime;
+ times[1].tv_sec = FailTime;
+ times[0].tv_usec = times[1].tv_usec = 0;
+ utimes(FailFile.c_str(), times);
+
+ // If the file is missing we hard fail otherwise transient fail
+ if (Missing == true)
+ return false;
+ Fail(true);
+ return true;
+ }
+
+ Res.Size = Fd.Size();
+ struct timeval times[2];
+ times[0].tv_sec = FailTime;
+ times[1].tv_sec = FailTime;
+ times[0].tv_usec = times[1].tv_usec = 0;
+ utimes(Fd.Name().c_str(), times);
+ FailFd = -1;
+ }
+
+ Res.LastModified = FailTime;
+ Res.TakeHashes(Hash);
+
+ URIDone(Res);
+
+ return true;
+}
+ /*}}}*/
+
+int main(int, const char *argv[])
+{
+ return RSHMethod(flNotDir(argv[0])).Run();
+}
diff --git a/methods/rsh.h b/methods/rsh.h
new file mode 100644
index 000000000..dee4ad647
--- /dev/null
+++ b/methods/rsh.h
@@ -0,0 +1,76 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: rsh.h,v 1.4 2002/11/09 23:33:26 doogie Exp $
+/* ######################################################################
+
+ RSH method - Transfer files via rsh compatible program
+
+ ##################################################################### */
+ /*}}}*/
+#ifndef APT_RSH_H
+#define APT_RSH_H
+
+#include <string>
+#include <time.h>
+
+#include <apt-pkg/strutl.h>
+
+class Hashes;
+class FileFd;
+
+class RSHConn // one rsh/ssh-style transport child process plus the pipes used to talk to it
+{
+   char Buffer[1024*10];   // staging buffer for reply data read from the child
+   unsigned long Len;      // number of valid bytes currently held in Buffer
+   int WriteFd;            // fd for sending commands to the child process
+   int ReadFd;             // fd for reading the child's output
+   URI ServerName;         // server this connection targets (see Comp())
+   std::string const Prog; // transport program to execute (e.g. "rsh" or "ssh")
+
+   // Private helper functions
+   bool ReadLine(std::string &Text); // read one line of reply text into Text
+
+   public:
+
+   pid_t Process; // pid of the spawned transport process
+
+   // Raw connection IO
+   bool WriteMsg(std::string &Text,bool Sync,const char *Fmt,...); // printf-style command writer; Text receives reply output — exact Sync semantics live in rsh.cc
+   bool Connect(std::string Host, std::string User);
+   bool Connect(std::string Host, unsigned int Port, std::string User);
+   bool Comp(URI Other) const {return Other.Host == ServerName.Host && Other.Port == ServerName.Port;}; // true iff Other refers to the same host/port as this connection
+
+   // Connection control
+   bool Open();
+   void Close();
+
+   // Query
+   bool Size(const char *Path,unsigned long long &Size); // query size of remote Path into Size
+   bool ModTime(const char *Path, time_t &Time); // query modification time of remote Path into Time
+   bool Get(const char *Path,FileFd &To,unsigned long long Resume,
+            Hashes &Hash,bool &Missing, unsigned long long Size); // fetch Path into To (resuming at Resume), feeding Hash; Missing is set when the remote file is absent
+
+   RSHConn(std::string const &Prog, URI Srv);
+   ~RSHConn();
+};
+
+#include "aptmethod.h"
+
+class RSHMethod : public aptMethod
+{
+   virtual bool Fetch(FetchItem *Itm) APT_OVERRIDE;
+   virtual bool Configuration(std::string Message) APT_OVERRIDE;
+
+   RSHConn *Server; // connection to the current server, if any
+
+   static std::string FailFile; // destination of the in-progress fetch (static so the signal handler can reach it without `this`)
+   static int FailFd;           // open fd of that file; -1 while no transfer is in progress
+   static time_t FailTime;      // server mtime to stamp onto a partially fetched file on abort
+   static APT_NORETURN void SigTerm(int); // abort signal handler; uses only the statics above (must not allocate)
+
+   public:
+
+   explicit RSHMethod(std::string &&Prog);
+};
+
+#endif
diff --git a/methods/store.cc b/methods/store.cc
new file mode 100644
index 000000000..1faaa4fb4
--- /dev/null
+++ b/methods/store.cc
@@ -0,0 +1,146 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+/* ######################################################################
+
+ Store method - Takes a file URI and stores its content (for which it will
+ calculate the hashes) in the given destination. The input file will be
+ extracted based on its file extension (or with the given compressor if
+ called with one of the compatible symlinks) and potentially recompressed
+ based on the file extension of the destination filename.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <config.h>
+
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/hashes.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/aptconfiguration.h>
+#include "aptmethod.h"
+
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <string>
+#include <vector>
+
+#include <apti18n.h>
+ /*}}}*/
+
+class StoreMethod : public aptMethod
+{
+   virtual bool Fetch(FetchItem *Itm) APT_OVERRIDE;
+
+   public:
+
+   explicit StoreMethod(std::string &&pProg) : aptMethod(std::move(pProg),"1.2",SingleInstance | SendConfig)
+   {
+      if (Binary != "store") // invoked via a compressor-named symlink (gzip, xz, ...): also register under "store"
+         methodNames.insert(methodNames.begin(), "store");
+   }
+};
+
+static bool OpenFileWithCompressorByName(FileFd &fileFd, std::string const &Filename, unsigned int const Mode, std::string const &Name) // open Filename through the compressor called Name ("store" = pick by file extension)
+{
+   if (Name == "store") // no explicit compressor requested: let FileFd choose one from the extension
+      return fileFd.Open(Filename, Mode, FileFd::Extension);
+
+   std::vector<APT::Configuration::Compressor> const compressors = APT::Configuration::getCompressors();
+   std::vector<APT::Configuration::Compressor>::const_iterator compressor = compressors.begin();
+   for (; compressor != compressors.end(); ++compressor) // linear scan of the configured compressors for one matching Name
+      if (compressor->Name == Name)
+         break;
+   if (compressor == compressors.end()) // hard error: caller asked for a compressor apt doesn't know about
+      return _error->Error("Extraction of file %s requires unknown compressor %s", Filename.c_str(), Name.c_str());
+   return fileFd.Open(Filename, Mode, *compressor);
+}
+
+
+ /*}}}*/
+bool StoreMethod::Fetch(FetchItem *Itm) /*{{{*/
+{
+   URI Get = Itm->Uri;
+   std::string Path = Get.Host + Get.Path; // To account for relative paths
+
+   FetchResult Res;
+   Res.Filename = Itm->DestFile;
+   URIStart(Res);
+
+   // Open the source and destination files
+   FileFd From;
+   if (_config->FindB("Method::Compress", false) == false) // decompress mode: open the source through the compressor named by our binary
+   {
+      if (OpenFileWithCompressorByName(From, Path, FileFd::ReadOnly, Binary) == false)
+	 return false;
+      if(From.IsCompressed() && From.FileSize() == 0) // a zero-byte compressed file can never decode to anything valid
+	 return _error->Error(_("Empty files can't be valid archives"));
+   }
+   else
+      From.Open(Path, FileFd::ReadOnly, FileFd::Extension); // compress mode: extension decides how the source is read
+   if (From.IsOpen() == false || From.Failed() == true)
+      return false;
+
+   FileFd To;
+   if (Itm->DestFile != "/dev/null" && Itm->DestFile != Path) // skip writing when output is discarded or would clobber the input
+   {
+      if (_config->FindB("Method::Compress", false) == false)
+	 To.Open(Itm->DestFile, FileFd::WriteOnly | FileFd::Create | FileFd::Atomic, FileFd::Extension); // recompress per destination filename extension
+      else if (OpenFileWithCompressorByName(To, Itm->DestFile, FileFd::WriteOnly | FileFd::Create | FileFd::Empty, Binary) == false)
+	 return false;
+
+      if (To.IsOpen() == false || To.Failed() == true)
+	 return false;
+      To.EraseOnFailure(); // don't leave a partial destination file behind on error
+   }
+
+   // Read data from source, generate checksums and write
+   Hashes Hash(Itm->ExpectedHashes);
+   bool Failed = false;
+   Res.Size = 0;
+   while (1)
+   {
+      unsigned char Buffer[4*1024];
+      unsigned long long Count = 0;
+
+      if (!From.Read(Buffer,sizeof(Buffer),&Count)) // read error: mark the destination failed and bail
+      {
+	 if (To.IsOpen())
+	    To.OpFail();
+	 return false;
+      }
+      if (Count == 0) // EOF
+	 break;
+      Res.Size += Count; // bytes read from the (possibly decompressed) source stream
+
+      Hash.Add(Buffer,Count); // hashes are computed over the stream as read, not the written form
+      if (To.IsOpen() && To.Write(Buffer,Count) == false)
+      {
+	 Failed = true;
+	 break;
+      }
+   }
+
+   From.Close();
+   To.Close();
+
+   if (Failed == true)
+      return false;
+
+   if (TransferModificationTimes(Path.c_str(), Itm->DestFile.c_str(), Res.LastModified) == false) // propagate the source mtime to the destination
+      return false;
+
+   // Return a Done response
+   Res.TakeHashes(Hash);
+
+   URIDone(Res);
+   return true;
+}
+ /*}}}*/
+
+int main(int, char *argv[])
+{
+   return StoreMethod(flNotDir(argv[0])).Run(); // method name = binary's basename ("store" or a compressor name like "gzip")
+}