From d415fc795a69d6a5039964e88f97561183d6ab44 Mon Sep 17 00:00:00 2001 From: David Kalnischkies Date: Sun, 31 Jul 2016 18:46:34 +0200 Subject: support all socks proxies known to curl in the https method --- methods/https.cc | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/methods/https.cc b/methods/https.cc index bbdbd8fa9..50121ec4d 100644 --- a/methods/https.cc +++ b/methods/https.cc @@ -205,9 +205,20 @@ void HttpsMethod::SetupProxy() /*{{{*/ } // Determine what host and port to use based on the proxy settings - if (UseProxy.empty() == false) + if (UseProxy.empty() == false) { Proxy = UseProxy; + if (Proxy.Access == "socks5h") + curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5_HOSTNAME); + else if (Proxy.Access == "socks5") + curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5); + else if (Proxy.Access == "socks4a") + curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4A); + else if (Proxy.Access == "socks") + curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4); + else + curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_HTTP); + if (Proxy.Port != 1) curl_easy_setopt(curl, CURLOPT_PROXYPORT, Proxy.Port); curl_easy_setopt(curl, CURLOPT_PROXY, Proxy.Host.c_str()); -- cgit v1.2.3 From d0ef571416e1ff6266c89e6285898d269768cf8f Mon Sep 17 00:00:00 2001 From: David Kalnischkies Date: Mon, 1 Aug 2016 17:52:55 +0200 Subject: suggest transport packages based on the established name scheme apt-transports not shipped in apt directly are usually named apt-transport-% with % being the name of the transport. tor additionally introduced aliases via %+something, which isn't a bad idea, so we strip the +something part from the method name before suggesting the installation of an apt-transport-% package. This avoids maintaining a list of existing transports, which would create a two-class system of known and unknown transports that is quite arbitrary and unfriendly to backports. 
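The mapping described above reduces to stripping everything from the first '+' in the method name and prefixing the rest with apt-transport-. The short sketch below is illustrative only (not part of the patch, and the helper name suggestedPackage is made up); it mirrors the acquire-worker.cc hunk that follows:

    #include <algorithm>
    #include <iostream>
    #include <string>

    // Illustrative only: mirrors the stripping done in acquire-worker.cc below.
    // Both "tor+https" and "tor" end up suggesting "apt-transport-tor".
    static std::string suggestedPackage(std::string const &Access)
    {
       std::string const A(Access.begin(), std::find(Access.begin(), Access.end(), '+'));
       return "apt-transport-" + A;
    }

    int main()
    {
       std::cout << suggestedPackage("tor+https") << '\n'; // apt-transport-tor
       std::cout << suggestedPackage("ftp") << '\n';       // apt-transport-ftp
    }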
--- apt-pkg/acquire-worker.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/apt-pkg/acquire-worker.cc b/apt-pkg/acquire-worker.cc index 9ed7b5b28..39cc55bdf 100644 --- a/apt-pkg/acquire-worker.cc +++ b/apt-pkg/acquire-worker.cc @@ -97,8 +97,10 @@ bool pkgAcquire::Worker::Start() if (FileExists(Method) == false) { _error->Error(_("The method driver %s could not be found."),Method.c_str()); - if (Access == "https") - _error->Notice(_("Is the package %s installed?"), "apt-transport-https"); + std::string const A(Access.cbegin(), std::find(Access.cbegin(), Access.cend(), '+')); + std::string pkg; + strprintf(pkg, "apt-transport-%s", A.c_str()); + _error->Notice(_("Is the package %s installed?"), pkg.c_str()); return false; } -- cgit v1.2.3 From ece81b7517b1af6f86aff733498f6c11d5aa814f Mon Sep 17 00:00:00 2001 From: David Kalnischkies Date: Wed, 3 Aug 2016 21:17:26 +0200 Subject: fail on unsupported http/https proxy settings Closes: #623443 --- methods/http.cc | 2 ++ methods/https.cc | 14 +++++---- methods/https.h | 2 +- .../test-bug-623443-fail-on-bad-proxies | 33 ++++++++++++++++++++++ 4 files changed, 45 insertions(+), 6 deletions(-) create mode 100755 test/integration/test-bug-623443-fail-on-bad-proxies diff --git a/methods/http.cc b/methods/http.cc index 9fcc80103..c61ca1c3f 100644 --- a/methods/http.cc +++ b/methods/http.cc @@ -347,6 +347,8 @@ bool HttpServerState::Open() Port = ServerName.Port; Host = ServerName.Host; } + else if (Proxy.Access != "http") + return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str()); else { if (Proxy.Port != 0) diff --git a/methods/https.cc b/methods/https.cc index 50121ec4d..7c0c3241d 100644 --- a/methods/https.cc +++ b/methods/https.cc @@ -162,7 +162,7 @@ APT_PURE Hashes * HttpsServerState::GetHashes() /*{{{*/ } /*}}}*/ -void HttpsMethod::SetupProxy() /*{{{*/ +bool HttpsMethod::SetupProxy() /*{{{*/ { URI ServerName = Queue->Uri; @@ -184,12 +184,12 @@ void HttpsMethod::SetupProxy() /*{{{*/ // User want to use NO proxy, so nothing to setup if (UseProxy == "DIRECT") - return; + return true; // Parse no_proxy, a comma (,) separated list of domains we don't want to use // a proxy for so we stop right here if it is in the list if (getenv("no_proxy") != 0 && CheckDomainList(ServerName.Host,getenv("no_proxy")) == true) - return; + return true; if (UseProxy.empty() == true) { @@ -216,8 +216,10 @@ void HttpsMethod::SetupProxy() /*{{{*/ curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4A); else if (Proxy.Access == "socks") curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4); - else + else if (Proxy.Access == "http" || Proxy.Access == "https") curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_HTTP); + else + return false; if (Proxy.Port != 1) curl_easy_setopt(curl, CURLOPT_PROXYPORT, Proxy.Port); @@ -228,6 +230,7 @@ void HttpsMethod::SetupProxy() /*{{{*/ curl_easy_setopt(curl, CURLOPT_PROXYPASSWORD, Proxy.Password.c_str()); } } + return true; } /*}}}*/ // HttpsMethod::Fetch - Fetch an item /*{{{*/ // --------------------------------------------------------------------- @@ -247,7 +250,8 @@ bool HttpsMethod::Fetch(FetchItem *Itm) // - more debug options? (CURLOPT_DEBUGFUNCTION?) 
curl_easy_reset(curl); - SetupProxy(); + if (SetupProxy() == false) + return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str()); maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc")); diff --git a/methods/https.h b/methods/https.h index 2fa714c7b..8592570c6 100644 --- a/methods/https.h +++ b/methods/https.h @@ -64,7 +64,7 @@ class HttpsMethod : public ServerMethod static size_t write_data(void *buffer, size_t size, size_t nmemb, void *userp); static int progress_callback(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow); - void SetupProxy(); + bool SetupProxy(); CURL *curl; // Used by ServerMethods unused by https diff --git a/test/integration/test-bug-623443-fail-on-bad-proxies b/test/integration/test-bug-623443-fail-on-bad-proxies new file mode 100755 index 000000000..04542e0cd --- /dev/null +++ b/test/integration/test-bug-623443-fail-on-bad-proxies @@ -0,0 +1,33 @@ +#!/bin/sh +set -e + +TESTDIR="$(readlink -f "$(dirname "$0")")" +. "$TESTDIR/framework" +setupenvironment +configarchitecture 'amd64' + +buildsimplenativepackage 'unrelated' 'all' '0.5~squeeze1' 'unstable' + +setupaptarchive --no-update +changetowebserver + +testsuccess apt update +rm -rf rootdir/var/lib/apt/lists + +export http_proxy=enrico:password@proxy-cache.localnet:3128 +testfailure apt update +unset http_proxy +testsuccess grep 'Unsupported proxy configured' rootdir/tmp/testfailure.output + +changetohttpswebserver + +testsuccess apt update +rm -rf rootdir/var/lib/apt/lists + +export http_proxy=enrico:password@proxy-cache.localnet:3128 +testfailure apt update +unset http_proxy +testsuccess grep 'Unsupported proxy configured' rootdir/tmp/testfailure.output + + + -- cgit v1.2.3 From 57401c48fadc0c78733a67294f9cc20a57e527c9 Mon Sep 17 00:00:00 2001 From: David Kalnischkies Date: Tue, 2 Aug 2016 22:44:50 +0200 Subject: detect redirection loops in acquire instead of workers Having the detection handled in specific (http) workers means that a redirection loop over different hostnames isn't detected. 
Its also not a good idea have this implement in each method independently even if it would work --- apt-pkg/acquire-item.cc | 69 +++++++++++++++++++++--------- apt-pkg/acquire-item.h | 5 ++- apt-pkg/acquire-worker.cc | 10 +++++ methods/aptmethod.h | 3 +- methods/http.cc | 6 +++ methods/http.h | 1 + methods/https.h | 1 + methods/server.cc | 75 ++++++++++++++------------------- methods/server.h | 1 + test/integration/test-apt-redirect-loop | 23 ++++++++++ 10 files changed, 129 insertions(+), 65 deletions(-) create mode 100755 test/integration/test-apt-redirect-loop diff --git a/apt-pkg/acquire-item.cc b/apt-pkg/acquire-item.cc index ad8cb7f24..f13d2f6ae 100644 --- a/apt-pkg/acquire-item.cc +++ b/apt-pkg/acquire-item.cc @@ -685,10 +685,15 @@ class APT_HIDDEN CleanupItem : public pkgAcqTransactionItem /*{{{*/ /*}}}*/ // Acquire::Item::Item - Constructor /*{{{*/ +class pkgAcquire::Item::Private +{ +public: + std::vector PastRedirections; +}; APT_IGNORE_DEPRECATED_PUSH pkgAcquire::Item::Item(pkgAcquire * const owner) : FileSize(0), PartialSize(0), Mode(0), ID(0), Complete(false), Local(false), - QueueCounter(0), ExpectedAdditionalItems(0), Owner(owner), d(NULL) + QueueCounter(0), ExpectedAdditionalItems(0), Owner(owner), d(new Private()) { Owner->Add(this); Status = StatIdle; @@ -699,6 +704,7 @@ APT_IGNORE_DEPRECATED_POP pkgAcquire::Item::~Item() { Owner->Remove(this); + delete d; } /*}}}*/ std::string pkgAcquire::Item::Custom600Headers() const /*{{{*/ @@ -766,32 +772,40 @@ void pkgAcquire::Item::Failed(string const &Message,pkgAcquire::MethodConfig con } string const FailReason = LookupTag(Message, "FailReason"); - enum { MAXIMUM_SIZE_EXCEEDED, HASHSUM_MISMATCH, WEAK_HASHSUMS, OTHER } failreason = OTHER; + enum { MAXIMUM_SIZE_EXCEEDED, HASHSUM_MISMATCH, WEAK_HASHSUMS, REDIRECTION_LOOP, OTHER } failreason = OTHER; if ( FailReason == "MaximumSizeExceeded") failreason = MAXIMUM_SIZE_EXCEEDED; else if ( FailReason == "WeakHashSums") failreason = WEAK_HASHSUMS; + else if (FailReason == "RedirectionLoop") + failreason = REDIRECTION_LOOP; else if (Status == StatAuthError) failreason = HASHSUM_MISMATCH; if(ErrorText.empty()) { + std::ostringstream out; + switch (failreason) + { + case HASHSUM_MISMATCH: + out << _("Hash Sum mismatch") << std::endl; + break; + case WEAK_HASHSUMS: + out << _("Insufficient information available to perform this download securely") << std::endl; + break; + case REDIRECTION_LOOP: + out << "Redirection loop encountered" << std::endl; + break; + case MAXIMUM_SIZE_EXCEEDED: + out << LookupTag(Message, "Message") << std::endl; + break; + case OTHER: + out << LookupTag(Message, "Message"); + break; + } + if (Status == StatAuthError) { - std::ostringstream out; - switch (failreason) - { - case HASHSUM_MISMATCH: - out << _("Hash Sum mismatch") << std::endl; - break; - case WEAK_HASHSUMS: - out << _("Insufficient information available to perform this download securely") << std::endl; - break; - case MAXIMUM_SIZE_EXCEEDED: - case OTHER: - out << LookupTag(Message, "Message") << std::endl; - break; - } auto const ExpectedHashes = GetExpectedHashes(); if (ExpectedHashes.empty() == false) { @@ -822,10 +836,8 @@ void pkgAcquire::Item::Failed(string const &Message,pkgAcquire::MethodConfig con } out << "Last modification reported: " << LookupTag(Message, "Last-Modified", "") << std::endl; } - ErrorText = out.str(); } - else - ErrorText = LookupTag(Message,"Message"); + ErrorText = out.str(); } switch (failreason) @@ -833,6 +845,7 @@ void pkgAcquire::Item::Failed(string const 
&Message,pkgAcquire::MethodConfig con case MAXIMUM_SIZE_EXCEEDED: RenameOnError(MaximumSizeExceeded); break; case HASHSUM_MISMATCH: RenameOnError(HashSumMismatch); break; case WEAK_HASHSUMS: break; + case REDIRECTION_LOOP: break; case OTHER: break; } @@ -976,6 +989,24 @@ std::string pkgAcquire::Item::HashSum() const /*{{{*/ return hs != NULL ? hs->toStr() : ""; } /*}}}*/ +bool pkgAcquire::Item::IsRedirectionLoop(std::string const &NewURI) /*{{{*/ +{ + if (d->PastRedirections.empty()) + { + d->PastRedirections.push_back(NewURI); + return false; + } + auto const LastURI = std::prev(d->PastRedirections.end()); + // redirections to the same file are a way of restarting/resheduling, + // individual methods will have to make sure that they aren't looping this way + if (*LastURI == NewURI) + return false; + if (std::find(d->PastRedirections.begin(), LastURI, NewURI) != LastURI) + return true; + d->PastRedirections.push_back(NewURI); + return false; +} + /*}}}*/ pkgAcqTransactionItem::pkgAcqTransactionItem(pkgAcquire * const Owner, /*{{{*/ pkgAcqMetaClearSig * const transactionManager, IndexTarget const &target) : diff --git a/apt-pkg/acquire-item.h b/apt-pkg/acquire-item.h index ac4994738..e6e5ea12b 100644 --- a/apt-pkg/acquire-item.h +++ b/apt-pkg/acquire-item.h @@ -304,6 +304,8 @@ class pkgAcquire::Item : public WeakPointable /*{{{*/ */ virtual ~Item(); + bool APT_HIDDEN IsRedirectionLoop(std::string const &NewURI); + protected: /** \brief The acquire object with which this item is associated. */ pkgAcquire * const Owner; @@ -357,7 +359,8 @@ class pkgAcquire::Item : public WeakPointable /*{{{*/ virtual std::string GetFinalFilename() const; private: - void * const d; + class Private; + Private * const d; friend class pkgAcqMetaBase; friend class pkgAcqMetaClearSig; diff --git a/apt-pkg/acquire-worker.cc b/apt-pkg/acquire-worker.cc index 39cc55bdf..1ee78d070 100644 --- a/apt-pkg/acquire-worker.cc +++ b/apt-pkg/acquire-worker.cc @@ -269,6 +269,16 @@ bool pkgAcquire::Worker::RunMessages() for (auto const &Owner: ItmOwners) { pkgAcquire::ItemDesc &desc = Owner->GetItemDesc(); + if (Owner->IsRedirectionLoop(NewURI)) + { + std::string msg = Message; + msg.append("\nFailReason: RedirectionLoop"); + Owner->Failed(msg, Config); + if (Log != nullptr) + Log->Fail(Owner->GetItemDesc()); + continue; + } + if (Log != nullptr) Log->Done(desc); diff --git a/methods/aptmethod.h b/methods/aptmethod.h index d3c948636..0963ea21e 100644 --- a/methods/aptmethod.h +++ b/methods/aptmethod.h @@ -17,7 +17,8 @@ class aptMethod : public pkgAcqMethod { - char const * const Binary; +protected: + std::string Binary; public: virtual bool Configuration(std::string Message) APT_OVERRIDE diff --git a/methods/http.cc b/methods/http.cc index c61ca1c3f..cf5eae06d 100644 --- a/methods/http.cc +++ b/methods/http.cc @@ -465,6 +465,12 @@ bool HttpServerState::RunData(FileFd * const File) return Owner->Flush() && !_error->PendingError(); } /*}}}*/ +bool HttpServerState::RunDataToDevNull() /*{{{*/ +{ + FileFd DevNull("/dev/null", FileFd::WriteOnly); + return RunData(&DevNull); +} + /*}}}*/ bool HttpServerState::ReadHeaderLines(std::string &Data) /*{{{*/ { return In.WriteTillEl(Data); diff --git a/methods/http.h b/methods/http.h index ac5f52314..2ac3b8c9b 100644 --- a/methods/http.h +++ b/methods/http.h @@ -106,6 +106,7 @@ struct HttpServerState: public ServerState virtual void Reset() APT_OVERRIDE { ServerState::Reset(); ServerFd = -1; }; virtual bool RunData(FileFd * const File) APT_OVERRIDE; + virtual bool RunDataToDevNull() 
APT_OVERRIDE; virtual bool Open() APT_OVERRIDE; virtual bool IsOpen() APT_OVERRIDE; diff --git a/methods/https.h b/methods/https.h index 8592570c6..85cc7824e 100644 --- a/methods/https.h +++ b/methods/https.h @@ -39,6 +39,7 @@ class HttpsServerState : public ServerState /** \brief Transfer the data from the socket */ virtual bool RunData(FileFd * const /*File*/) APT_OVERRIDE { return false; } + virtual bool RunDataToDevNull() APT_OVERRIDE { return false; } virtual bool Open() APT_OVERRIDE { return false; } virtual bool IsOpen() APT_OVERRIDE { return false; } diff --git a/methods/server.cc b/methods/server.cc index 6d147fe12..672a3d16d 100644 --- a/methods/server.cc +++ b/methods/server.cc @@ -297,12 +297,33 @@ ServerMethod::DealWithHeaders(FetchResult &Res) else NextURI.clear(); NextURI.append(DeQuoteString(Server->Location)); + if (Queue->Uri == NextURI) + { + SetFailReason("RedirectionLoop"); + _error->Error("Redirection loop encountered"); + if (Server->HaveContent == true) + return ERROR_WITH_CONTENT_PAGE; + return ERROR_UNRECOVERABLE; + } return TRY_AGAIN_OR_REDIRECT; } else { NextURI = DeQuoteString(Server->Location); URI tmpURI = NextURI; + if (tmpURI.Access == "http" && Binary == "https+http") + { + tmpURI.Access = "https+http"; + NextURI = tmpURI; + } + if (Queue->Uri == NextURI) + { + SetFailReason("RedirectionLoop"); + _error->Error("Redirection loop encountered"); + if (Server->HaveContent == true) + return ERROR_WITH_CONTENT_PAGE; + return ERROR_UNRECOVERABLE; + } URI Uri = Queue->Uri; // same protocol redirects are okay if (tmpURI.Access == Uri.Access) @@ -486,10 +507,6 @@ bool ServerMethod::Fetch(FetchItem *) // ServerMethod::Loop - Main loop /*{{{*/ int ServerMethod::Loop() { - typedef vector StringVector; - typedef vector::iterator StringVectorIterator; - map Redirected; - signal(SIGTERM,SigTerm); signal(SIGINT,SigTerm); @@ -720,46 +737,16 @@ int ServerMethod::Loop() File = 0; break; } - - // Try again with a new URL - case TRY_AGAIN_OR_REDIRECT: - { - // Clear rest of response if there is content - if (Server->HaveContent) - { - File = new FileFd("/dev/null",FileFd::WriteExists); - Server->RunData(File); - delete File; - File = 0; - } - - /* Detect redirect loops. No more redirects are allowed - after the same URI is seen twice in a queue item. 
*/ - StringVector &R = Redirected[Queue->DestFile]; - bool StopRedirects = false; - if (R.empty() == true) - R.push_back(Queue->Uri); - else if (R[0] == "STOP" || R.size() > 10) - StopRedirects = true; - else - { - for (StringVectorIterator I = R.begin(); I != R.end(); ++I) - if (Queue->Uri == *I) - { - R[0] = "STOP"; - break; - } - - R.push_back(Queue->Uri); - } - - if (StopRedirects == false) - Redirect(NextURI); - else - Fail(); - - break; - } + + // Try again with a new URL + case TRY_AGAIN_OR_REDIRECT: + { + // Clear rest of response if there is content + if (Server->HaveContent) + Server->RunDataToDevNull(); + Redirect(NextURI); + break; + } default: Fail(_("Internal error")); diff --git a/methods/server.h b/methods/server.h index af0914b9c..b23b0e50a 100644 --- a/methods/server.h +++ b/methods/server.h @@ -91,6 +91,7 @@ struct ServerState /** \brief Transfer the data from the socket */ virtual bool RunData(FileFd * const File) = 0; + virtual bool RunDataToDevNull() = 0; virtual bool Open() = 0; virtual bool IsOpen() = 0; diff --git a/test/integration/test-apt-redirect-loop b/test/integration/test-apt-redirect-loop new file mode 100755 index 000000000..b54f74276 --- /dev/null +++ b/test/integration/test-apt-redirect-loop @@ -0,0 +1,23 @@ +#!/bin/sh +set -e + +TESTDIR="$(readlink -f "$(dirname "$0")")" +. "$TESTDIR/framework" + +setupenvironment +configarchitecture "i386" + +insertpackage 'stable' 'apt' 'all' '1' +setupaptarchive --no-update + +echo 'alright' > aptarchive/working +changetohttpswebserver +webserverconfig 'aptwebserver::redirect::replace::/redirectme3/' '/redirectme/' +webserverconfig 'aptwebserver::redirect::replace::/redirectme2/' '/redirectme3/' +webserverconfig 'aptwebserver::redirect::replace::/redirectme/' '/redirectme2/' + +testfailure apthelper download-file "http://localhost:${APTHTTPPORT}/redirectme/working" httpfile +testsuccess grep 'Redirection loop encountered' rootdir/tmp/testfailure.output + +testfailure apthelper download-file "https://localhost:${APTHTTPSPORT}/redirectme/working" httpsfile +testsuccess grep 'Redirection loop encountered' rootdir/tmp/testfailure.output -- cgit v1.2.3 From 4bba5a88d0f6afde4414b586b64c48a4851d5324 Mon Sep 17 00:00:00 2001 From: David Kalnischkies Date: Tue, 2 Aug 2016 14:49:58 +0200 Subject: use the same redirection handling for http and https MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit cURL which backs our https implementation can handle redirects on its own, but by dealing with them on our own we gain finer control over which redirections will be performed (we don't like https → http) and by whom so that redirections to other hosts correctly spawn a new https method dealing with these instead of letting the current one deal with it. 
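The rule described above boils down to a small policy check: same-scheme redirects and http -> https upgrades are followed, while https -> http downgrades are refused. The sketch below is illustrative only (the function name is made up) and merely condenses the policy that the server.cc changes later in this series implement:

    #include <iostream>
    #include <string>

    // Illustrative policy check, not the apt code itself.
    static bool redirectAllowed(std::string const &from, std::string const &to)
    {
       if (from == to)
          return true;              // e.g. https -> https
       if (from == "http" && to == "https")
          return true;              // upgrading is fine
       return false;                // https -> http and everything else is refused
    }

    int main()
    {
       std::cout << redirectAllowed("http", "https") << '\n'; // 1 (followed)
       std::cout << redirectAllowed("https", "http") << '\n'; // 0 (refused)
    }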
--- methods/http.cc | 29 ++++++++ methods/http.h | 1 + methods/https.cc | 106 ++++++++++++---------------- methods/server.cc | 56 +++++---------- methods/server.h | 2 +- test/integration/test-apt-https-no-redirect | 6 +- test/integration/test-partial-file-support | 3 + 7 files changed, 102 insertions(+), 101 deletions(-) diff --git a/methods/http.cc b/methods/http.cc index cf5eae06d..b7a3aa4a1 100644 --- a/methods/http.cc +++ b/methods/http.cc @@ -797,3 +797,32 @@ void HttpMethod::RotateDNS() /*{{{*/ ::RotateDNS(); } /*}}}*/ +ServerMethod::DealWithHeadersResult HttpMethod::DealWithHeaders(FetchResult &Res)/*{{{*/ +{ + auto ret = ServerMethod::DealWithHeaders(Res); + if (ret != ServerMethod::FILE_IS_OPEN) + return ret; + + // Open the file + delete File; + File = new FileFd(Queue->DestFile,FileFd::WriteAny); + if (_error->PendingError() == true) + return ERROR_NOT_FROM_SERVER; + + FailFile = Queue->DestFile; + FailFile.c_str(); // Make sure we don't do a malloc in the signal handler + FailFd = File->Fd(); + FailTime = Server->Date; + + if (Server->InitHashes(Queue->ExpectedHashes) == false || Server->AddPartialFileToHashes(*File) == false) + { + _error->Errno("read",_("Problem hashing file")); + return ERROR_NOT_FROM_SERVER; + } + if (Server->StartPos > 0) + Res.ResumePoint = Server->StartPos; + + SetNonBlock(File->Fd(),true); + return FILE_IS_OPEN; +} + /*}}}*/ diff --git a/methods/http.h b/methods/http.h index 2ac3b8c9b..e6b11d642 100644 --- a/methods/http.h +++ b/methods/http.h @@ -130,6 +130,7 @@ class HttpMethod : public ServerMethod virtual std::unique_ptr CreateServerState(URI const &uri) APT_OVERRIDE; virtual void RotateDNS() APT_OVERRIDE; + virtual DealWithHeadersResult DealWithHeaders(FetchResult &Res) APT_OVERRIDE; protected: std::string AutoDetectProxyCmd; diff --git a/methods/https.cc b/methods/https.cc index 7c0c3241d..a5a8a7cd1 100644 --- a/methods/https.cc +++ b/methods/https.cc @@ -60,6 +60,7 @@ HttpsMethod::parse_header(void *buffer, size_t size, size_t nmemb, void *userp) if (line.empty() == true) { + me->https->Server->JunkSize = 0; if (me->https->Server->Result != 416 && me->https->Server->StartPos != 0) ; else if (me->https->Server->Result == 416) @@ -99,11 +100,13 @@ HttpsMethod::parse_header(void *buffer, size_t size, size_t nmemb, void *userp) // we expect valid data, so tell our caller we get the file now if (me->https->Server->Result >= 200 && me->https->Server->Result < 300) { - if (me->https->Server->JunkSize == 0 && me->Res->Size != 0 && me->Res->Size > me->Res->ResumePoint) + if (me->Res->Size != 0 && me->Res->Size > me->Res->ResumePoint) me->https->URIStart(*me->Res); if (me->https->Server->AddPartialFileToHashes(*(me->https->File)) == false) return 0; } + else + me->https->Server->JunkSize = std::numeric_limitshttps->Server->JunkSize)>::max(); } else if (me->https->Server->HeaderLine(line) == false) return 0; @@ -266,6 +269,7 @@ bool HttpsMethod::Fetch(FetchItem *Itm) // options curl_easy_setopt(curl, CURLOPT_NOPROGRESS, true); curl_easy_setopt(curl, CURLOPT_FILETIME, true); + curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 0); // only allow curl to handle https, not the other stuff it supports curl_easy_setopt(curl, CURLOPT_PROTOCOLS, CURLPROTO_HTTPS); curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTPS); @@ -372,10 +376,6 @@ bool HttpsMethod::Fetch(FetchItem *Itm) curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT, DL_MIN_SPEED); curl_easy_setopt(curl, CURLOPT_LOW_SPEED_TIME, timeout); - // set redirect options and default to 10 redirects - 
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, AllowRedirect); - curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 10); - // debug if (Debug == true) curl_easy_setopt(curl, CURLOPT_VERBOSE, true); @@ -428,6 +428,8 @@ bool HttpsMethod::Fetch(FetchItem *Itm) // met, CURLINFO_CONDITION_UNMET will be set to 1 long curl_condition_unmet = 0; curl_easy_getinfo(curl, CURLINFO_CONDITION_UNMET, &curl_condition_unmet); + if (curl_condition_unmet == 1) + Server->Result = 304; File->Close(); curl_slist_free_all(headers); @@ -460,70 +462,54 @@ bool HttpsMethod::Fetch(FetchItem *Itm) return false; } - // server says file not modified - if (Server->Result == 304 || curl_condition_unmet == 1) + switch (DealWithHeaders(Res)) { - RemoveFile("https", File->Name()); - Res.IMSHit = true; - Res.LastModified = Itm->LastModified; - Res.Size = 0; - URIDone(Res); - return true; - } - Res.IMSHit = false; + case ServerMethod::IMS_HIT: + URIDone(Res); + break; - if (Server->Result != 200 && // OK - Server->Result != 206 && // Partial - Server->Result != 416) // invalid Range - { - char err[255]; - snprintf(err, sizeof(err) - 1, "HttpError%i", Server->Result); - SetFailReason(err); - _error->Error("%i %s", Server->Result, Server->Code); - // unlink, no need keep 401/404 page content in partial/ - RemoveFile("https", File->Name()); - return false; - } + case ServerMethod::ERROR_WITH_CONTENT_PAGE: + // unlink, no need keep 401/404 page content in partial/ + RemoveFile(Binary.c_str(), File->Name()); + case ServerMethod::ERROR_UNRECOVERABLE: + case ServerMethod::ERROR_NOT_FROM_SERVER: + return false; - // invalid range-request - if (Server->Result == 416) - { - RemoveFile("https", File->Name()); - delete File; - Redirect(Itm->Uri); - return true; - } + case ServerMethod::TRY_AGAIN_OR_REDIRECT: + Redirect(NextURI); + break; - struct stat resultStat; - if (unlikely(stat(File->Name().c_str(), &resultStat) != 0)) - { - _error->Errno("stat", "Unable to access file %s", File->Name().c_str()); - return false; - } - Res.Size = resultStat.st_size; + case ServerMethod::FILE_IS_OPEN: + struct stat resultStat; + if (unlikely(stat(File->Name().c_str(), &resultStat) != 0)) + { + _error->Errno("stat", "Unable to access file %s", File->Name().c_str()); + return false; + } + Res.Size = resultStat.st_size; - // Timestamp - curl_easy_getinfo(curl, CURLINFO_FILETIME, &Res.LastModified); - if (Res.LastModified != -1) - { - struct timeval times[2]; - times[0].tv_sec = Res.LastModified; - times[1].tv_sec = Res.LastModified; - times[0].tv_usec = times[1].tv_usec = 0; - utimes(File->Name().c_str(), times); - } - else - Res.LastModified = resultStat.st_mtime; + // Timestamp + curl_easy_getinfo(curl, CURLINFO_FILETIME, &Res.LastModified); + if (Res.LastModified != -1) + { + struct timeval times[2]; + times[0].tv_sec = Res.LastModified; + times[1].tv_sec = Res.LastModified; + times[0].tv_usec = times[1].tv_usec = 0; + utimes(File->Name().c_str(), times); + } + else + Res.LastModified = resultStat.st_mtime; - // take hashes - Res.TakeHashes(*(Server->GetHashes())); + // take hashes + Res.TakeHashes(*(Server->GetHashes())); - // keep apt updated - URIDone(Res); + // keep apt updated + URIDone(Res); + break; + } - // cleanup delete File; - return true; } /*}}}*/ diff --git a/methods/server.cc b/methods/server.cc index 672a3d16d..66551b893 100644 --- a/methods/server.cc +++ b/methods/server.cc @@ -170,7 +170,7 @@ bool ServerState::HeaderLine(string Line) HaveContent = true; unsigned long long * DownloadSizePtr = &DownloadSize; - if (Result == 416) + if (Result == 
416 || (Result >= 300 && Result < 400)) DownloadSizePtr = &JunkSize; *DownloadSizePtr = strtoull(Val.c_str(), NULL, 10); @@ -272,6 +272,7 @@ ServerMethod::DealWithHeaders(FetchResult &Res) RemoveFile("server", Queue->DestFile); Res.IMSHit = true; Res.LastModified = Queue->LastModified; + Res.Size = 0; return IMS_HIT; } @@ -288,7 +289,8 @@ ServerMethod::DealWithHeaders(FetchResult &Res) && Server->Result != 304 // Not Modified && Server->Result != 306)) // (Not part of HTTP/1.1, reserved) { - if (Server->Location.empty() == true); + if (Server->Location.empty() == true) + ; else if (Server->Location[0] == '/' && Queue->Uri.empty() == false) { URI Uri = Queue->Uri; @@ -329,8 +331,10 @@ ServerMethod::DealWithHeaders(FetchResult &Res) if (tmpURI.Access == Uri.Access) return TRY_AGAIN_OR_REDIRECT; // as well as http to https - else if (Uri.Access == "http" && tmpURI.Access == "https") + else if ((Uri.Access == "http" || Uri.Access == "https+http") && tmpURI.Access == "https") return TRY_AGAIN_OR_REDIRECT; + else + _error->Error("Redirection from %s to '%s' is forbidden", Uri.Access.c_str(), NextURI.c_str()); } /* else pass through for error message */ } @@ -358,11 +362,10 @@ ServerMethod::DealWithHeaders(FetchResult &Res) // the file is completely downloaded, but was not moved if (Server->HaveContent == true) { - // Send to error page to dev/null - FileFd DevNull("/dev/null",FileFd::WriteExists); - Server->RunData(&DevNull); + // nuke the sent error page + Server->RunDataToDevNull(); + Server->HaveContent = false; } - Server->HaveContent = false; Server->StartPos = Server->TotalFileSize; Server->Result = 200; } @@ -378,10 +381,13 @@ ServerMethod::DealWithHeaders(FetchResult &Res) failure */ if (Server->Result < 200 || Server->Result >= 300) { - std::string err; - strprintf(err, "HttpError%u", Server->Result); - SetFailReason(err); - _error->Error("%u %s", Server->Result, Server->Code); + if (_error->PendingError() == false) + { + std::string err; + strprintf(err, "HttpError%u", Server->Result); + SetFailReason(err); + _error->Error("%u %s", Server->Result, Server->Code); + } if (Server->HaveContent == true) return ERROR_WITH_CONTENT_PAGE; return ERROR_UNRECOVERABLE; @@ -390,27 +396,6 @@ ServerMethod::DealWithHeaders(FetchResult &Res) // This is some sort of 2xx 'data follows' reply Res.LastModified = Server->Date; Res.Size = Server->TotalFileSize; - - // Open the file - delete File; - File = new FileFd(Queue->DestFile,FileFd::WriteAny); - if (_error->PendingError() == true) - return ERROR_NOT_FROM_SERVER; - - FailFile = Queue->DestFile; - FailFile.c_str(); // Make sure we don't do a malloc in the signal handler - FailFd = File->Fd(); - FailTime = Server->Date; - - if (Server->InitHashes(Queue->ExpectedHashes) == false || Server->AddPartialFileToHashes(*File) == false) - { - _error->Errno("read",_("Problem hashing file")); - return ERROR_NOT_FROM_SERVER; - } - if (Server->StartPos > 0) - Res.ResumePoint = Server->StartPos; - - SetNonBlock(File->Fd(),true); return FILE_IS_OPEN; } /*}}}*/ @@ -729,12 +714,7 @@ int ServerMethod::Loop() case ERROR_WITH_CONTENT_PAGE: { Fail(); - - // Send to content to dev/null - File = new FileFd("/dev/null",FileFd::WriteExists); - Server->RunData(File); - delete File; - File = 0; + Server->RunDataToDevNull(); break; } diff --git a/methods/server.h b/methods/server.h index b23b0e50a..f2868c96a 100644 --- a/methods/server.h +++ b/methods/server.h @@ -141,7 +141,7 @@ class ServerMethod : public aptMethod TRY_AGAIN_OR_REDIRECT }; /** \brief Handle the retrieved header 
data */ - DealWithHeadersResult DealWithHeaders(FetchResult &Res); + virtual DealWithHeadersResult DealWithHeaders(FetchResult &Res); // In the event of a fatal signal this file will be closed and timestamped. static std::string FailFile; diff --git a/test/integration/test-apt-https-no-redirect b/test/integration/test-apt-https-no-redirect index b437ac1e6..69e2ba57f 100755 --- a/test/integration/test-apt-https-no-redirect +++ b/test/integration/test-apt-https-no-redirect @@ -11,7 +11,9 @@ insertpackage 'stable' 'apt' 'all' '1' setupaptarchive --no-update echo 'alright' > aptarchive/working -changetohttpswebserver -o "aptwebserver::redirect::replace::/redirectme/=http://localhost:${APTHTTPPORT}/" +changetohttpswebserver +webserverconfig 'aptwebserver::redirect::replace::/redirectme/' "http://localhost:${APTHTTPPORT}/" +webserverconfig 'aptwebserver::redirect::replace::/redirectme2/' "https://localhost:${APTHTTPSPORT}/" msgtest 'download of a file works via' 'http' testsuccess --nomsg downloadfile "http://localhost:${APTHTTPPORT}/working" httpfile @@ -25,4 +27,4 @@ msgtest 'download of a file does not work if' 'https redirected to http' testfailure --nomsg downloadfile "https://localhost:${APTHTTPSPORT}/redirectme/working" redirectfile msgtest 'libcurl has forbidden access in last request to' 'http resource' -testsuccess --nomsg grep -q -E -- 'Protocol "?http"? not supported or disabled in libcurl' rootdir/tmp/testfailure.output +testsuccess --nomsg grep -q -E -- "Redirection from https to 'http://.*' is forbidden" rootdir/tmp/testfailure.output diff --git a/test/integration/test-partial-file-support b/test/integration/test-partial-file-support index e2d2743b3..1c5d120d8 100755 --- a/test/integration/test-partial-file-support +++ b/test/integration/test-partial-file-support @@ -146,3 +146,6 @@ serverconfigs "http://localhost:${APTHTTPPORT}" changetohttpswebserver serverconfigs "https://localhost:${APTHTTPSPORT}" + +webserverconfig 'aptwebserver::redirect::replace::/redirectme/' "https://localhost:${APTHTTPSPORT}/" +serverconfigs "http://localhost:${APTHTTPPORT}/redirectme" -- cgit v1.2.3 From 30060442025824c491f58887ca7369f3c572fa57 Mon Sep 17 00:00:00 2001 From: David Kalnischkies Date: Sun, 31 Jul 2016 18:05:56 +0200 Subject: implement generic config fallback for methods MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The https method has for a long while implemented a hardcoded fallback to the same options in http, which, while it works, is rather inflexible if we want to allow methods to use another name to change their behavior slightly, like apt-transport-tor does for https (most of its diff being s#https#tor#g), which then fails to do the full-circle fallthrough tor -> https -> http for https sources. With this config infrastructure that can now be implemented. 
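In other words, a method registers an ordered list of names and the first name for which an option is set wins, so a tor method can fall back to the https and then the http values. The sketch below illustrates just that lookup order; it is not the apt code, the map stands in for apt's _config, and the function name is made up for the example:

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    // Illustrative only: ordered lookup over method names, most specific first.
    static std::string configFind(std::map<std::string, std::string> const &config,
                                  std::vector<std::string> const &methodNames,
                                  std::string const &postfix, std::string const &defValue)
    {
       for (auto const &name : methodNames)
       {
          auto const it = config.find("Acquire::" + name + "::" + postfix);
          if (it != config.end() && it->second.empty() == false)
             return it->second; // first name with a value set wins
       }
       return defValue;
    }

    int main()
    {
       std::map<std::string, std::string> const config = {
          {"Acquire::http::Proxy", "http://proxy.example:3128"},
       };
       // tor -> https -> http fallthrough: only the http value is set, so it is used
       std::cout << configFind(config, {"tor", "https", "http"}, "Proxy", "DIRECT") << '\n';
    }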
--- methods/aptmethod.h | 72 ++++++- methods/cdrom.cc | 4 +- methods/gpgv.cc | 6 +- methods/http.cc | 59 +++--- methods/http.h | 10 +- methods/http_main.cc | 8 +- methods/https.cc | 226 ++++++++++----------- methods/https.h | 13 +- methods/mirror.cc | 4 +- methods/rred.cc | 2 +- methods/rsh.cc | 12 +- methods/rsh.h | 3 +- methods/server.cc | 57 +++++- methods/server.h | 2 +- methods/store.cc | 11 +- test/integration/test-bug-738785-switch-protocol | 2 +- .../test-different-methods-for-same-source | 15 +- test/integration/test-pdiff-usage | 2 +- 18 files changed, 303 insertions(+), 205 deletions(-) diff --git a/methods/aptmethod.h b/methods/aptmethod.h index 0963ea21e..38f451708 100644 --- a/methods/aptmethod.h +++ b/methods/aptmethod.h @@ -5,8 +5,10 @@ #include #include +#include #include #include +#include #include #include @@ -15,10 +17,15 @@ #include +static bool hasDoubleColon(std::string const &n) +{ + return n.find("::") != std::string::npos; +} + class aptMethod : public pkgAcqMethod { protected: - std::string Binary; + std::string const Binary; public: virtual bool Configuration(std::string Message) APT_OVERRIDE @@ -34,7 +41,7 @@ public: return true; } - bool CalculateHashes(FetchItem const * const Itm, FetchResult &Res) const + bool CalculateHashes(FetchItem const * const Itm, FetchResult &Res) const APT_NONNULL(2) { Hashes Hash(Itm->ExpectedHashes); FileFd Fd; @@ -52,7 +59,62 @@ public: va_end(args); } - bool TransferModificationTimes(char const * const From, char const * const To, time_t &LastModified) + std::vector methodNames; + void setPostfixForMethodNames(char const * const postfix) APT_NONNULL(2) + { + methodNames.erase(std::remove_if(methodNames.begin(), methodNames.end(), hasDoubleColon), methodNames.end()); + decltype(methodNames) toAdd; + for (auto && name: methodNames) + toAdd.emplace_back(name + "::" + postfix); + std::move(toAdd.begin(), toAdd.end(), std::back_inserter(methodNames)); + } + bool DebugEnabled() const + { + if (methodNames.empty()) + return false; + auto const sni = std::find_if_not(methodNames.crbegin(), methodNames.crend(), hasDoubleColon); + if (unlikely(sni == methodNames.crend())) + return false; + auto const ln = methodNames[methodNames.size() - 1]; + // worst case: all three are the same + std::string confln, confsn, confpn; + strprintf(confln, "Debug::Acquire::%s", ln.c_str()); + strprintf(confsn, "Debug::Acquire::%s", sni->c_str()); + auto const pni = sni->substr(0, sni->find('+')); + strprintf(confpn, "Debug::Acquire::%s", pni.c_str()); + return _config->FindB(confln,_config->FindB(confsn, _config->FindB(confpn, false))); + } + std::string ConfigFind(char const * const postfix, std::string const &defValue) const APT_NONNULL(2) + { + for (auto && name: methodNames) + { + std::string conf; + strprintf(conf, "Acquire::%s::%s", name.c_str(), postfix); + auto const value = _config->Find(conf); + if (value.empty() == false) + return value; + } + return defValue; + } + std::string ConfigFind(std::string const &postfix, std::string const &defValue) const + { + return ConfigFind(postfix.c_str(), defValue); + } + bool ConfigFindB(char const * const postfix, bool const defValue) const APT_NONNULL(2) + { + return StringToBool(ConfigFind(postfix, defValue ? 
"yes" : "no"), defValue); + } + int ConfigFindI(char const * const postfix, int const defValue) const APT_NONNULL(2) + { + char *End; + std::string const value = ConfigFind(postfix, ""); + auto const Res = strtol(value.c_str(), &End, 0); + if (value.c_str() == End) + return defValue; + return Res; + } + + bool TransferModificationTimes(char const * const From, char const * const To, time_t &LastModified) APT_NONNULL(2, 3) { if (strcmp(To, "/dev/null") == 0) return true; @@ -75,8 +137,8 @@ public: return true; } - aptMethod(char const * const Binary, char const * const Ver, unsigned long const Flags) : - pkgAcqMethod(Ver, Flags), Binary(Binary) + aptMethod(std::string &&Binary, char const * const Ver, unsigned long const Flags) APT_NONNULL(3) : + pkgAcqMethod(Ver, Flags), Binary(Binary), methodNames({Binary}) { try { std::locale::global(std::locale("")); diff --git a/methods/cdrom.cc b/methods/cdrom.cc index 161822ac6..87a58e948 100644 --- a/methods/cdrom.cc +++ b/methods/cdrom.cc @@ -177,7 +177,7 @@ bool CDROMMethod::Fetch(FetchItem *Itm) URI Get = Itm->Uri; string File = Get.Path; - Debug = _config->FindB("Debug::Acquire::cdrom", false); + Debug = DebugEnabled(); if (Debug) clog << "CDROMMethod::Fetch " << Itm->Uri << endl; @@ -224,7 +224,7 @@ bool CDROMMethod::Fetch(FetchItem *Itm) return true; } - bool AutoDetect = _config->FindB("Acquire::cdrom::AutoDetect", true); + bool const AutoDetect = ConfigFindB("AutoDetect", true); CDROM = _config->FindDir("Acquire::cdrom::mount"); if (Debug) clog << "Looking for CDROM at " << CDROM << endl; diff --git a/methods/gpgv.cc b/methods/gpgv.cc index fc6eb9159..2fed53a39 100644 --- a/methods/gpgv.cc +++ b/methods/gpgv.cc @@ -147,7 +147,7 @@ string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile, vector &SoonWorthlessSigners, vector &NoPubKeySigners) { - bool const Debug = _config->FindB("Debug::Acquire::gpgv", false); + bool const Debug = DebugEnabled(); if (Debug == true) std::clog << "inside VerifyGetSigners" << std::endl; @@ -415,10 +415,8 @@ bool GPGVMethod::URIAcquire(std::string const &Message, FetchItem *Itm) std::move(NoPubKeySigners.begin(), NoPubKeySigners.end(), std::back_inserter(Res.GPGVOutput)); URIDone(Res); - if (_config->FindB("Debug::Acquire::gpgv", false)) - { + if (DebugEnabled()) std::clog << "apt-key succeeded\n"; - } return true; } diff --git a/methods/http.cc b/methods/http.cc index b7a3aa4a1..0378d7c60 100644 --- a/methods/http.cc +++ b/methods/http.cc @@ -63,13 +63,13 @@ const unsigned int CircleBuf::BW_HZ=10; // CircleBuf::CircleBuf - Circular input buffer /*{{{*/ // --------------------------------------------------------------------- /* */ -CircleBuf::CircleBuf(unsigned long long Size) +CircleBuf::CircleBuf(HttpMethod const * const Owner, unsigned long long Size) : Size(Size), Hash(NULL), TotalWriten(0) { Buf = new unsigned char[Size]; Reset(); - CircleBuf::BwReadLimit = _config->FindI("Acquire::http::Dl-Limit",0)*1024; + CircleBuf::BwReadLimit = Owner->ConfigFindI("Dl-Limit", 0) * 1024; } /*}}}*/ // CircleBuf::Reset - Reset to the default state /*{{{*/ @@ -287,9 +287,9 @@ CircleBuf::~CircleBuf() } // HttpServerState::HttpServerState - Constructor /*{{{*/ -HttpServerState::HttpServerState(URI Srv,HttpMethod *Owner) : ServerState(Srv, Owner), In(64*1024), Out(4*1024) +HttpServerState::HttpServerState(URI Srv,HttpMethod *Owner) : ServerState(Srv, Owner), In(Owner, 64*1024), Out(Owner, 4*1024) { - TimeOut = _config->FindI("Acquire::http::Timeout",TimeOut); + TimeOut = Owner->ConfigFindI("Timeout", TimeOut); 
Reset(); } /*}}}*/ @@ -309,7 +309,7 @@ bool HttpServerState::Open() // Determine the proxy setting AutoDetectProxy(ServerName); - string SpecificProxy = _config->Find("Acquire::http::Proxy::" + ServerName.Host); + string SpecificProxy = Owner->ConfigFind("Proxy::" + ServerName.Host, ""); if (!SpecificProxy.empty()) { if (SpecificProxy == "DIRECT") @@ -319,7 +319,7 @@ bool HttpServerState::Open() } else { - string DefProxy = _config->Find("Acquire::http::Proxy"); + string DefProxy = Owner->ConfigFind("Proxy", ""); if (!DefProxy.empty()) { Proxy = DefProxy; @@ -616,7 +616,7 @@ bool HttpServerState::Go(bool ToFile, FileFd * const File) FD_SET(FileFD,&wfds); // Add stdin - if (_config->FindB("Acquire::http::DependOnSTDIN", true) == true) + if (Owner->ConfigFindB("DependOnSTDIN", true) == true) FD_SET(STDIN_FILENO,&rfds); // Figure out the max fd @@ -688,6 +688,11 @@ bool HttpServerState::Go(bool ToFile, FileFd * const File) void HttpMethod::SendReq(FetchItem *Itm) { URI Uri = Itm->Uri; + { + auto const plus = Binary.find('+'); + if (plus != std::string::npos) + Uri.Access = Binary.substr(plus + 1); + } // The HTTP server expects a hostname with a trailing :port std::stringstream Req; @@ -705,7 +710,7 @@ void HttpMethod::SendReq(FetchItem *Itm) if (Server->Proxy.empty() == true || Server->Proxy.Host.empty()) requesturi = Uri.Path; else - requesturi = Itm->Uri; + requesturi = Uri; // The "+" is encoded as a workaround for a amazon S3 bug // see LP bugs #1003633 and #1086997. @@ -722,12 +727,12 @@ void HttpMethod::SendReq(FetchItem *Itm) Req << "Host: " << ProperHost << "\r\n"; // generate a cache control header (if needed) - if (_config->FindB("Acquire::http::No-Cache",false) == true) + if (ConfigFindB("No-Cache",false) == true) Req << "Cache-Control: no-cache\r\n" << "Pragma: no-cache\r\n"; else if (Itm->IndexFile == true) - Req << "Cache-Control: max-age=" << std::to_string(_config->FindI("Acquire::http::Max-Age",0)) << "\r\n"; - else if (_config->FindB("Acquire::http::No-Store",false) == true) + Req << "Cache-Control: max-age=" << std::to_string(ConfigFindI("Max-Age", 0)) << "\r\n"; + else if (ConfigFindB("No-Store", false) == true) Req << "Cache-Control: no-store\r\n"; // If we ask for uncompressed files servers might respond with content- @@ -735,7 +740,7 @@ void HttpMethod::SendReq(FetchItem *Itm) // see 657029, 657560 and co, so if we have no extension on the request // ask for text only. As a sidenote: If there is nothing to negotate servers // seem to be nice and ignore it. 
- if (_config->FindB("Acquire::http::SendAccept", true) == true) + if (ConfigFindB("SendAccept", true) == true) { size_t const filepos = Itm->Uri.find_last_of('/'); string const file = Itm->Uri.substr(filepos + 1); @@ -760,7 +765,7 @@ void HttpMethod::SendReq(FetchItem *Itm) Req << "Authorization: Basic " << Base64Encode(Uri.User + ":" + Uri.Password) << "\r\n"; - Req << "User-Agent: " << _config->Find("Acquire::http::User-Agent", + Req << "User-Agent: " << ConfigFind("User-Agent", "Debian APT-HTTP/1.3 (" PACKAGE_VERSION ")") << "\r\n"; Req << "\r\n"; @@ -771,22 +776,6 @@ void HttpMethod::SendReq(FetchItem *Itm) Server->WriteResponse(Req.str()); } /*}}}*/ -// HttpMethod::Configuration - Handle a configuration message /*{{{*/ -// --------------------------------------------------------------------- -/* We stash the desired pipeline depth */ -bool HttpMethod::Configuration(string Message) -{ - if (ServerMethod::Configuration(Message) == false) - return false; - - AllowRedirect = _config->FindB("Acquire::http::AllowRedirect",true); - PipelineDepth = _config->FindI("Acquire::http::Pipeline-Depth", - PipelineDepth); - Debug = _config->FindB("Debug::Acquire::http",false); - - return true; -} - /*}}}*/ std::unique_ptr HttpMethod::CreateServerState(URI const &uri)/*{{{*/ { return std::unique_ptr(new HttpServerState(uri, this)); @@ -826,3 +815,15 @@ ServerMethod::DealWithHeadersResult HttpMethod::DealWithHeaders(FetchResult &Res return FILE_IS_OPEN; } /*}}}*/ +HttpMethod::HttpMethod(std::string &&pProg) : ServerMethod(pProg.c_str(), "1.2", Pipeline | SendConfig)/*{{{*/ +{ + auto addName = std::inserter(methodNames, methodNames.begin()); + if (Binary != "http") + addName = "http"; + auto const plus = Binary.find('+'); + if (plus != std::string::npos) + addName = Binary.substr(0, plus); + File = 0; + Server = 0; +} + /*}}}*/ diff --git a/methods/http.h b/methods/http.h index e6b11d642..4d22c88f9 100644 --- a/methods/http.h +++ b/methods/http.h @@ -86,7 +86,7 @@ class CircleBuf // Dump everything void Stats(); - explicit CircleBuf(unsigned long long Size); + CircleBuf(HttpMethod const * const Owner, unsigned long long Size); ~CircleBuf(); }; @@ -126,8 +126,6 @@ class HttpMethod : public ServerMethod public: virtual void SendReq(FetchItem *Itm) APT_OVERRIDE; - virtual bool Configuration(std::string Message) APT_OVERRIDE; - virtual std::unique_ptr CreateServerState(URI const &uri) APT_OVERRIDE; virtual void RotateDNS() APT_OVERRIDE; virtual DealWithHeadersResult DealWithHeaders(FetchResult &Res) APT_OVERRIDE; @@ -138,11 +136,7 @@ class HttpMethod : public ServerMethod public: friend struct HttpServerState; - HttpMethod() : ServerMethod("http", "1.2",Pipeline | SendConfig) - { - File = 0; - Server = 0; - }; + explicit HttpMethod(std::string &&pProg); }; #endif diff --git a/methods/http_main.cc b/methods/http_main.cc index fa183ddb3..1e56044b7 100644 --- a/methods/http_main.cc +++ b/methods/http_main.cc @@ -5,11 +5,13 @@ #include "http.h" -int main() +int main(int, const char *argv[]) { // ignore SIGPIPE, this can happen on write() if the socket // closes the connection (this is dealt with via ServerDie()) signal(SIGPIPE, SIG_IGN); - - return HttpMethod().Loop(); + std::string Binary = flNotDir(argv[0]); + if (Binary.find('+') == std::string::npos && Binary != "http") + Binary.append("+http"); + return HttpMethod(std::move(Binary)).Loop(); } diff --git a/methods/https.cc b/methods/https.cc index a5a8a7cd1..47dce2ea0 100644 --- a/methods/https.cc +++ b/methods/https.cc @@ -25,11 +25,14 @@ #include #include 
#include -#include -#include #include #include +#include +#include +#include + + #include "https.h" #include @@ -148,7 +151,7 @@ HttpsMethod::write_data(void *buffer, size_t size, size_t nmemb, void *userp) // HttpsServerState::HttpsServerState - Constructor /*{{{*/ HttpsServerState::HttpsServerState(URI Srv,HttpsMethod * Owner) : ServerState(Srv, Owner), Hash(NULL) { - TimeOut = _config->FindI("Acquire::https::Timeout",TimeOut); + TimeOut = Owner->ConfigFindI("Timeout", TimeOut); Reset(); } /*}}}*/ @@ -179,32 +182,31 @@ bool HttpsMethod::SetupProxy() /*{{{*/ curl_easy_setopt(curl, CURLOPT_PROXY, ""); // Determine the proxy setting - try https first, fallback to http and use env at last - string UseProxy = _config->Find("Acquire::https::Proxy::" + ServerName.Host, - _config->Find("Acquire::http::Proxy::" + ServerName.Host).c_str()); - + string UseProxy = ConfigFind("Proxy::" + ServerName.Host, ""); if (UseProxy.empty() == true) - UseProxy = _config->Find("Acquire::https::Proxy", _config->Find("Acquire::http::Proxy").c_str()); - - // User want to use NO proxy, so nothing to setup + UseProxy = ConfigFind("Proxy", ""); + // User wants to use NO proxy, so nothing to setup if (UseProxy == "DIRECT") return true; - // Parse no_proxy, a comma (,) separated list of domains we don't want to use + // Parse no_proxy, a comma (,) separated list of domains we don't want to use // a proxy for so we stop right here if it is in the list if (getenv("no_proxy") != 0 && CheckDomainList(ServerName.Host,getenv("no_proxy")) == true) return true; if (UseProxy.empty() == true) { - const char* result = getenv("https_proxy"); + const char* result = nullptr; + if (std::find(methodNames.begin(), methodNames.end(), "https") != methodNames.end()) + result = getenv("https_proxy"); // FIXME: Fall back to http_proxy is to remain compatible with // existing setups and behaviour of apt.conf. This should be // deprecated in the future (including apt.conf). Most other // programs do not fall back to http proxy settings and neither // should Apt. - if (result == NULL) - result = getenv("http_proxy"); - UseProxy = result == NULL ? "" : result; + if (result == nullptr && std::find(methodNames.begin(), methodNames.end(), "http") != methodNames.end()) + result = getenv("http_proxy"); + UseProxy = result == nullptr ? "" : result; } // Determine what host and port to use based on the proxy settings @@ -245,12 +247,19 @@ bool HttpsMethod::Fetch(FetchItem *Itm) struct curl_slist *headers=NULL; char curl_errorstr[CURL_ERROR_SIZE]; URI Uri = Itm->Uri; - string remotehost = Uri.Host; + setPostfixForMethodNames(Uri.Host.c_str()); + AllowRedirect = ConfigFindB("AllowRedirect", true); + Debug = DebugEnabled(); // TODO: // - http::Pipeline-Depth // - error checking/reporting // - more debug options? (CURLOPT_DEBUGFUNCTION?) 
+ { + auto const plus = Binary.find('+'); + if (plus != std::string::npos) + Uri.Access = Binary.substr(plus + 1); + } curl_easy_reset(curl); if (SetupProxy() == false) @@ -270,107 +279,81 @@ bool HttpsMethod::Fetch(FetchItem *Itm) curl_easy_setopt(curl, CURLOPT_NOPROGRESS, true); curl_easy_setopt(curl, CURLOPT_FILETIME, true); curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 0); - // only allow curl to handle https, not the other stuff it supports - curl_easy_setopt(curl, CURLOPT_PROTOCOLS, CURLPROTO_HTTPS); - curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTPS); - - // SSL parameters are set by default to the common (non mirror-specific) value - // if available (or a default one) and gets overload by mirror-specific ones. - - // File containing the list of trusted CA. - string cainfo = _config->Find("Acquire::https::CaInfo",""); - string knob = "Acquire::https::"+remotehost+"::CaInfo"; - cainfo = _config->Find(knob.c_str(),cainfo.c_str()); - if(cainfo.empty() == false) - curl_easy_setopt(curl, CURLOPT_CAINFO,cainfo.c_str()); - - // Check server certificate against previous CA list ... - bool peer_verify = _config->FindB("Acquire::https::Verify-Peer",true); - knob = "Acquire::https::" + remotehost + "::Verify-Peer"; - peer_verify = _config->FindB(knob.c_str(), peer_verify); - curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, peer_verify); - - // ... and hostname against cert CN or subjectAltName - bool verify = _config->FindB("Acquire::https::Verify-Host",true); - knob = "Acquire::https::"+remotehost+"::Verify-Host"; - verify = _config->FindB(knob.c_str(),verify); - int const default_verify = (verify == true) ? 2 : 0; - curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, default_verify); - - // Also enforce issuer of server certificate using its cert - string issuercert = _config->Find("Acquire::https::IssuerCert",""); - knob = "Acquire::https::"+remotehost+"::IssuerCert"; - issuercert = _config->Find(knob.c_str(),issuercert.c_str()); - if(issuercert.empty() == false) - curl_easy_setopt(curl, CURLOPT_ISSUERCERT,issuercert.c_str()); - - // For client authentication, certificate file ... - string pem = _config->Find("Acquire::https::SslCert",""); - knob = "Acquire::https::"+remotehost+"::SslCert"; - pem = _config->Find(knob.c_str(),pem.c_str()); - if(pem.empty() == false) - curl_easy_setopt(curl, CURLOPT_SSLCERT, pem.c_str()); - - // ... and associated key. - string key = _config->Find("Acquire::https::SslKey",""); - knob = "Acquire::https::"+remotehost+"::SslKey"; - key = _config->Find(knob.c_str(),key.c_str()); - if(key.empty() == false) - curl_easy_setopt(curl, CURLOPT_SSLKEY, key.c_str()); - - // Allow forcing SSL version to SSLv3 or TLSv1 (SSLv2 is not - // supported by GnuTLS). 
- long final_version = CURL_SSLVERSION_DEFAULT; - string sslversion = _config->Find("Acquire::https::SslForceVersion",""); - knob = "Acquire::https::"+remotehost+"::SslForceVersion"; - sslversion = _config->Find(knob.c_str(),sslversion.c_str()); - if(sslversion == "TLSv1") - final_version = CURL_SSLVERSION_TLSv1; - else if(sslversion == "SSLv3") - final_version = CURL_SSLVERSION_SSLv3; - curl_easy_setopt(curl, CURLOPT_SSLVERSION, final_version); - - // CRL file - string crlfile = _config->Find("Acquire::https::CrlFile",""); - knob = "Acquire::https::"+remotehost+"::CrlFile"; - crlfile = _config->Find(knob.c_str(),crlfile.c_str()); - if(crlfile.empty() == false) - curl_easy_setopt(curl, CURLOPT_CRLFILE, crlfile.c_str()); + if (std::find(methodNames.begin(), methodNames.end(), "https") != methodNames.end()) + { + curl_easy_setopt(curl, CURLOPT_PROTOCOLS, CURLPROTO_HTTPS); + curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTPS); + + // File containing the list of trusted CA. + std::string const cainfo = ConfigFind("CaInfo", ""); + if(cainfo.empty() == false) + curl_easy_setopt(curl, CURLOPT_CAINFO, cainfo.c_str()); + // Check server certificate against previous CA list ... + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, ConfigFindB("Verify-Peer", true) ? 1 : 0); + // ... and hostname against cert CN or subjectAltName + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, ConfigFindB("Verify-Host", true) ? 2 : 0); + // Also enforce issuer of server certificate using its cert + std::string const issuercert = ConfigFind("IssuerCert", ""); + if(issuercert.empty() == false) + curl_easy_setopt(curl, CURLOPT_ISSUERCERT, issuercert.c_str()); + // For client authentication, certificate file ... + std::string const pem = ConfigFind("SslCert", ""); + if(pem.empty() == false) + curl_easy_setopt(curl, CURLOPT_SSLCERT, pem.c_str()); + // ... and associated key. 
+ std::string const key = ConfigFind("SslKey", ""); + if(key.empty() == false) + curl_easy_setopt(curl, CURLOPT_SSLKEY, key.c_str()); + // Allow forcing SSL version to SSLv3 or TLSv1 + long final_version = CURL_SSLVERSION_DEFAULT; + std::string const sslversion = ConfigFind("SslForceVersion", ""); + if(sslversion == "TLSv1") + final_version = CURL_SSLVERSION_TLSv1; + else if(sslversion == "TLSv1.0") + final_version = CURL_SSLVERSION_TLSv1_0; + else if(sslversion == "TLSv1.1") + final_version = CURL_SSLVERSION_TLSv1_1; + else if(sslversion == "TLSv1.2") + final_version = CURL_SSLVERSION_TLSv1_2; + else if(sslversion == "SSLv3") + final_version = CURL_SSLVERSION_SSLv3; + curl_easy_setopt(curl, CURLOPT_SSLVERSION, final_version); + // CRL file + std::string const crlfile = ConfigFind("CrlFile", ""); + if(crlfile.empty() == false) + curl_easy_setopt(curl, CURLOPT_CRLFILE, crlfile.c_str()); + } + else + { + curl_easy_setopt(curl, CURLOPT_PROTOCOLS, CURLPROTO_HTTP); + curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTP); + } // cache-control - if(_config->FindB("Acquire::https::No-Cache", - _config->FindB("Acquire::http::No-Cache",false)) == false) + if(ConfigFindB("No-Cache", false) == false) { // cache enabled - if (_config->FindB("Acquire::https::No-Store", - _config->FindB("Acquire::http::No-Store",false)) == true) + if (ConfigFindB("No-Store", false) == true) headers = curl_slist_append(headers,"Cache-Control: no-store"); - stringstream ss; - ioprintf(ss, "Cache-Control: max-age=%u", _config->FindI("Acquire::https::Max-Age", - _config->FindI("Acquire::http::Max-Age",0))); - headers = curl_slist_append(headers, ss.str().c_str()); + std::string ss; + strprintf(ss, "Cache-Control: max-age=%u", ConfigFindI("Max-Age", 0)); + headers = curl_slist_append(headers, ss.c_str()); } else { // cache disabled by user headers = curl_slist_append(headers, "Cache-Control: no-cache"); headers = curl_slist_append(headers, "Pragma: no-cache"); } curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); - // speed limit - int const dlLimit = _config->FindI("Acquire::https::Dl-Limit", - _config->FindI("Acquire::http::Dl-Limit",0))*1024; + int const dlLimit = ConfigFindI("Dl-Limit", 0) * 1024; if (dlLimit > 0) curl_easy_setopt(curl, CURLOPT_MAX_RECV_SPEED_LARGE, dlLimit); // set header - curl_easy_setopt(curl, CURLOPT_USERAGENT, - _config->Find("Acquire::https::User-Agent", - _config->Find("Acquire::http::User-Agent", - "Debian APT-CURL/1.0 (" PACKAGE_VERSION ")").c_str()).c_str()); + curl_easy_setopt(curl, CURLOPT_USERAGENT, ConfigFind("User-Agent", "Debian APT-CURL/1.0 (" PACKAGE_VERSION ")").c_str()); // set timeout - int const timeout = _config->FindI("Acquire::https::Timeout", - _config->FindI("Acquire::http::Timeout",120)); + int const timeout = ConfigFindI("Timeout", 120); curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, timeout); //set really low lowspeed timeout (see #497983) curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT, DL_MIN_SPEED); @@ -389,7 +372,7 @@ bool HttpsMethod::Fetch(FetchItem *Itm) // see 657029, 657560 and co, so if we have no extension on the request // ask for text only. As a sidenote: If there is nothing to negotate servers // seem to be nice and ignore it. 
- if (_config->FindB("Acquire::https::SendAccept", _config->FindB("Acquire::http::SendAccept", true)) == true) + if (ConfigFindB("SendAccept", true)) { size_t const filepos = Itm->Uri.find_last_of('/'); string const file = Itm->Uri.substr(filepos + 1); @@ -513,27 +496,40 @@ bool HttpsMethod::Fetch(FetchItem *Itm) return true; } /*}}}*/ -// HttpsMethod::Configuration - Handle a configuration message /*{{{*/ -bool HttpsMethod::Configuration(string Message) +std::unique_ptr HttpsMethod::CreateServerState(URI const &uri)/*{{{*/ { - if (ServerMethod::Configuration(Message) == false) - return false; - - AllowRedirect = _config->FindB("Acquire::https::AllowRedirect", - _config->FindB("Acquire::http::AllowRedirect", true)); - Debug = _config->FindB("Debug::Acquire::https",false); - - return true; + return std::unique_ptr(new HttpsServerState(uri, this)); } /*}}}*/ -std::unique_ptr HttpsMethod::CreateServerState(URI const &uri)/*{{{*/ +HttpsMethod::HttpsMethod(std::string &&pProg) : ServerMethod(std::move(pProg),"1.2",Pipeline | SendConfig)/*{{{*/ { - return std::unique_ptr(new HttpsServerState(uri, this)); + auto addName = std::inserter(methodNames, methodNames.begin()); + addName = "http"; + auto const plus = Binary.find('+'); + if (plus != std::string::npos) + { + addName = Binary.substr(plus + 1); + auto base = Binary.substr(0, plus); + if (base != "https") + addName = base; + } + if (std::find(methodNames.begin(), methodNames.end(), "https") != methodNames.end()) + curl_global_init(CURL_GLOBAL_SSL); + else + curl_global_init(CURL_GLOBAL_NOTHING); + curl = curl_easy_init(); } /*}}}*/ - -int main() +HttpsMethod::~HttpsMethod() /*{{{*/ { - return HttpsMethod().Run(); + curl_easy_cleanup(curl); } - + /*}}}*/ +int main(int, const char *argv[]) /*{{{*/ +{ + std::string Binary = flNotDir(argv[0]); + if (Binary.find('+') == std::string::npos && Binary != "https") + Binary.append("+https"); + return HttpsMethod(std::move(Binary)).Run(); +} + /*}}}*/ diff --git a/methods/https.h b/methods/https.h index 85cc7824e..04e72e815 100644 --- a/methods/https.h +++ b/methods/https.h @@ -74,21 +74,12 @@ class HttpsMethod : public ServerMethod public: - virtual bool Configuration(std::string Message) APT_OVERRIDE; virtual std::unique_ptr CreateServerState(URI const &uri) APT_OVERRIDE; using pkgAcqMethod::FetchResult; using pkgAcqMethod::FetchItem; - HttpsMethod() : ServerMethod("https","1.2",Pipeline | SendConfig) - { - curl_global_init(CURL_GLOBAL_SSL); - curl = curl_easy_init(); - }; - - ~HttpsMethod() - { - curl_easy_cleanup(curl); - }; + explicit HttpsMethod(std::string &&pProg); + virtual ~HttpsMethod(); }; #include diff --git a/methods/mirror.cc b/methods/mirror.cc index 9d900771b..71faaf591 100644 --- a/methods/mirror.cc +++ b/methods/mirror.cc @@ -57,7 +57,7 @@ using namespace std; */ MirrorMethod::MirrorMethod() - : HttpMethod(), DownloadedMirrorFile(false), Debug(false) + : HttpMethod("mirror"), DownloadedMirrorFile(false), Debug(false) { } @@ -68,7 +68,7 @@ bool MirrorMethod::Configuration(string Message) { if (pkgAcqMethod::Configuration(Message) == false) return false; - Debug = _config->FindB("Debug::Acquire::mirror",false); + Debug = DebugEnabled(); return true; } diff --git a/methods/rred.cc b/methods/rred.cc index 2d2333f91..0c641ad82 100644 --- a/methods/rred.cc +++ b/methods/rred.cc @@ -572,7 +572,7 @@ class RredMethod : public aptMethod { protected: virtual bool URIAcquire(std::string const &Message, FetchItem *Itm) APT_OVERRIDE { - Debug = _config->FindB("Debug::pkgAcquire::RRed", false); + 
Debug = DebugEnabled(); URI Get = Itm->Uri; std::string Path = Get.Host + Get.Path; // rred:/path - no host diff --git a/methods/rsh.cc b/methods/rsh.cc index 74a908ef7..7b8af6f9b 100644 --- a/methods/rsh.cc +++ b/methods/rsh.cc @@ -383,9 +383,7 @@ bool RSHConn::Get(const char *Path,FileFd &To,unsigned long long Resume, /*}}}*/ // RSHMethod::RSHMethod - Constructor /*{{{*/ -// --------------------------------------------------------------------- -/* */ -RSHMethod::RSHMethod(std::string const &pProg) : aptMethod(pProg.c_str(),"1.0",SendConfig), Prog(pProg) +RSHMethod::RSHMethod(std::string &&pProg) : aptMethod(std::move(pProg),"1.0",SendConfig) { signal(SIGTERM,SigTerm); signal(SIGINT,SigTerm); @@ -399,14 +397,14 @@ bool RSHMethod::Configuration(std::string Message) { // enabling privilege dropping for this method requires configuration… // … which is otherwise lifted straight from root, so use it by default. - _config->Set(std::string("Binary::") + Prog + "::APT::Sandbox::User", ""); + _config->Set(std::string("Binary::") + Binary + "::APT::Sandbox::User", ""); if (aptMethod::Configuration(Message) == false) return false; - std::string const timeconf = std::string("Acquire::") + Prog + "::Timeout"; + std::string const timeconf = std::string("Acquire::") + Binary + "::Timeout"; TimeOut = _config->FindI(timeconf, TimeOut); - std::string const optsconf = std::string("Acquire::") + Prog + "::Options"; + std::string const optsconf = std::string("Acquire::") + Binary + "::Options"; RshOptions = _config->Tree(optsconf.c_str()); return true; @@ -445,7 +443,7 @@ bool RSHMethod::Fetch(FetchItem *Itm) // Connect to the server if (Server == 0 || Server->Comp(Get) == false) { delete Server; - Server = new RSHConn(Prog, Get); + Server = new RSHConn(Binary, Get); } // Could not connect is a transient error.. 
diff --git a/methods/rsh.h b/methods/rsh.h index 571e38ba6..dee4ad647 100644 --- a/methods/rsh.h +++ b/methods/rsh.h @@ -58,7 +58,6 @@ class RSHConn class RSHMethod : public aptMethod { - std::string const Prog; virtual bool Fetch(FetchItem *Itm) APT_OVERRIDE; virtual bool Configuration(std::string Message) APT_OVERRIDE; @@ -71,7 +70,7 @@ class RSHMethod : public aptMethod public: - explicit RSHMethod(std::string const &Prog); + explicit RSHMethod(std::string &&Prog); }; #endif diff --git a/methods/server.cc b/methods/server.cc index 66551b893..7c85c8abb 100644 --- a/methods/server.cc +++ b/methods/server.cc @@ -313,10 +313,30 @@ ServerMethod::DealWithHeaders(FetchResult &Res) { NextURI = DeQuoteString(Server->Location); URI tmpURI = NextURI; - if (tmpURI.Access == "http" && Binary == "https+http") + if (tmpURI.Access.find('+') != std::string::npos) { - tmpURI.Access = "https+http"; - NextURI = tmpURI; + _error->Error("Server tried to trick us into using a specific implementation: %s", tmpURI.Access.c_str()); + if (Server->HaveContent == true) + return ERROR_WITH_CONTENT_PAGE; + return ERROR_UNRECOVERABLE; + } + URI Uri = Queue->Uri; + if (Binary.find('+') != std::string::npos) + { + auto base = Binary.substr(0, Binary.find('+')); + if (base != tmpURI.Access) + { + tmpURI.Access = base + '+' + tmpURI.Access; + if (tmpURI.Access == Binary) + { + std::string tmpAccess = Uri.Access; + std::swap(tmpURI.Access, Uri.Access); + NextURI = tmpURI; + std::swap(tmpURI.Access, Uri.Access); + } + else + NextURI = tmpURI; + } } if (Queue->Uri == NextURI) { @@ -326,7 +346,7 @@ ServerMethod::DealWithHeaders(FetchResult &Res) return ERROR_WITH_CONTENT_PAGE; return ERROR_UNRECOVERABLE; } - URI Uri = Queue->Uri; + Uri.Access = Binary; // same protocol redirects are okay if (tmpURI.Access == Uri.Access) return TRY_AGAIN_OR_REDIRECT; @@ -334,7 +354,22 @@ ServerMethod::DealWithHeaders(FetchResult &Res) else if ((Uri.Access == "http" || Uri.Access == "https+http") && tmpURI.Access == "https") return TRY_AGAIN_OR_REDIRECT; else - _error->Error("Redirection from %s to '%s' is forbidden", Uri.Access.c_str(), NextURI.c_str()); + { + auto const tmpplus = tmpURI.Access.find('+'); + if (tmpplus != std::string::npos && tmpURI.Access.substr(tmpplus + 1) == "https") + { + auto const uriplus = Uri.Access.find('+'); + if (uriplus == std::string::npos) + { + if (Uri.Access == tmpURI.Access.substr(0, tmpplus)) // foo -> foo+https + return TRY_AGAIN_OR_REDIRECT; + } + else if (Uri.Access.substr(uriplus + 1) == "http" && + Uri.Access.substr(0, uriplus) == tmpURI.Access.substr(0, tmpplus)) // foo+http -> foo+https + return TRY_AGAIN_OR_REDIRECT; + } + } + _error->Error("Redirection from %s to '%s' is forbidden", Uri.Access.c_str(), NextURI.c_str()); } /* else pass through for error message */ } @@ -513,7 +548,7 @@ int ServerMethod::Loop() if (Result != -1 && (Result != 0 || Queue == 0)) { if(FailReason.empty() == false || - _config->FindB("Acquire::http::DependOnSTDIN", true) == true) + ConfigFindB("DependOnSTDIN", true) == true) return 100; else return 0; @@ -524,7 +559,13 @@ int ServerMethod::Loop() // Connect to the server if (Server == 0 || Server->Comp(Queue->Uri) == false) + { Server = CreateServerState(Queue->Uri); + setPostfixForMethodNames(::URI(Queue->Uri).Host.c_str()); + AllowRedirect = ConfigFindB("AllowRedirect", true); + PipelineDepth = ConfigFindI("Pipeline-Depth", 10); + Debug = DebugEnabled(); + } /* If the server has explicitly said this is the last connection then we pre-emptively shut down the pipeline and 
tear down @@ -747,8 +788,8 @@ unsigned long long ServerMethod::FindMaximumObjectSizeInQueue() const /*{{{*/ return MaxSizeInQueue; } /*}}}*/ -ServerMethod::ServerMethod(char const * const Binary, char const * const Ver,unsigned long const Flags) :/*{{{*/ - aptMethod(Binary, Ver, Flags), Server(nullptr), File(NULL), PipelineDepth(10), +ServerMethod::ServerMethod(std::string &&Binary, char const * const Ver,unsigned long const Flags) :/*{{{*/ + aptMethod(std::move(Binary), Ver, Flags), Server(nullptr), File(NULL), PipelineDepth(10), AllowRedirect(false), Debug(false) { } diff --git a/methods/server.h b/methods/server.h index f2868c96a..f6a635dca 100644 --- a/methods/server.h +++ b/methods/server.h @@ -157,7 +157,7 @@ class ServerMethod : public aptMethod virtual std::unique_ptr CreateServerState(URI const &uri) = 0; virtual void RotateDNS() = 0; - ServerMethod(char const * const Binary, char const * const Ver,unsigned long const Flags); + ServerMethod(std::string &&Binary, char const * const Ver,unsigned long const Flags); virtual ~ServerMethod() {}; }; diff --git a/methods/store.cc b/methods/store.cc index fa02d4597..1faaa4fb4 100644 --- a/methods/store.cc +++ b/methods/store.cc @@ -32,12 +32,15 @@ class StoreMethod : public aptMethod { - std::string const Prog; virtual bool Fetch(FetchItem *Itm) APT_OVERRIDE; public: - explicit StoreMethod(std::string const &pProg) : aptMethod(pProg.c_str(),"1.2",SingleInstance | SendConfig), Prog(pProg) {}; + explicit StoreMethod(std::string &&pProg) : aptMethod(std::move(pProg),"1.2",SingleInstance | SendConfig) + { + if (Binary != "store") + methodNames.insert(methodNames.begin(), "store"); + } }; static bool OpenFileWithCompressorByName(FileFd &fileFd, std::string const &Filename, unsigned int const Mode, std::string const &Name) @@ -70,7 +73,7 @@ bool StoreMethod::Fetch(FetchItem *Itm) /*{{{*/ FileFd From; if (_config->FindB("Method::Compress", false) == false) { - if (OpenFileWithCompressorByName(From, Path, FileFd::ReadOnly, Prog) == false) + if (OpenFileWithCompressorByName(From, Path, FileFd::ReadOnly, Binary) == false) return false; if(From.IsCompressed() && From.FileSize() == 0) return _error->Error(_("Empty files can't be valid archives")); @@ -85,7 +88,7 @@ bool StoreMethod::Fetch(FetchItem *Itm) /*{{{*/ { if (_config->FindB("Method::Compress", false) == false) To.Open(Itm->DestFile, FileFd::WriteOnly | FileFd::Create | FileFd::Atomic, FileFd::Extension); - else if (OpenFileWithCompressorByName(To, Itm->DestFile, FileFd::WriteOnly | FileFd::Create | FileFd::Empty, Prog) == false) + else if (OpenFileWithCompressorByName(To, Itm->DestFile, FileFd::WriteOnly | FileFd::Create | FileFd::Empty, Binary) == false) return false; if (To.IsOpen() == false || To.Failed() == true) diff --git a/test/integration/test-bug-738785-switch-protocol b/test/integration/test-bug-738785-switch-protocol index 8f8216d04..fa6702eb0 100755 --- a/test/integration/test-bug-738785-switch-protocol +++ b/test/integration/test-bug-738785-switch-protocol @@ -63,4 +63,4 @@ ln -s "$OLDMETHODS/https" "$NEWMETHODS" # check that downgrades from https to http are not allowed webserverconfig 'aptwebserver::support::http' 'true' sed -i -e "s#:${APTHTTPPORT}/redirectme#:${APTHTTPSPORT}/downgrademe#" -e 's# http:# https:#' rootdir/etc/apt/sources.list.d/* -testfailure aptget update --allow-insecure-repositories +testfailure aptget update --allow-insecure-repositories -o Acquire::https::Timeout=1 diff --git a/test/integration/test-different-methods-for-same-source 
b/test/integration/test-different-methods-for-same-source index 6b3f796b1..40138ad5d 100755 --- a/test/integration/test-different-methods-for-same-source +++ b/test/integration/test-different-methods-for-same-source @@ -24,10 +24,23 @@ IFS="$backupIFS" ln -s "${OLDMETHODS}/http" "${NEWMETHODS}/http-ng" changetowebserver +webserverconfig 'aptwebserver::redirect::replace::/redirectme/' "http://localhost:${APTHTTPPORT}/" sed -i -e 's# http:# http-ng:#' $(find rootdir/etc/apt/sources.list.d -name '*-deb-src.list') -testsuccess apt update +testsuccess apt update -o Debug::Acquire::http-ng=1 cp rootdir/tmp/testsuccess.output update.log # all requests are folded into the first Release file testsuccess grep ' http-ng://' update.log testfailure grep ' http://' update.log +# see if method-specific debug was enabled +testsuccess grep '^Answer for: http-ng:' update.log + +rm -rf rootdir/var/lib/apt/lists +sed -i -e "s#:${APTHTTPPORT}/#:${APTHTTPPORT}/redirectme#" rootdir/etc/apt/sources.list.d/* +testsuccess apt update -o Debug::Acquire::http-ng=1 +cp rootdir/tmp/testsuccess.output update.log +# all requests are folded into the first Release file +testsuccess grep ' http-ng://' update.log +testfailure grep '^[^L].* http://' update.log +# see if method-specific debug was enabled +testsuccess grep '^Answer for: http-ng:' update.log diff --git a/test/integration/test-pdiff-usage b/test/integration/test-pdiff-usage index 91528389b..39d847203 100755 --- a/test/integration/test-pdiff-usage +++ b/test/integration/test-pdiff-usage @@ -317,7 +317,7 @@ Debug::Acquire::Transaction "true"; Debug::pkgAcquire::Worker "true"; Debug::Acquire::http "true"; Debug::pkgAcquire "true"; -Debug::pkgAcquire::rred "true";' > rootdir/etc/apt/apt.conf.d/rreddebug.conf +Debug::Acquire::rred "true";' > rootdir/etc/apt/apt.conf.d/rreddebug.conf testcase() { testrun -o Acquire::PDiffs::Merge=0 -o APT::Get::List-Cleanup=1 "$@" -- cgit v1.2.3 From 61db48241f2d46697a291bfedaf398a1ca9a70e3 Mon Sep 17 00:00:00 2001 From: David Kalnischkies Date: Thu, 4 Aug 2016 08:45:38 +0200 Subject: implement socks5h proxy support for http method MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Socks support is a requested feature insofar as the internet actually believes Acquire::socks::Proxy would exist. It doesn't, and this commit isn't adding it as that isn't how our configuration works, but it allows Acquire::http::Proxy="socks5h://…". The HTTPS method was already changed to support socks proxies (all versions) via curl. This commit implements only SOCKS5 (RFC1928) with no auth or user&pass auth (RFC1929), but not GSSAPI, which the RFC requires. The 'h' in the protocol name further indicates that DNS resolution is delegated to the socks proxy rather than performed locally. The implementation works and was tested with Tor as the socks proxy, for which implementing only socks5h can actually be considered a feature.
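The handshake described above is compact enough to sketch outside of apt's own classes before reading the diff below. The following standalone C++ fragment only illustrates the two byte sequences the client sends for a socks5h CONNECT (the greeting per RFC 1928, optionally offering user/pass auth per RFC 1929, and a request carrying the hostname unresolved so the proxy performs the DNS lookup); the function names are invented for the example and this is not apt code.

// Sketch of the RFC 1928/1929 byte sequences behind a "socks5h" CONNECT:
// the hostname travels as-is (address type 0x03), so the proxy resolves it.
#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

// Greeting: offer "no auth" (0x00) and, if credentials are configured,
// also username/password auth (0x02).
static std::vector<uint8_t> Socks5Greeting(bool const withUserPass)
{
   if (withUserPass)
      return {0x05, 0x02, 0x00, 0x02};
   return {0x05, 0x01, 0x00};
}

// CONNECT request: VER CMD RSV ATYP=0x03 LEN <host> PORT (2 bytes, network order).
static std::vector<uint8_t> Socks5ConnectRequest(std::string const &host, uint16_t const port)
{
   if (host.length() > 255)
      throw std::length_error("SOCKS5 limits hostnames to 255 bytes");
   std::vector<uint8_t> req = {0x05, 0x01, 0x00, 0x03, static_cast<uint8_t>(host.length())};
   req.insert(req.end(), host.begin(), host.end());
   req.push_back(static_cast<uint8_t>(port >> 8));   // high byte first,
   req.push_back(static_cast<uint8_t>(port & 0xff)); // i.e. network byte order
   return req;
}

Most of the new code in the diff below is not this request building but the parsing of the reply, whose bound address can come back as an IPv4 address, a hostname, or an IPv6 address.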
Closes: 744934 --- apt-pkg/contrib/fileutl.cc | 31 ++++ apt-pkg/contrib/fileutl.h | 1 + methods/http.cc | 191 ++++++++++++++++++++++--- methods/http.h | 2 +- test/integration/skip-method-http-socks-client | 116 +++++++++++++++ 5 files changed, 318 insertions(+), 23 deletions(-) create mode 100755 test/integration/skip-method-http-socks-client diff --git a/apt-pkg/contrib/fileutl.cc b/apt-pkg/contrib/fileutl.cc index 342f9830d..a9d51a6bf 100644 --- a/apt-pkg/contrib/fileutl.cc +++ b/apt-pkg/contrib/fileutl.cc @@ -2443,6 +2443,37 @@ bool FileFd::Read(void *To,unsigned long long Size,unsigned long long *Actual) } return FileFdError(_("read, still have %llu to read but none left"), Size); +} +bool FileFd::Read(int const Fd, void *To, unsigned long long Size, unsigned long long * const Actual) +{ + ssize_t Res = 1; + errno = 0; + if (Actual != nullptr) + *Actual = 0; + *static_cast(To) = '\0'; + while (Res > 0 && Size > 0) + { + Res = read(Fd, To, Size); + if (Res < 0) + { + if (errno == EINTR) + { + Res = 1; + errno = 0; + continue; + } + return _error->Errno("read", _("Read error")); + } + To = static_cast(To) + Res; + Size -= Res; + if (Actual != 0) + *Actual += Res; + } + if (Size == 0) + return true; + if (Actual != nullptr) + return true; + return _error->Error(_("read, still have %llu to read but none left"), Size); } /*}}}*/ // FileFd::ReadLine - Read a complete line from the file /*{{{*/ diff --git a/apt-pkg/contrib/fileutl.h b/apt-pkg/contrib/fileutl.h index c13613171..4a1676dc2 100644 --- a/apt-pkg/contrib/fileutl.h +++ b/apt-pkg/contrib/fileutl.h @@ -86,6 +86,7 @@ class FileFd return Read(To,Size); } bool Read(void *To,unsigned long long Size,unsigned long long *Actual = 0); + bool static Read(int const Fd, void *To, unsigned long long Size, unsigned long long * const Actual = 0); char* ReadLine(char *To, unsigned long long const Size); bool Flush(); bool Write(const void *From,unsigned long long Size); diff --git a/methods/http.cc b/methods/http.cc index 0378d7c60..1ed2e3629 100644 --- a/methods/http.cc +++ b/methods/http.cc @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -151,9 +152,9 @@ bool CircleBuf::Read(int Fd) // CircleBuf::Read - Put the string into the buffer /*{{{*/ // --------------------------------------------------------------------- /* This will hold the string in and fill the buffer with it as it empties */ -bool CircleBuf::Read(string Data) +bool CircleBuf::Read(string const &Data) { - OutQueue += Data; + OutQueue.append(Data); FillOut(); return true; } @@ -296,6 +297,24 @@ HttpServerState::HttpServerState(URI Srv,HttpMethod *Owner) : ServerState(Srv, O // HttpServerState::Open - Open a connection to the server /*{{{*/ // --------------------------------------------------------------------- /* This opens a connection to the server. 
*/ +static bool TalkToSocksProxy(int const ServerFd, std::string const &Proxy, + char const * const type, bool const ReadWrite, uint8_t * const ToFrom, + unsigned int const Size, unsigned int const Timeout) +{ + if (WaitFd(ServerFd, ReadWrite, Timeout) == false) + return _error->Error("Waiting for the SOCKS proxy %s to %s timed out", URI::SiteOnly(Proxy).c_str(), type); + if (ReadWrite == false) + { + if (FileFd::Read(ServerFd, ToFrom, Size) == false) + return _error->Error("Reading the %s from SOCKS proxy %s failed", type, URI::SiteOnly(Proxy).c_str()); + } + else + { + if (FileFd::Write(ServerFd, ToFrom, Size) == false) + return _error->Error("Writing the %s to SOCKS proxy %s failed", type, URI::SiteOnly(Proxy).c_str()); + } + return true; +} bool HttpServerState::Open() { // Use the already open connection if possible. @@ -337,29 +356,156 @@ bool HttpServerState::Open() if (CheckDomainList(ServerName.Host,getenv("no_proxy")) == true) Proxy = ""; } - - // Determine what host and port to use based on the proxy settings - int Port = 0; - string Host; - if (Proxy.empty() == true || Proxy.Host.empty() == true) + + if (Proxy.Access == "socks5h") { - if (ServerName.Port != 0) - Port = ServerName.Port; - Host = ServerName.Host; + if (Connect(Proxy.Host, Proxy.Port, "socks", 1080, ServerFd, TimeOut, Owner) == false) + return false; + + /* We implement a very basic SOCKS5 client here complying mostly to RFC1928 expect + * for not offering GSSAPI auth which is a must (we only do no or user/pass auth). + * We also expect the SOCKS5 server to do hostname lookup (aka socks5h) */ + std::string const ProxyInfo = URI::SiteOnly(Proxy); + Owner->Status(_("Connecting to %s (%s)"),"SOCKS5h proxy",ProxyInfo.c_str()); + auto const Timeout = Owner->ConfigFindI("TimeOut", 120); + #define APT_WriteOrFail(TYPE, DATA, LENGTH) if (TalkToSocksProxy(ServerFd, ProxyInfo, TYPE, true, DATA, LENGTH, Timeout) == false) return false + #define APT_ReadOrFail(TYPE, DATA, LENGTH) if (TalkToSocksProxy(ServerFd, ProxyInfo, TYPE, false, DATA, LENGTH, Timeout) == false) return false + if (ServerName.Host.length() > 255) + return _error->Error("Can't use SOCKS5h as hostname %s is too long!", ServerName.Host.c_str()); + if (Proxy.User.length() > 255 || Proxy.Password.length() > 255) + return _error->Error("Can't use user&pass auth as they are too long (%lu and %lu) for the SOCKS5!", Proxy.User.length(), Proxy.Password.length()); + if (Proxy.User.empty()) + { + uint8_t greeting[] = { 0x05, 0x01, 0x00 }; + APT_WriteOrFail("greet-1", greeting, sizeof(greeting)); + } + else + { + uint8_t greeting[] = { 0x05, 0x02, 0x00, 0x02 }; + APT_WriteOrFail("greet-2", greeting, sizeof(greeting)); + } + uint8_t greeting[2]; + APT_ReadOrFail("greet back", greeting, sizeof(greeting)); + if (greeting[0] != 0x05) + return _error->Error("SOCKS proxy %s greets back with wrong version: %d", ProxyInfo.c_str(), greeting[0]); + if (greeting[1] == 0x00) + ; // no auth has no method-dependent sub-negotiations + else if (greeting[1] == 0x02) + { + if (Proxy.User.empty()) + return _error->Error("SOCKS proxy %s negotiated user&pass auth, but we had not offered it!", ProxyInfo.c_str()); + // user&pass auth sub-negotiations are defined by RFC1929 + std::vector auth = {{ 0x01, static_cast(Proxy.User.length()) }}; + std::copy(Proxy.User.begin(), Proxy.User.end(), std::back_inserter(auth)); + auth.push_back(static_cast(Proxy.Password.length())); + std::copy(Proxy.Password.begin(), Proxy.Password.end(), std::back_inserter(auth)); + APT_WriteOrFail("user&pass auth", 
auth.data(), auth.size()); + uint8_t authstatus[2]; + APT_ReadOrFail("auth report", authstatus, sizeof(authstatus)); + if (authstatus[0] != 0x01) + return _error->Error("SOCKS proxy %s auth status response with wrong version: %d", ProxyInfo.c_str(), authstatus[0]); + if (authstatus[1] != 0x00) + return _error->Error("SOCKS proxy %s reported authorization failure: username or password incorrect? (%d)", ProxyInfo.c_str(), authstatus[1]); + } + else + return _error->Error("SOCKS proxy %s greets back having not found a common authorization method: %d", ProxyInfo.c_str(), greeting[1]); + union { uint16_t * i; uint8_t * b; } portu; + uint16_t port = htons(static_cast(ServerName.Port == 0 ? 80 : ServerName.Port)); + portu.i = &port; + std::vector request = {{ 0x05, 0x01, 0x00, 0x03, static_cast(ServerName.Host.length()) }}; + std::copy(ServerName.Host.begin(), ServerName.Host.end(), std::back_inserter(request)); + request.push_back(portu.b[0]); + request.push_back(portu.b[1]); + APT_WriteOrFail("request", request.data(), request.size()); + uint8_t response[4]; + APT_ReadOrFail("first part of response", response, sizeof(response)); + if (response[0] != 0x05) + return _error->Error("SOCKS proxy %s response with wrong version: %d", ProxyInfo.c_str(), response[0]); + if (response[2] != 0x00) + return _error->Error("SOCKS proxy %s has unexpected non-zero reserved field value: %d", ProxyInfo.c_str(), response[2]); + std::string bindaddr; + if (response[3] == 0x01) // IPv4 address + { + uint8_t ip4port[6]; + APT_ReadOrFail("IPv4+Port of response", ip4port, sizeof(ip4port)); + portu.b[0] = ip4port[4]; + portu.b[1] = ip4port[5]; + port = ntohs(*portu.i); + strprintf(bindaddr, "%d.%d.%d.%d:%d", ip4port[0], ip4port[1], ip4port[2], ip4port[3], port); + } + else if (response[3] == 0x03) // hostname + { + uint8_t namelength; + APT_ReadOrFail("hostname length of response", &namelength, 1); + uint8_t hostname[namelength + 2]; + APT_ReadOrFail("hostname of response", hostname, sizeof(hostname)); + portu.b[0] = hostname[namelength]; + portu.b[1] = hostname[namelength + 1]; + port = ntohs(*portu.i); + hostname[namelength] = '\0'; + strprintf(bindaddr, "%s:%d", hostname, port); + } + else if (response[3] == 0x04) // IPv6 address + { + uint8_t ip6port[18]; + APT_ReadOrFail("IPv6+port of response", ip6port, sizeof(ip6port)); + portu.b[0] = ip6port[16]; + portu.b[1] = ip6port[17]; + port = ntohs(*portu.i); + strprintf(bindaddr, "[%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X]:%d", + ip6port[0], ip6port[1], ip6port[2], ip6port[3], ip6port[4], ip6port[5], ip6port[6], ip6port[7], + ip6port[8], ip6port[9], ip6port[10], ip6port[11], ip6port[12], ip6port[13], ip6port[14], ip6port[15], + port); + } + else + return _error->Error("SOCKS proxy %s destination address is of unknown type: %d", + ProxyInfo.c_str(), response[3]); + if (response[1] != 0x00) + { + char const * errstr; + switch (response[1]) + { + case 0x01: errstr = "general SOCKS server failure"; Owner->SetFailReason("SOCKS"); break; + case 0x02: errstr = "connection not allowed by ruleset"; Owner->SetFailReason("SOCKS"); break; + case 0x03: errstr = "Network unreachable"; Owner->SetFailReason("ConnectionTimedOut"); break; + case 0x04: errstr = "Host unreachable"; Owner->SetFailReason("ConnectionTimedOut"); break; + case 0x05: errstr = "Connection refused"; Owner->SetFailReason("ConnectionRefused"); break; + case 0x06: errstr = "TTL expired"; Owner->SetFailReason("Timeout"); break; + case 0x07: errstr = "Command not supported"; 
Owner->SetFailReason("SOCKS"); break; + case 0x08: errstr = "Address type not supported"; Owner->SetFailReason("SOCKS"); break; + default: errstr = "Unknown error"; Owner->SetFailReason("SOCKS"); break; + } + return _error->Error("SOCKS proxy %s didn't grant the connect to %s due to: %s (%d)", ProxyInfo.c_str(), bindaddr.c_str(), errstr, response[1]); + } + else if (Owner->DebugEnabled()) + ioprintf(std::clog, "http: SOCKS proxy %s connection established to %s\n", ProxyInfo.c_str(), bindaddr.c_str()); + + if (WaitFd(ServerFd, true, Timeout) == false) + return _error->Error("SOCKS proxy %s reported connection, but timed out", ProxyInfo.c_str()); + #undef APT_ReadOrFail + #undef APT_WriteOrFail } - else if (Proxy.Access != "http") - return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str()); else { - if (Proxy.Port != 0) - Port = Proxy.Port; - Host = Proxy.Host; + // Determine what host and port to use based on the proxy settings + int Port = 0; + string Host; + if (Proxy.empty() == true || Proxy.Host.empty() == true) + { + if (ServerName.Port != 0) + Port = ServerName.Port; + Host = ServerName.Host; + } + else if (Proxy.Access != "http") + return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str()); + else + { + if (Proxy.Port != 0) + Port = Proxy.Port; + Host = Proxy.Host; + } + return Connect(Host,Port,"http",80,ServerFd,TimeOut,Owner); } - - // Connect to the remote server - if (Connect(Host,Port,"http",80,ServerFd,TimeOut,Owner) == false) - return false; - return true; } /*}}}*/ @@ -707,7 +853,7 @@ void HttpMethod::SendReq(FetchItem *Itm) but while its a must for all servers to accept absolute URIs, it is assumed clients will sent an absolute path for non-proxies */ std::string requesturi; - if (Server->Proxy.empty() == true || Server->Proxy.Host.empty()) + if (Server->Proxy.Access != "http" || Server->Proxy.empty() == true || Server->Proxy.Host.empty()) requesturi = Uri.Path; else requesturi = Uri; @@ -756,7 +902,8 @@ void HttpMethod::SendReq(FetchItem *Itm) else if (Itm->LastModified != 0) Req << "If-Modified-Since: " << TimeRFC1123(Itm->LastModified, false).c_str() << "\r\n"; - if (Server->Proxy.User.empty() == false || Server->Proxy.Password.empty() == false) + if (Server->Proxy.Access == "http" && + (Server->Proxy.User.empty() == false || Server->Proxy.Password.empty() == false)) Req << "Proxy-Authorization: Basic " << Base64Encode(Server->Proxy.User + ":" + Server->Proxy.Password) << "\r\n"; diff --git a/methods/http.h b/methods/http.h index 4d22c88f9..644e576f0 100644 --- a/methods/http.h +++ b/methods/http.h @@ -67,7 +67,7 @@ class CircleBuf // Read data in bool Read(int Fd); - bool Read(std::string Data); + bool Read(std::string const &Data); // Write data out bool Write(int Fd); diff --git a/test/integration/skip-method-http-socks-client b/test/integration/skip-method-http-socks-client new file mode 100755 index 000000000..ee08f6d34 --- /dev/null +++ b/test/integration/skip-method-http-socks-client @@ -0,0 +1,116 @@ +#!/bin/sh +set -e + +TESTDIR="$(readlink -f "$(dirname "$0")")" +. 
"$TESTDIR/framework" + +setupenvironment + +# We don't do a real proxy here, we just look how the implementation +# reacts to certain responses from a "proxy" provided by socat +# Checks HTTP, but requesting https instead will check HTTPS (curl) which +# uses different error messages through – also: https://github.com/curl/curl/issues/944 + +# FIXME: Not run automatically as it uses a hardcoded port (5555) + +msgtest 'Check that everything is installed' 'socat' +if dpkg-checkbuilddeps -d 'socat' /dev/null >/dev/null 2>&1; then + msgpass +else + msgskip "$(command dpkg -l socat)" + exit +fi + +runclient() { + # this doesn't need to be an actually reachable webserver for this test + # in fact, its better if it isn't. + rm -f index.html + apthelper download-file http://localhost:2903/ index.html \ + -o Acquire::http::Proxy="socks5h://${1}localhost:5555" \ + -o Acquire::http::Timeout=2 -o Debug::Acquire::http=1 > client.output 2>&1 || true +} +runserver() { + socat -x tcp-listen:5555,reuseaddr \ + system:"echo -n '$*' | xxd -r -p; echo 'HTTP/1.1 200 OK'; echo 'Content-Length: 5'; echo 'Connection: close'; echo; echo 'HTML'" \ + > server.output 2>&1 & +} +PROXY="socks5h://localhost:5555" + +msgmsg 'SOCKS does not run' +runclient +testsuccess grep 'Could not connect to localhost:5555' client.output + +msgmsg 'SOCKS greets back with wrong version' +runserver '04 00' +runclient +testsuccess grep 'greets back with wrong version: 4' client.output + +msgmsg 'SOCKS tries GSSAPI auth we have not advertised' +runserver '05 01' +runclient +testsuccess grep 'greets back having not found a common authorization method: 1' client.output + +msgmsg 'SOCKS tries user&pass auth we have not advertised' +runserver '05 02' +runclient +testsuccess grep 'pass auth, but we had not offered it' client.output + +msgmsg 'SOCKS user:pass wrong version' +runserver '05 02' '05 00' +runclient 'user:pass@' +testsuccess grep 'auth status response with wrong version: 5' client.output + +msgmsg 'SOCKS user:pass wrong auth' +runserver '05 02' '01 01' +runclient 'user:pass@' +testsuccess grep 'reported authorization failure: username or password incorrect? 
(1)' client.output + +msgmsg 'SOCKS user:pass request not granted no hostname' +runserver '05 02' '01 00' '05 01 00 03 00 1f 90' +runclient 'user:pass@' +testsuccess grep 'grant the connect to :8080 due to: general SOCKS server failure (1)' client.output + +msgmsg 'SOCKS user:pass request not granted with hostname' +runserver '05 02' '01 00' '05 01 00 03 09 68 6f 73 74 6c 6f 63 61 6c 1f 90' +runclient 'user:pass@' +testsuccess grep 'grant the connect to hostlocal:8080 due to: general SOCKS server failure (1)' client.output + +msgmsg 'SOCKS user:pass request not granted ipv4' +runserver '05 02' '01 00' '05 04 00 01 ac 10 fe 01 1f 90' +runclient 'user:pass@' +testsuccess grep 'grant the connect to 172.16.254.1:8080 due to: Host unreachable (4)' client.output + +msgmsg 'SOCKS user:pass request not granted ipv6' +runserver '05 02' '01 00' '05 12 00 04 20 01 0d b8 ac 10 fe 00 00 00 00 00 00 00 00 00 1f 90' +runclient 'user:pass@' +testsuccess grep 'grant the connect to \[2001:0DB8:AC10:FE00:0000:0000:0000:0000\]:8080 due to: Unknown error (18)' client.output + +msgmsg 'SOCKS user:pass request granted ipv4' +runserver '05 02' '01 00' '05 00 00 01 ac 10 fe 01 1f 90' +runclient 'user:pass@' +testequal "http: SOCKS proxy $PROXY connection established to 172.16.254.1:8080" head -n 1 client.output +testfileequal index.html 'HTML' + +msgmsg 'SOCKS user:pass request granted ipv6' +runserver '05 02' '01 00' '05 00 00 04 20 01 0d b8 ac 10 fe 00 00 00 00 00 00 00 00 00 1f 90' +runclient 'user:pass@' +testequal "http: SOCKS proxy $PROXY connection established to [2001:0DB8:AC10:FE00:0000:0000:0000:0000]:8080" head -n 1 client.output +testfileequal index.html 'HTML' + +msgmsg 'SOCKS no auth no hostname' +runserver '05 00 05 00 00 03 00 1f 90' +runclient +testequal "http: SOCKS proxy $PROXY connection established to :8080" head -n 1 client.output +testfileequal index.html 'HTML' + +msgmsg 'SOCKS no auth with hostname' +runserver '05 00 05 00 00 03 09 68 6f 73 74 6c 6f 63 61 6c 1f 90' +runclient +testequal "http: SOCKS proxy $PROXY connection established to hostlocal:8080" head -n 1 client.output +testfileequal index.html 'HTML' + +msgmsg 'SOCKS user-only request granted ipv4' +runserver '05 02' '01 00' '05 00 00 01 ac 10 fe 01 1f 90' +runclient 'apt@' +testequal "http: SOCKS proxy $PROXY connection established to 172.16.254.1:8080" head -n 1 client.output +testfileequal index.html 'HTML' -- cgit v1.2.3 From c9c910695185b59aa27b787c1a250497e47b492b Mon Sep 17 00:00:00 2001 From: David Kalnischkies Date: Sat, 6 Aug 2016 19:59:57 +0200 Subject: allow methods to be disabled and redirected via config To prevent accidents like adding http-sources while using tor+http it can make sense to allow disabling methods. It might even make sense to allow "redirections" and adding "symlinked" methods via configuration. This could e.g. allow using different options for certain sources by adding and configuring a "virtual" new method which picks up the config based on the name it was called with like e.g. http does if called as tor+http. 
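The lookup order this introduces is small enough to sketch on its own. In the fragment below the Config map, the helper struct and the trailing-slash convention for the methods directory are stand-ins for apt's _config and are assumptions of this sketch, not apt's actual API; the diff below shows the real implementation in pkgAcquire::Worker::Start.

// Sketch: a per-method Dir::Bin::Methods::<name> entry overrides the methods
// directory, the literal value "false" disables the method, and a bare name
// acts like a "symlinked" method resolved inside the methods directory.
#include <map>
#include <string>

using Config = std::map<std::string, std::string>; // stand-in for apt's _config

struct MethodLookup
{
   bool disabled;    // true if the method was explicitly turned off
   std::string path; // binary to execute (empty when disabled)
};

static MethodLookup ResolveMethod(Config const &conf, std::string const &access)
{
   std::string const methodsDir = conf.at("Dir::Bin::Methods"); // assumed to end in '/'
   auto const overridden = conf.find("Dir::Bin::Methods::" + access);
   if (overridden == conf.end())
      return {false, methodsDir + access};
   if (overridden->second == "false")
      return {true, ""};
   std::string path = overridden->second;
   if (path.find('/') == std::string::npos) // e.g. Dir::Bin::Methods::http-ng "http";
      path = methodsDir + path;
   return {false, path};
}

Note in the diff below that the worker still passes the configured access name as argv[0], which is what lets such a redirected method pick up name-based configuration like Debug::Acquire::http-ng.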
--- apt-pkg/acquire-worker.cc | 31 +++++++++++++++++----- test/integration/test-apt-https-no-redirect | 15 +++++++++++ .../test-apt-update-failure-propagation | 21 ++++++++++----- test/integration/test-bug-738785-switch-protocol | 20 ++------------ .../test-different-methods-for-same-source | 15 ++--------- 5 files changed, 58 insertions(+), 44 deletions(-) diff --git a/apt-pkg/acquire-worker.cc b/apt-pkg/acquire-worker.cc index 1ee78d070..7a4f8177f 100644 --- a/apt-pkg/acquire-worker.cc +++ b/apt-pkg/acquire-worker.cc @@ -93,9 +93,22 @@ pkgAcquire::Worker::~Worker() bool pkgAcquire::Worker::Start() { // Get the method path - string Method = _config->FindDir("Dir::Bin::Methods") + Access; + constexpr char const * const methodsDir = "Dir::Bin::Methods"; + std::string const confItem = std::string(methodsDir) + "::" + Access; + std::string Method; + if (_config->Exists(confItem)) + Method = _config->FindFile(confItem.c_str()); + else + Method = _config->FindDir(methodsDir) + Access; if (FileExists(Method) == false) { + if (flNotDir(Method) == "false") + { + _error->Error(_("The method '%s' is explicitly disabled via configuration."), Access.c_str()); + if (Access == "http" || Access == "https") + _error->Notice(_("If you meant to use Tor remember to use %s instead of %s."), ("tor+" + Access).c_str(), Access.c_str()); + return false; + } _error->Error(_("The method driver %s could not be found."),Method.c_str()); std::string const A(Access.cbegin(), std::find(Access.cbegin(), Access.cend(), '+')); std::string pkg; @@ -103,9 +116,15 @@ bool pkgAcquire::Worker::Start() _error->Notice(_("Is the package %s installed?"), pkg.c_str()); return false; } + std::string const Calling = _config->FindDir(methodsDir) + Access; if (Debug == true) - clog << "Starting method '" << Method << '\'' << endl; + { + std::clog << "Starting method '" << Calling << "'"; + if (Calling != Method) + std::clog << " ( via " << Method << " )"; + std::clog << endl; + } // Create the pipes int Pipes[4] = {-1,-1,-1,-1}; @@ -130,11 +149,9 @@ bool pkgAcquire::Worker::Start() SetCloseExec(STDIN_FILENO,false); SetCloseExec(STDERR_FILENO,false); - const char *Args[2]; - Args[0] = Method.c_str(); - Args[1] = 0; - execv(Args[0],(char **)Args); - cerr << "Failed to exec method " << Args[0] << endl; + const char * const Args[] = { Calling.c_str(), nullptr }; + execv(Method.c_str() ,const_cast(Args)); + std::cerr << "Failed to exec method " << Calling << " ( via " << Method << ")" << endl; _exit(100); } diff --git a/test/integration/test-apt-https-no-redirect b/test/integration/test-apt-https-no-redirect index 69e2ba57f..d6c630d5f 100755 --- a/test/integration/test-apt-https-no-redirect +++ b/test/integration/test-apt-https-no-redirect @@ -14,6 +14,7 @@ echo 'alright' > aptarchive/working changetohttpswebserver webserverconfig 'aptwebserver::redirect::replace::/redirectme/' "http://localhost:${APTHTTPPORT}/" webserverconfig 'aptwebserver::redirect::replace::/redirectme2/' "https://localhost:${APTHTTPSPORT}/" +echo 'Dir::Bin::Methods::https+http "https";' > rootdir/etc/apt/apt.conf.d/99add-https-http-method msgtest 'download of a file works via' 'http' testsuccess --nomsg downloadfile "http://localhost:${APTHTTPPORT}/working" httpfile @@ -22,9 +23,23 @@ testfileequal httpfile 'alright' msgtest 'download of a file works via' 'https' testsuccess --nomsg downloadfile "https://localhost:${APTHTTPSPORT}/working" httpsfile testfileequal httpsfile 'alright' +rm -f httpfile httpsfile + +msgtest 'download of http file works via' 'https+http' 
+testsuccess --nomsg downloadfile "http://localhost:${APTHTTPPORT}/working" httpfile +testfileequal httpfile 'alright' + +msgtest 'download of https file works via' 'https+http' +testsuccess --nomsg downloadfile "https://localhost:${APTHTTPSPORT}/working" httpsfile +testfileequal httpsfile 'alright' +rm -f httpfile httpsfile msgtest 'download of a file does not work if' 'https redirected to http' testfailure --nomsg downloadfile "https://localhost:${APTHTTPSPORT}/redirectme/working" redirectfile msgtest 'libcurl has forbidden access in last request to' 'http resource' testsuccess --nomsg grep -q -E -- "Redirection from https to 'http://.*' is forbidden" rootdir/tmp/testfailure.output + +msgtest 'download of a file does work if' 'https+http redirected to https' +testsuccess --nomsg downloadfile "https+http://localhost:${APTHTTPPORT}/redirectme2/working" redirectfile +testfileequal redirectfile 'alright' diff --git a/test/integration/test-apt-update-failure-propagation b/test/integration/test-apt-update-failure-propagation index ec6bf4a48..1361b1b93 100755 --- a/test/integration/test-apt-update-failure-propagation +++ b/test/integration/test-apt-update-failure-propagation @@ -27,10 +27,11 @@ for FILE in rootdir/etc/apt/sources.list.d/*-sid-* ; do done pretest() { + msgmsg "$@" rm -rf rootdir/var/lib/apt/lists testsuccessequal 'N: Unable to locate package foo' aptcache policy foo } -pretest +pretest 'initialize test' 'update' testsuccess aptget update testsuccessequal "foo: Installed: (none) @@ -41,7 +42,7 @@ testsuccessequal "foo: 1 500 500 https://localhost:${APTHTTPSPORT} stable/main all Packages" aptcache policy foo -pretest +pretest 'not found' 'release files' mv aptarchive/dists/stable aptarchive/dists/stable.good testfailuremsg "E: The repository 'https://localhost:${APTHTTPSPORT} stable Release' does not have a Release file. N: Updating from such a repository can't be done securely, and is therefore disabled by default. @@ -76,7 +77,16 @@ posttest() { } posttest -pretest +pretest 'method disabled' 'https' +echo 'Dir::Bin::Methods::https "false";' > rootdir/etc/apt/apt.conf.d/99disable-https +testfailuremsg "E: The method 'https' is explicitly disabled via configuration. +N: If you meant to use Tor remember to use tor+https instead of https. +E: Failed to fetch https://localhost:${APTHTTPSPORT}/dists/stable/InRelease +E: Some index files failed to download. They have been ignored, or old ones used instead." aptget update +rm -f rootdir/etc/apt/apt.conf.d/99disable-https +posttest + +pretest 'method not installed' 'https' rm "${NEWMETHODS}/https" testfailuremsg "E: The method driver ${TMPWORKINGDIRECTORY}/rootdir/usr/lib/apt/methods/https could not be found. N: Is the package apt-transport-https installed? @@ -84,13 +94,12 @@ E: Failed to fetch https://localhost:${APTHTTPSPORT}/dists/stable/InRelease E: Some index files failed to download. They have been ignored, or old ones used instead." aptget update posttest -ln -s "$OLDMETHODS/https" "$NEWMETHODS" -pretest +pretest 'https connection refused' 'doom port' for FILE in rootdir/etc/apt/sources.list.d/*-stable-* ; do # lets see how many testservers run also Doom sed -i -e "s#:${APTHTTPSPORT}/#:666/#" "$FILE" done -testwarning aptget update +testwarning aptget update -o Dir::Bin::Methods::https="${OLDMETHODS}/https" testequalor2 "W: Failed to fetch https://localhost:666/dists/stable/InRelease Failed to connect to localhost port 666: Connection refused W: Some index files failed to download. 
They have been ignored, or old ones used instead." "W: Failed to fetch https://localhost:666/dists/stable/InRelease couldn't connect to host W: Some index files failed to download. They have been ignored, or old ones used instead." tail -n 2 rootdir/tmp/testwarning.output diff --git a/test/integration/test-bug-738785-switch-protocol b/test/integration/test-bug-738785-switch-protocol index fa6702eb0..690e8727e 100755 --- a/test/integration/test-bug-738785-switch-protocol +++ b/test/integration/test-bug-738785-switch-protocol @@ -38,28 +38,12 @@ cd - >/dev/null testsuccess aptget install apt -y testdpkginstalled 'apt' -# install a slowed down file: otherwise its to fast to reproduce combining -NEWMETHODS="$(readlink -f rootdir)/usr/lib/apt/methods" -OLDMETHODS="$(readlink -f rootdir/usr/lib/apt/methods)" -rm "$NEWMETHODS" -mkdir "$NEWMETHODS" -backupIFS="$IFS" -IFS="$(printf "\n\b")" -for METH in $(find "$OLDMETHODS" -maxdepth 1 ! -type d); do - ln -s "$OLDMETHODS/$(basename "$METH")" "$NEWMETHODS" -done -IFS="$backupIFS" -rm "$NEWMETHODS/https" - cd downloaded -testfailureequal "E: The method driver $(readlink -f './../')/rootdir/usr/lib/apt/methods/https could not be found. -N: Is the package apt-transport-https installed?" aptget download apt +testfailureequal "E: The method 'https' is explicitly disabled via configuration. +N: If you meant to use Tor remember to use tor+https instead of https." aptget download apt -o Dir::Bin::Methods::https=false testfailure test -e apt_1.0_all.deb cd - >/dev/null -# revert to all methods -ln -s "$OLDMETHODS/https" "$NEWMETHODS" - # check that downgrades from https to http are not allowed webserverconfig 'aptwebserver::support::http' 'true' sed -i -e "s#:${APTHTTPPORT}/redirectme#:${APTHTTPSPORT}/downgrademe#" -e 's# http:# https:#' rootdir/etc/apt/sources.list.d/* diff --git a/test/integration/test-different-methods-for-same-source b/test/integration/test-different-methods-for-same-source index 40138ad5d..791a84cb7 100755 --- a/test/integration/test-different-methods-for-same-source +++ b/test/integration/test-different-methods-for-same-source @@ -10,21 +10,10 @@ insertpackage 'stable' 'foo' 'all' '1' insertsource 'stable' 'foo' 'all' '1' setupaptarchive --no-update -# install a slowed down file: otherwise its to fast to reproduce combining -NEWMETHODS="$(readlink -f rootdir)/usr/lib/apt/methods" -OLDMETHODS="$(readlink -f rootdir/usr/lib/apt/methods)" -rm "$NEWMETHODS" -mkdir "$NEWMETHODS" -backupIFS="$IFS" -IFS="$(printf "\n\b")" -for METH in $(find "$OLDMETHODS" -maxdepth 1 ! 
-type d); do + ln -s "$OLDMETHODS/$(basename "$METH")" "$NEWMETHODS" +done +IFS="$backupIFS" +ln -s "${OLDMETHODS}/http" "${NEWMETHODS}/http-ng" - changetowebserver webserverconfig 'aptwebserver::redirect::replace::/redirectme/' "http://localhost:${APTHTTPPORT}/" + +echo 'Dir::Bin::Methods::http-ng "http";' > rootdir/etc/apt/apt.conf.d/99add-http-ng-method sed -i -e 's# http:# http-ng:#' $(find rootdir/etc/apt/sources.list.d -name '*-deb-src.list') testsuccess apt update -o Debug::Acquire::http-ng=1 -- cgit v1.2.3 From 8665dceb5cf2a197ae270b08066f05c8a2870223 Mon Sep 17 00:00:00 2001 From: David Kalnischkies Date: Sat, 6 Aug 2016 13:53:05 +0200 Subject: block direct connections to .onion domains (RFC7686) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Doing a direct connect to an .onion address (if you don't happen to use it as a local domain, which you shouldn't) is bound to fail and leaks to a DNS server the information that you use Tor and which hidden service you wanted to connect to. Worse, if the DNS is poisoned and actually resolves, it tricks a user into believing the setup works correctly… This also blocks the usage of wrappers like torsocks with apt, but with native support available and advertised in the error message this shouldn't really be an issue. (A small standalone sketch of the check appears after the final patch in this series.) Inspired-by: https://bugzilla.mozilla.org/show_bug.cgi?id=1228457 --- methods/connect.cc | 20 +++++++++++++++++++- test/integration/test-method-connect | 13 +++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) create mode 100755 test/integration/test-method-connect diff --git a/methods/connect.cc b/methods/connect.cc index f768169d1..c819c1dfb 100644 --- a/methods/connect.cc +++ b/methods/connect.cc @@ -61,10 +61,23 @@ void RotateDNS() LastUsed = LastHostAddr; } /*}}}*/ +static bool ConnectionAllowed(char const * const Service, std::string const &Host)/*{{{*/ +{ + if (APT::String::Endswith(Host, ".onion") && _config->FindB("Acquire::BlockDotOnion", true)) + { + // TRANSLATOR: %s is e.g. Tor's ".onion" which would likely fail or leak info (RFC7686) + _error->Error(_("Direct connection to %s domains is blocked by default."), ".onion"); + if (strcmp(Service, "http") == 0) + _error->Error(_("If you meant to use Tor remember to use %s instead of %s."), "tor+http", "http"); + return false; + } + return true; +} + /*}}}*/ // DoConnect - Attempt a connect operation /*{{{*/ // --------------------------------------------------------------------- /* This helper function attempts a connection to a single address.
*/ -static bool DoConnect(struct addrinfo *Addr,std::string Host, +static bool DoConnect(struct addrinfo *Addr,std::string const &Host, unsigned long TimeOut,int &Fd,pkgAcqMethod *Owner) { // Show a status indicator @@ -138,6 +151,8 @@ static bool ConnectToHostname(std::string const &Host, int const Port, const char * const Service, int DefPort, int &Fd, unsigned long const TimeOut, pkgAcqMethod * const Owner) { + if (ConnectionAllowed(Service, Host) == false) + return false; // Convert the port name/number char ServStr[300]; if (Port != 0) @@ -274,6 +289,9 @@ bool Connect(std::string Host,int Port,const char *Service, if (_error->PendingError() == true) return false; + if (ConnectionAllowed(Service, Host) == false) + return false; + if(LastHost != Host || LastPort != Port) { SrvRecords.clear(); diff --git a/test/integration/test-method-connect b/test/integration/test-method-connect new file mode 100755 index 000000000..b35f96dc3 --- /dev/null +++ b/test/integration/test-method-connect @@ -0,0 +1,13 @@ +#!/bin/sh +set -e + +TESTDIR="$(readlink -f "$(dirname "$0")")" +. "$TESTDIR/framework" + +setupenvironment + +cd downloaded +testfailureequal 'Err:1 http://vwakviie2ienjx6t.onion/ + Direct connection to .onion domains is blocked by default. If you meant to use Tor remember to use tor+http instead of http. +E: Failed to fetch http://vwakviie2ienjx6t.onion/ Direct connection to .onion domains is blocked by default. If you meant to use Tor remember to use tor+http instead of http. +E: Download Failed' apthelper download-file 'http://vwakviie2ienjx6t.onion/' ftp.debian.org.html -- cgit v1.2.3 From 0568d325ad8660a9966d552634aa17c90ed22516 Mon Sep 17 00:00:00 2001 From: David Kalnischkies Date: Sat, 6 Aug 2016 22:54:31 +0200 Subject: http: auto-configure for local Tor proxy if called as 'tor' With apt's http transport supporting socks5h proxies and all the work on configuring methods based on the name they are called with, it becomes surprisingly easy to implement Tor support equal to (and perhaps even a bit exceeding) what is currently available in apt-transport-tor. How this will turn out to be handled packaging-wise we will see in https://lists.debian.org/deity/2016/08/msg00012.html , but until that is resolved we can add the needed support without actively enabling it for now, so that it can be tested better.
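The only subtle part of this auto-configuration is visible in AddProxyAuth in the diff below: when the placeholder proxy user is in effect, the target host (reduced to alphanumerics and capped at the 255-byte limit of SOCKS5 credential fields) becomes the SOCKS password, which nudges Tor into putting different destination hosts onto different circuits. A standalone sketch of that derivation, not apt's actual code:

// Sketch: derive a per-destination SOCKS password so a Tor proxy, which keys
// circuit isolation on the credential pair, separates different target hosts.
// Example: IsolationPassword("deb.debian.org") yields "debdebianorg".
#include <algorithm>
#include <cctype>
#include <string>

static std::string IsolationPassword(std::string host)
{
   host.erase(std::remove_if(host.begin(), host.end(),
                             [](unsigned char const c) { return std::isalnum(c) == 0; }),
              host.end());
   if (host.length() > 255) // SOCKS5 user/pass fields carry a one-byte length
      host.resize(255);
   return host;
}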
--- methods/http.cc | 3 +++ methods/https.cc | 2 ++ methods/server.cc | 26 ++++++++++++++++++++++++++ methods/server.h | 3 +++ 4 files changed, 34 insertions(+) diff --git a/methods/http.cc b/methods/http.cc index 1ed2e3629..0358b50cd 100644 --- a/methods/http.cc +++ b/methods/http.cc @@ -357,6 +357,9 @@ bool HttpServerState::Open() Proxy = ""; } + if (Proxy.empty() == false) + Owner->AddProxyAuth(Proxy, ServerName); + if (Proxy.Access == "socks5h") { if (Connect(Proxy.Host, Proxy.Port, "socks", 1080, ServerFd, TimeOut, Owner) == false) diff --git a/methods/https.cc b/methods/https.cc index 47dce2ea0..283126f6b 100644 --- a/methods/https.cc +++ b/methods/https.cc @@ -213,6 +213,8 @@ bool HttpsMethod::SetupProxy() /*{{{*/ if (UseProxy.empty() == false) { Proxy = UseProxy; + AddProxyAuth(Proxy, ServerName); + if (Proxy.Access == "socks5h") curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5_HOSTNAME); else if (Proxy.Access == "socks5") diff --git a/methods/server.cc b/methods/server.cc index 7c85c8abb..0888617b1 100644 --- a/methods/server.cc +++ b/methods/server.cc @@ -794,3 +794,29 @@ ServerMethod::ServerMethod(std::string &&Binary, char const * const Ver,unsigned { } /*}}}*/ +bool ServerMethod::Configuration(std::string Message) /*{{{*/ +{ + if (aptMethod::Configuration(Message) == false) + return false; + + _config->CndSet("Acquire::tor::Proxy", + "socks5h://apt-transport-tor@localhost:9050"); + return true; +} + /*}}}*/ +bool ServerMethod::AddProxyAuth(URI &Proxy, URI const &Server) const /*{{{*/ +{ + if (std::find(methodNames.begin(), methodNames.end(), "tor") != methodNames.end() && + Proxy.User == "apt-transport-tor" && Proxy.Password.empty()) + { + std::string pass = Server.Host; + pass.erase(std::remove_if(pass.begin(), pass.end(), [](char const c) { return std::isalnum(c) == 0; }), pass.end()); + if (pass.length() > 255) + Proxy.Password = pass.substr(0, 255); + else + Proxy.Password = std::move(pass); + } + // FIXME: should we support auth.conf for proxies? + return true; +} + /*}}}*/ diff --git a/methods/server.h b/methods/server.h index f6a635dca..1d114354f 100644 --- a/methods/server.h +++ b/methods/server.h @@ -156,6 +156,9 @@ class ServerMethod : public aptMethod virtual void SendReq(FetchItem *Itm) = 0; virtual std::unique_ptr CreateServerState(URI const &uri) = 0; virtual void RotateDNS() = 0; + virtual bool Configuration(std::string Message) APT_OVERRIDE; + + bool AddProxyAuth(URI &Proxy, URI const &Server) const; ServerMethod(std::string &&Binary, char const * const Ver,unsigned long const Flags); virtual ~ServerMethod() {}; -- cgit v1.2.3
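As the standalone sketch promised for the .onion patch earlier in this series: the guard amounts to a suffix check gated by Acquire::BlockDotOnion, roughly as below. The helper names here are invented, and the real ConnectionAllowed additionally emits the tor+http hint and uses apt's own string and error utilities.

// Sketch of the RFC 7686 guard: never resolve or connect to .onion hosts
// directly unless the user explicitly opts out of the blocking.
#include <string>

static bool EndsWith(std::string const &s, std::string const &suffix)
{
   return s.size() >= suffix.size() &&
          s.compare(s.size() - suffix.size(), suffix.size(), suffix) == 0;
}

static bool DirectConnectionAllowed(std::string const &host, bool const blockDotOnion = true)
{
   if (blockDotOnion && EndsWith(host, ".onion"))
      return false; // callers should suggest tor+http / tor+https instead
   return true;
}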