From c6a428e4d17b408c2701def5daa46ca950948980 Mon Sep 17 00:00:00 2001 From: Julian Andres Klode Date: Fri, 30 Jun 2017 16:33:09 +0200 Subject: Switch to 'http' as the default https method The old curl based method is still available as 'curl', 'curl+http', and 'curl+https'. --- .travis.yml | 2 +- CMakeLists.txt | 1 + debian/NEWS | 9 + debian/apt-transport-https.install | 4 +- debian/control | 13 +- debian/rules | 2 +- methods/CMakeLists.txt | 17 +- methods/curl.cc | 549 +++++++++++++++++++++ methods/curl.h | 88 ++++ methods/https.cc | 549 --------------------- methods/https.h | 88 ---- po/CMakeLists.txt | 2 +- .../test-apt-update-failure-propagation | 2 +- 13 files changed, 675 insertions(+), 651 deletions(-) create mode 100644 methods/curl.cc create mode 100644 methods/curl.h delete mode 100644 methods/https.cc delete mode 100644 methods/https.h diff --git a/.travis.yml b/.travis.yml index b3c4fb806..1f461422b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,8 +5,8 @@ services: - docker env: - USER=travis CMAKE_FLAGS= - - USER=travis CMAKE_FLAGS="-DWITH_DOC=OFF -DWITH_CURL=OFF" - USER=root CMAKE_FLAGS=-DWITH_DOC=OFF + - USER=travis CMAKE_FLAGS="-DWITH_DOC=OFF -DFORCE_CURL=ON" install: - sed -i -e "s#1000#$(id -u)#g" Dockerfile - docker build --tag=apt-ci . 
diff --git a/CMakeLists.txt b/CMakeLists.txt index bc15851ce..8bc52036b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -79,6 +79,7 @@ endif() # apt-transport-https dependencies option(WITH_CURL "Build curl-based methods" ON) if (WITH_CURL) + option(FORCE_CURL "Use curl-based methods" OFF) find_package(CURL REQUIRED) if (CURL_FOUND) set(HAVE_CURL 1) diff --git a/debian/NEWS b/debian/NEWS index 5722ca0c5..4afb403ca 100644 --- a/debian/NEWS +++ b/debian/NEWS @@ -1,3 +1,12 @@ +apt (1.5~alpha4) UNRELEASED; urgency=medium + + The apt package now installs the http method for https access, the + apt-transport-https package is deprecated, and installs a curl method + now that can be used as a fallback with curl+https URLs, or by setting + Dir::Bin::Methods::https to "curl". + + -- Julian Andres Klode Fri, 30 Jun 2017 15:26:44 +0200 + apt (1.5~alpha1) experimental; urgency=medium [ Changes to unauthenticated repositories ] diff --git a/debian/apt-transport-https.install b/debian/apt-transport-https.install index 7b14886df..4849f1484 100644 --- a/debian/apt-transport-https.install +++ b/debian/apt-transport-https.install @@ -1 +1,3 @@ -usr/lib/apt/methods/https +usr/lib/apt/methods/curl +usr/lib/apt/methods/curl+http +usr/lib/apt/methods/curl+https diff --git a/debian/control b/debian/control index 4f9043d22..896f98783 100644 --- a/debian/control +++ b/debian/control @@ -36,8 +36,9 @@ Depends: adduser, ${apt:keyring}, ${misc:Depends}, ${shlibs:Depends} -Replaces: apt-utils (<< 1.3~exp2~) -Breaks: apt-utils (<< 1.3~exp2~) +Replaces: apt-utils (<< 1.3~exp2~), apt-transport-https (<< 1.5~alpha4~) +Breaks: apt-utils (<< 1.3~exp2~), apt-transport-https (<< 1.5~alpha4~) +Provides: apt-transport-https (= ${binary:Version}) Recommends: ca-certificates Suggests: apt-doc, aptitude | synaptic | wajig, @@ -148,10 +149,10 @@ Description: package management related utility programs Package: apt-transport-https Architecture: any -Depends: ${misc:Depends}, ${shlibs:Depends} +Depends: 
${misc:Depends}, ${shlibs:Depends}, apt (>= 1.5~alpha4~) Recommends: ca-certificates Priority: optional -Description: https download transport for APT +Description: Transitional package: curl-https download transport for APT This package enables the usage of 'deb https://foo distro main' lines in the /etc/apt/sources.list so that all package managers using the libapt-pkg library can access metadata and packages available in sources @@ -159,3 +160,7 @@ Description: https download transport for APT . This transport supports server as well as client authentication with certificates. + . + This package is no longer needed, https support was integrated into apt, + and is only provided as a fallback if some bugs are found in apt's native + https support. diff --git a/debian/rules b/debian/rules index ede9591d1..d4e9600a2 100755 --- a/debian/rules +++ b/debian/rules @@ -22,7 +22,7 @@ override_dh_install-indep: override_dh_install-arch: dh_install -papt-utils -X/dump - dh_install -papt -Xmethods/https + dh_install -papt -Xmethods/curl -Xmethods/curl+https -Xmethods/curl+http dh_install --remaining --list-missing install -m 644 debian/apt.conf.autoremove debian/apt/etc/apt/apt.conf.d/01autoremove install -m 755 debian/apt.auto-removal.sh debian/apt/etc/kernel/postinst.d/apt-auto-removal diff --git a/methods/CMakeLists.txt b/methods/CMakeLists.txt index 9f01ec506..3ae3f9963 100644 --- a/methods/CMakeLists.txt +++ b/methods/CMakeLists.txt @@ -7,7 +7,7 @@ add_executable(cdrom cdrom.cc) add_executable(http http.cc http_main.cc rfc2553emu.cc connect.cc basehttp.cc) add_executable(mirror mirror.cc http.cc rfc2553emu.cc connect.cc basehttp.cc) if (HAVE_CURL) - add_executable(https https.cc basehttp.cc) + add_executable(curl curl.cc basehttp.cc) endif() add_executable(ftp ftp.cc rfc2553emu.cc connect.cc) add_executable(rred rred.cc) @@ -16,7 +16,7 @@ add_executable(rsh rsh.cc) target_compile_definitions(http PRIVATE ${GNUTLS_DEFINITIONS}) target_include_directories(http PRIVATE 
${GNUTLS_INCLUDE_DIR}) if (HAVE_CURL) -target_include_directories(https PRIVATE ${CURL_INCLUDE_DIRS}) +target_include_directories(curl PRIVATE ${CURL_INCLUDE_DIRS}) endif() # Link the executables against the libraries @@ -28,7 +28,7 @@ target_link_libraries(cdrom apt-pkg) target_link_libraries(http apt-pkg ${GNUTLS_LIBRARIES}) target_link_libraries(mirror apt-pkg ${RESOLV_LIBRARIES} ${GNUTLS_LIBRARIES}) if (HAVE_CURL) - target_link_libraries(https apt-pkg ${CURL_LIBRARIES}) + target_link_libraries(curl apt-pkg ${CURL_LIBRARIES}) endif() target_link_libraries(ftp apt-pkg ${GNUTLS_LIBRARIES}) target_link_libraries(rred apt-pkg) @@ -40,8 +40,15 @@ install(TARGETS file copy store gpgv cdrom http ftp rred rsh mirror add_slaves(${CMAKE_INSTALL_LIBEXECDIR}/apt/methods store gzip lzma bzip2 xz) add_slaves(${CMAKE_INSTALL_LIBEXECDIR}/apt/methods rsh ssh) -if (HAVE_CURL) - install(TARGETS https RUNTIME DESTINATION ${CMAKE_INSTALL_LIBEXECDIR}/apt/methods) + +set(curl_slaves curl+https curl+http) + +if (FORCE_CURL) + set(curl_slaves ${curl_slaves} https) else() add_slaves(${CMAKE_INSTALL_LIBEXECDIR}/apt/methods http https) endif() +if (HAVE_CURL) + install(TARGETS curl RUNTIME DESTINATION ${CMAKE_INSTALL_LIBEXECDIR}/apt/methods) + add_slaves(${CMAKE_INSTALL_LIBEXECDIR}/apt/methods curl ${curl_slaves}) +endif() diff --git a/methods/curl.cc b/methods/curl.cc new file mode 100644 index 000000000..ac3f77ab6 --- /dev/null +++ b/methods/curl.cc @@ -0,0 +1,549 @@ +//-*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: http.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $ +/* ###################################################################### + + HTTPS Acquire Method - This is the HTTPS acquire method for APT. 
+ + It uses libcurl + + ##################################################################### */ + /*}}}*/ +// Include Files /*{{{*/ +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +#include "curl.h" + +#include + /*}}}*/ +using namespace std; + +struct APT_HIDDEN CURLUserPointer { + HttpsMethod * const https; + HttpsMethod::FetchResult * const Res; + HttpsMethod::FetchItem const * const Itm; + RequestState * const Req; + CURLUserPointer(HttpsMethod * const https, HttpsMethod::FetchResult * const Res, + HttpsMethod::FetchItem const * const Itm, RequestState * const Req) : https(https), Res(Res), Itm(Itm), Req(Req) {} +}; + +size_t +HttpsMethod::parse_header(void *buffer, size_t size, size_t nmemb, void *userp) +{ + size_t len = size * nmemb; + CURLUserPointer *me = static_cast(userp); + std::string line((char*) buffer, len); + for (--len; len > 0; --len) + if (isspace_ascii(line[len]) == 0) + { + ++len; + break; + } + line.erase(len); + + if (line.empty() == true) + { + if (me->Req->File.Open(me->Itm->DestFile, FileFd::WriteAny) == false) + return ERROR_NOT_FROM_SERVER; + + me->Req->JunkSize = 0; + if (me->Req->Result != 416 && me->Req->StartPos != 0) + ; + else if (me->Req->Result == 416) + { + bool partialHit = false; + if (me->Itm->ExpectedHashes.usable() == true) + { + Hashes resultHashes(me->Itm->ExpectedHashes); + FileFd file(me->Itm->DestFile, FileFd::ReadOnly); + me->Req->TotalFileSize = file.FileSize(); + me->Req->Date = file.ModificationTime(); + resultHashes.AddFD(file); + HashStringList const hashList = resultHashes.GetHashStringList(); + partialHit = (me->Itm->ExpectedHashes == hashList); + } + else if (me->Req->Result == 416 && me->Req->TotalFileSize == me->Req->File.FileSize()) + partialHit = true; + + if (partialHit == true) + { + me->Req->Result = 200; + me->Req->StartPos = me->Req->TotalFileSize; + // the 
actual size is not important for https as curl will deal with it + // by itself and e.g. doesn't bother us with transport-encoding… + me->Req->JunkSize = std::numeric_limits::max(); + } + else + me->Req->StartPos = 0; + } + else + me->Req->StartPos = 0; + + me->Res->LastModified = me->Req->Date; + me->Res->Size = me->Req->TotalFileSize; + me->Res->ResumePoint = me->Req->StartPos; + + // we expect valid data, so tell our caller we get the file now + if (me->Req->Result >= 200 && me->Req->Result < 300) + { + if (me->Res->Size != 0 && me->Res->Size > me->Res->ResumePoint) + me->https->URIStart(*me->Res); + if (me->Req->AddPartialFileToHashes(me->Req->File) == false) + return 0; + } + else + me->Req->JunkSize = std::numeric_limitsReq->JunkSize)>::max(); + } + else if (me->Req->HeaderLine(line) == false) + return 0; + + return size*nmemb; +} + +size_t +HttpsMethod::write_data(void *buffer, size_t size, size_t nmemb, void *userp) +{ + CURLUserPointer *me = static_cast(userp); + size_t buffer_size = size * nmemb; + // we don't need to count the junk here, just drop anything we get as + // we don't always know how long it would be, e.g. in chunked encoding. 
+ if (me->Req->JunkSize != 0) + return buffer_size; + + if(me->Req->File.Write(buffer, buffer_size) != true) + return 0; + + if(me->https->Queue->MaximumSize > 0) + { + unsigned long long const TotalWritten = me->Req->File.Tell(); + if (TotalWritten > me->https->Queue->MaximumSize) + { + me->https->SetFailReason("MaximumSizeExceeded"); + _error->Error("Writing more data than expected (%llu > %llu)", + TotalWritten, me->https->Queue->MaximumSize); + return 0; + } + } + + if (me->https->Server->GetHashes()->Add((unsigned char const * const)buffer, buffer_size) == false) + return 0; + + return buffer_size; +} + +// HttpsServerState::HttpsServerState - Constructor /*{{{*/ +HttpsServerState::HttpsServerState(URI Srv,HttpsMethod * Owner) : ServerState(Srv, Owner), Hash(NULL) +{ + TimeOut = Owner->ConfigFindI("Timeout", TimeOut); + Reset(); +} + /*}}}*/ +bool HttpsServerState::InitHashes(HashStringList const &ExpectedHashes) /*{{{*/ +{ + delete Hash; + Hash = new Hashes(ExpectedHashes); + return true; +} + /*}}}*/ +APT_PURE Hashes * HttpsServerState::GetHashes() /*{{{*/ +{ + return Hash; +} + /*}}}*/ + +bool HttpsMethod::SetupProxy() /*{{{*/ +{ + URI ServerName = Queue->Uri; + + // Determine the proxy setting + AutoDetectProxy(ServerName); + + // Curl should never read proxy settings from the environment, as + // we determine which proxy to use. Do this for consistency among + // methods and prevent an environment variable overriding a + // no-proxy ("DIRECT") setting in apt.conf. 
+ curl_easy_setopt(curl, CURLOPT_PROXY, ""); + + // Determine the proxy setting - try https first, fallback to http and use env at last + string UseProxy = ConfigFind("Proxy::" + ServerName.Host, ""); + if (UseProxy.empty() == true) + UseProxy = ConfigFind("Proxy", ""); + // User wants to use NO proxy, so nothing to setup + if (UseProxy == "DIRECT") + return true; + + // Parse no_proxy, a comma (,) separated list of domains we don't want to use + // a proxy for so we stop right here if it is in the list + if (getenv("no_proxy") != 0 && CheckDomainList(ServerName.Host,getenv("no_proxy")) == true) + return true; + + if (UseProxy.empty() == true) + { + const char* result = nullptr; + if (std::find(methodNames.begin(), methodNames.end(), "https") != methodNames.end()) + result = getenv("https_proxy"); + // FIXME: Fall back to http_proxy is to remain compatible with + // existing setups and behaviour of apt.conf. This should be + // deprecated in the future (including apt.conf). Most other + // programs do not fall back to http proxy settings and neither + // should Apt. + if (result == nullptr && std::find(methodNames.begin(), methodNames.end(), "http") != methodNames.end()) + result = getenv("http_proxy"); + UseProxy = result == nullptr ? 
"" : result; + } + + // Determine what host and port to use based on the proxy settings + if (UseProxy.empty() == false) + { + Proxy = UseProxy; + AddProxyAuth(Proxy, ServerName); + + if (Proxy.Access == "socks5h") + curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5_HOSTNAME); + else if (Proxy.Access == "socks5") + curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5); + else if (Proxy.Access == "socks4a") + curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4A); + else if (Proxy.Access == "socks") + curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4); + else if (Proxy.Access == "http" || Proxy.Access == "https") + curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_HTTP); + else + return false; + + if (Proxy.Port != 1) + curl_easy_setopt(curl, CURLOPT_PROXYPORT, Proxy.Port); + curl_easy_setopt(curl, CURLOPT_PROXY, Proxy.Host.c_str()); + if (Proxy.User.empty() == false || Proxy.Password.empty() == false) + { + curl_easy_setopt(curl, CURLOPT_PROXYUSERNAME, Proxy.User.c_str()); + curl_easy_setopt(curl, CURLOPT_PROXYPASSWORD, Proxy.Password.c_str()); + } + } + return true; +} /*}}}*/ +// HttpsMethod::Fetch - Fetch an item /*{{{*/ +// --------------------------------------------------------------------- +/* This adds an item to the pipeline. We keep the pipeline at a fixed + depth. */ +bool HttpsMethod::Fetch(FetchItem *Itm) +{ + struct stat SBuf; + struct curl_slist *headers=NULL; + char curl_errorstr[CURL_ERROR_SIZE]; + URI Uri = Itm->Uri; + setPostfixForMethodNames(Uri.Host.c_str()); + AllowRedirect = ConfigFindB("AllowRedirect", true); + Debug = DebugEnabled(); + + // TODO: + // - http::Pipeline-Depth + // - error checking/reporting + // - more debug options? (CURLOPT_DEBUGFUNCTION?) 
+ { + auto const plus = Binary.find('+'); + if (plus != std::string::npos) + Uri.Access = Binary.substr(plus + 1); + } + + curl_easy_reset(curl); + if (SetupProxy() == false) + return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str()); + + maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc")); + if (Server == nullptr || Server->Comp(Itm->Uri) == false) + Server = CreateServerState(Itm->Uri); + + // The "+" is encoded as a workaround for a amazon S3 bug + // see LP bugs #1003633 and #1086997. (taken from http method) + Uri.Path = QuoteString(Uri.Path, "+~ "); + + FetchResult Res; + RequestState Req(this, Server.get()); + CURLUserPointer userp(this, &Res, Itm, &Req); + // callbacks + curl_easy_setopt(curl, CURLOPT_URL, static_cast(Uri).c_str()); + curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, parse_header); + curl_easy_setopt(curl, CURLOPT_WRITEHEADER, &userp); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, &userp); + // options + curl_easy_setopt(curl, CURLOPT_NOPROGRESS, true); + curl_easy_setopt(curl, CURLOPT_FILETIME, true); + curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 0); + + if (std::find(methodNames.begin(), methodNames.end(), "https") != methodNames.end()) + { + curl_easy_setopt(curl, CURLOPT_PROTOCOLS, CURLPROTO_HTTPS); + curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTPS); + + // File containing the list of trusted CA. + std::string const cainfo = ConfigFind("CaInfo", ""); + if(cainfo.empty() == false) + curl_easy_setopt(curl, CURLOPT_CAINFO, cainfo.c_str()); + // Check server certificate against previous CA list ... + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, ConfigFindB("Verify-Peer", true) ? 1 : 0); + // ... and hostname against cert CN or subjectAltName + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, ConfigFindB("Verify-Host", true) ? 
2 : 0); + // Also enforce issuer of server certificate using its cert + std::string const issuercert = ConfigFind("IssuerCert", ""); + if(issuercert.empty() == false) + curl_easy_setopt(curl, CURLOPT_ISSUERCERT, issuercert.c_str()); + // For client authentication, certificate file ... + std::string const pem = ConfigFind("SslCert", ""); + if(pem.empty() == false) + curl_easy_setopt(curl, CURLOPT_SSLCERT, pem.c_str()); + // ... and associated key. + std::string const key = ConfigFind("SslKey", ""); + if(key.empty() == false) + curl_easy_setopt(curl, CURLOPT_SSLKEY, key.c_str()); + // Allow forcing SSL version to SSLv3 or TLSv1 + long final_version = CURL_SSLVERSION_DEFAULT; + std::string const sslversion = ConfigFind("SslForceVersion", ""); + if(sslversion == "TLSv1") + final_version = CURL_SSLVERSION_TLSv1; + else if(sslversion == "TLSv1.0") + final_version = CURL_SSLVERSION_TLSv1_0; + else if(sslversion == "TLSv1.1") + final_version = CURL_SSLVERSION_TLSv1_1; + else if(sslversion == "TLSv1.2") + final_version = CURL_SSLVERSION_TLSv1_2; + else if(sslversion == "SSLv3") + final_version = CURL_SSLVERSION_SSLv3; + curl_easy_setopt(curl, CURLOPT_SSLVERSION, final_version); + // CRL file + std::string const crlfile = ConfigFind("CrlFile", ""); + if(crlfile.empty() == false) + curl_easy_setopt(curl, CURLOPT_CRLFILE, crlfile.c_str()); + } + else + { + curl_easy_setopt(curl, CURLOPT_PROTOCOLS, CURLPROTO_HTTP); + curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTP); + } + // cache-control + if(ConfigFindB("No-Cache", false) == false) + { + // cache enabled + if (ConfigFindB("No-Store", false) == true) + headers = curl_slist_append(headers,"Cache-Control: no-store"); + std::string ss; + strprintf(ss, "Cache-Control: max-age=%u", ConfigFindI("Max-Age", 0)); + headers = curl_slist_append(headers, ss.c_str()); + } else { + // cache disabled by user + headers = curl_slist_append(headers, "Cache-Control: no-cache"); + headers = curl_slist_append(headers, "Pragma: 
no-cache"); + } + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + // speed limit + int const dlLimit = ConfigFindI("Dl-Limit", 0) * 1024; + if (dlLimit > 0) + curl_easy_setopt(curl, CURLOPT_MAX_RECV_SPEED_LARGE, dlLimit); + + // set header + curl_easy_setopt(curl, CURLOPT_USERAGENT, ConfigFind("User-Agent", "Debian APT-CURL/1.0 (" PACKAGE_VERSION ")").c_str()); + + // set timeout + int const timeout = ConfigFindI("Timeout", 120); + curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, timeout); + //set really low lowspeed timeout (see #497983) + curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT, DL_MIN_SPEED); + curl_easy_setopt(curl, CURLOPT_LOW_SPEED_TIME, timeout); + + if(_config->FindB("Acquire::ForceIPv4", false) == true) + curl_easy_setopt(curl, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4); + else if(_config->FindB("Acquire::ForceIPv6", false) == true) + curl_easy_setopt(curl, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V6); + + // debug + if (Debug == true) + curl_easy_setopt(curl, CURLOPT_VERBOSE, true); + + // error handling + curl_errorstr[0] = '\0'; + curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_errorstr); + + // If we ask for uncompressed files servers might respond with content- + // negotiation which lets us end up with compressed files we do not support, + // see 657029, 657560 and co, so if we have no extension on the request + // ask for text only. As a sidenote: If there is nothing to negotate servers + // seem to be nice and ignore it. 
+ if (ConfigFindB("SendAccept", true)) + { + size_t const filepos = Itm->Uri.find_last_of('/'); + string const file = Itm->Uri.substr(filepos + 1); + if (flExtension(file) == file) + headers = curl_slist_append(headers, "Accept: text/*"); + } + + // if we have the file send an if-range query with a range header + if (Server->RangesAllowed && stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0) + { + std::string Buf; + strprintf(Buf, "Range: bytes=%lli-", (long long) SBuf.st_size); + headers = curl_slist_append(headers, Buf.c_str()); + strprintf(Buf, "If-Range: %s", TimeRFC1123(SBuf.st_mtime, false).c_str()); + headers = curl_slist_append(headers, Buf.c_str()); + } + else if(Itm->LastModified > 0) + { + curl_easy_setopt(curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_IFMODSINCE); + curl_easy_setopt(curl, CURLOPT_TIMEVALUE, Itm->LastModified); + } + + if (Server->InitHashes(Itm->ExpectedHashes) == false) + return false; + + // keep apt updated + Res.Filename = Itm->DestFile; + + // get it! + CURLcode success = curl_easy_perform(curl); + + // If the server returns 200 OK but the If-Modified-Since condition is not + // met, CURLINFO_CONDITION_UNMET will be set to 1 + long curl_condition_unmet = 0; + curl_easy_getinfo(curl, CURLINFO_CONDITION_UNMET, &curl_condition_unmet); + if (curl_condition_unmet == 1) + Req.Result = 304; + + Req.File.Close(); + curl_slist_free_all(headers); + + // cleanup + if (success != CURLE_OK) + { +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wswitch" + switch (success) + { + case CURLE_COULDNT_RESOLVE_PROXY: + case CURLE_COULDNT_RESOLVE_HOST: + SetFailReason("ResolveFailure"); + break; + case CURLE_COULDNT_CONNECT: + SetFailReason("ConnectionRefused"); + break; + case CURLE_OPERATION_TIMEDOUT: + SetFailReason("Timeout"); + break; + } +#pragma GCC diagnostic pop + // only take curls technical errors if we haven't our own + // (e.g. 
for the maximum size limit we have and curls can be confusing) + if (_error->PendingError() == false) + _error->Error("%s", curl_errorstr); + else + _error->Warning("curl: %s", curl_errorstr); + return false; + } + + switch (DealWithHeaders(Res, Req)) + { + case BaseHttpMethod::IMS_HIT: + URIDone(Res); + break; + + case BaseHttpMethod::ERROR_WITH_CONTENT_PAGE: + // unlink, no need keep 401/404 page content in partial/ + RemoveFile(Binary.c_str(), Req.File.Name()); + // Fall through. + case BaseHttpMethod::ERROR_UNRECOVERABLE: + case BaseHttpMethod::ERROR_NOT_FROM_SERVER: + return false; + + case BaseHttpMethod::TRY_AGAIN_OR_REDIRECT: + Redirect(NextURI); + break; + + case BaseHttpMethod::FILE_IS_OPEN: + struct stat resultStat; + if (unlikely(stat(Req.File.Name().c_str(), &resultStat) != 0)) + { + _error->Errno("stat", "Unable to access file %s", Req.File.Name().c_str()); + return false; + } + Res.Size = resultStat.st_size; + + // Timestamp + curl_easy_getinfo(curl, CURLINFO_FILETIME, &Res.LastModified); + if (Res.LastModified != -1) + { + struct timeval times[2]; + times[0].tv_sec = Res.LastModified; + times[1].tv_sec = Res.LastModified; + times[0].tv_usec = times[1].tv_usec = 0; + utimes(Req.File.Name().c_str(), times); + } + else + Res.LastModified = resultStat.st_mtime; + + // take hashes + Res.TakeHashes(*(Server->GetHashes())); + + // keep apt updated + URIDone(Res); + break; + } + return true; +} + /*}}}*/ +std::unique_ptr HttpsMethod::CreateServerState(URI const &uri)/*{{{*/ +{ + return std::unique_ptr(new HttpsServerState(uri, this)); +} + /*}}}*/ +HttpsMethod::HttpsMethod(std::string &&pProg) : BaseHttpMethod(std::move(pProg),"1.2",Pipeline | SendConfig)/*{{{*/ +{ + auto addName = std::inserter(methodNames, methodNames.begin()); + addName = "http"; + auto const plus = Binary.find('+'); + if (plus != std::string::npos) + { + addName = Binary.substr(plus + 1); + auto base = Binary.substr(0, plus); + if (base != "https") + addName = base; + } + if 
(std::find(methodNames.begin(), methodNames.end(), "https") != methodNames.end()) + curl_global_init(CURL_GLOBAL_SSL); + else + curl_global_init(CURL_GLOBAL_NOTHING); + curl = curl_easy_init(); +} + /*}}}*/ +HttpsMethod::~HttpsMethod() /*{{{*/ +{ + curl_easy_cleanup(curl); +} + /*}}}*/ +int main(int, const char *argv[]) /*{{{*/ +{ + std::string Binary = flNotDir(argv[0]); + if (Binary.find('+') == std::string::npos && Binary != "https") + Binary.append("+https"); + return HttpsMethod(std::move(Binary)).Run(); +} + /*}}}*/ diff --git a/methods/curl.h b/methods/curl.h new file mode 100644 index 000000000..fbbf34501 --- /dev/null +++ b/methods/curl.h @@ -0,0 +1,88 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/// $Id: http.h,v 1.12 2002/04/18 05:09:38 jgg Exp $ +// $Id: http.h,v 1.12 2002/04/18 05:09:38 jgg Exp $ +/* ###################################################################### + + HTTP Acquire Method - This is the HTTP acquire method for APT. + + ##################################################################### */ + /*}}}*/ + +#ifndef APT_HTTPS_H +#define APT_HTTPS_H + +#include +#include +#include +#include +#include + +#include "basehttp.h" + +using std::cout; +using std::endl; + +class Hashes; +class HttpsMethod; +class FileFd; + +class HttpsServerState : public ServerState +{ + Hashes * Hash; + + protected: + virtual bool ReadHeaderLines(std::string &/*Data*/) APT_OVERRIDE { return false; } + virtual bool LoadNextResponse(bool const /*ToFile*/, RequestState &/*Req*/) APT_OVERRIDE { return false; } + + public: + virtual bool WriteResponse(std::string const &/*Data*/) APT_OVERRIDE { return false; } + + /** \brief Transfer the data from the socket */ + virtual bool RunData(RequestState &) APT_OVERRIDE { return false; } + virtual bool RunDataToDevNull(RequestState &) APT_OVERRIDE { return false; } + + virtual bool Open() APT_OVERRIDE { return false; } + virtual bool IsOpen() APT_OVERRIDE { return false; } + virtual bool Close() APT_OVERRIDE 
{ return false; } + virtual bool InitHashes(HashStringList const &ExpectedHashes) APT_OVERRIDE; + virtual Hashes * GetHashes() APT_OVERRIDE; + virtual bool Die(RequestState &/*Req*/) APT_OVERRIDE { return false; } + virtual bool Flush(FileFd * const /*File*/) APT_OVERRIDE { return false; } + virtual bool Go(bool /*ToFile*/, RequestState &/*Req*/) APT_OVERRIDE { return false; } + + HttpsServerState(URI Srv, HttpsMethod *Owner); + virtual ~HttpsServerState() {Close();}; +}; + +class HttpsMethod : public BaseHttpMethod +{ + // minimum speed in bytes/se that triggers download timeout handling + static const int DL_MIN_SPEED = 10; + + virtual bool Fetch(FetchItem *) APT_OVERRIDE; + + static size_t parse_header(void *buffer, size_t size, size_t nmemb, void *userp); + static size_t write_data(void *buffer, size_t size, size_t nmemb, void *userp); + static int progress_callback(void *clientp, double dltotal, double dlnow, + double ultotal, double ulnow); + bool SetupProxy(); + CURL *curl; + + // Used by BaseHttpMethods unused by https + virtual void SendReq(FetchItem *) APT_OVERRIDE { exit(42); } + virtual void RotateDNS() APT_OVERRIDE { exit(42); } + + public: + + virtual std::unique_ptr CreateServerState(URI const &uri) APT_OVERRIDE; + using pkgAcqMethod::FetchResult; + using pkgAcqMethod::FetchItem; + + explicit HttpsMethod(std::string &&pProg); + virtual ~HttpsMethod(); +}; + +#include +URI Proxy; + +#endif diff --git a/methods/https.cc b/methods/https.cc deleted file mode 100644 index 80ce048ca..000000000 --- a/methods/https.cc +++ /dev/null @@ -1,549 +0,0 @@ -//-*- mode: cpp; mode: fold -*- -// Description /*{{{*/ -// $Id: http.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $ -/* ###################################################################### - - HTTPS Acquire Method - This is the HTTPS acquire method for APT. 
- - It uses libcurl - - ##################################################################### */ - /*}}}*/ -// Include Files /*{{{*/ -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - - -#include "https.h" - -#include - /*}}}*/ -using namespace std; - -struct APT_HIDDEN CURLUserPointer { - HttpsMethod * const https; - HttpsMethod::FetchResult * const Res; - HttpsMethod::FetchItem const * const Itm; - RequestState * const Req; - CURLUserPointer(HttpsMethod * const https, HttpsMethod::FetchResult * const Res, - HttpsMethod::FetchItem const * const Itm, RequestState * const Req) : https(https), Res(Res), Itm(Itm), Req(Req) {} -}; - -size_t -HttpsMethod::parse_header(void *buffer, size_t size, size_t nmemb, void *userp) -{ - size_t len = size * nmemb; - CURLUserPointer *me = static_cast(userp); - std::string line((char*) buffer, len); - for (--len; len > 0; --len) - if (isspace_ascii(line[len]) == 0) - { - ++len; - break; - } - line.erase(len); - - if (line.empty() == true) - { - if (me->Req->File.Open(me->Itm->DestFile, FileFd::WriteAny) == false) - return ERROR_NOT_FROM_SERVER; - - me->Req->JunkSize = 0; - if (me->Req->Result != 416 && me->Req->StartPos != 0) - ; - else if (me->Req->Result == 416) - { - bool partialHit = false; - if (me->Itm->ExpectedHashes.usable() == true) - { - Hashes resultHashes(me->Itm->ExpectedHashes); - FileFd file(me->Itm->DestFile, FileFd::ReadOnly); - me->Req->TotalFileSize = file.FileSize(); - me->Req->Date = file.ModificationTime(); - resultHashes.AddFD(file); - HashStringList const hashList = resultHashes.GetHashStringList(); - partialHit = (me->Itm->ExpectedHashes == hashList); - } - else if (me->Req->Result == 416 && me->Req->TotalFileSize == me->Req->File.FileSize()) - partialHit = true; - - if (partialHit == true) - { - me->Req->Result = 200; - me->Req->StartPos = me->Req->TotalFileSize; - // the 
actual size is not important for https as curl will deal with it - // by itself and e.g. doesn't bother us with transport-encoding… - me->Req->JunkSize = std::numeric_limits::max(); - } - else - me->Req->StartPos = 0; - } - else - me->Req->StartPos = 0; - - me->Res->LastModified = me->Req->Date; - me->Res->Size = me->Req->TotalFileSize; - me->Res->ResumePoint = me->Req->StartPos; - - // we expect valid data, so tell our caller we get the file now - if (me->Req->Result >= 200 && me->Req->Result < 300) - { - if (me->Res->Size != 0 && me->Res->Size > me->Res->ResumePoint) - me->https->URIStart(*me->Res); - if (me->Req->AddPartialFileToHashes(me->Req->File) == false) - return 0; - } - else - me->Req->JunkSize = std::numeric_limitsReq->JunkSize)>::max(); - } - else if (me->Req->HeaderLine(line) == false) - return 0; - - return size*nmemb; -} - -size_t -HttpsMethod::write_data(void *buffer, size_t size, size_t nmemb, void *userp) -{ - CURLUserPointer *me = static_cast(userp); - size_t buffer_size = size * nmemb; - // we don't need to count the junk here, just drop anything we get as - // we don't always know how long it would be, e.g. in chunked encoding. 
- if (me->Req->JunkSize != 0) - return buffer_size; - - if(me->Req->File.Write(buffer, buffer_size) != true) - return 0; - - if(me->https->Queue->MaximumSize > 0) - { - unsigned long long const TotalWritten = me->Req->File.Tell(); - if (TotalWritten > me->https->Queue->MaximumSize) - { - me->https->SetFailReason("MaximumSizeExceeded"); - _error->Error("Writing more data than expected (%llu > %llu)", - TotalWritten, me->https->Queue->MaximumSize); - return 0; - } - } - - if (me->https->Server->GetHashes()->Add((unsigned char const * const)buffer, buffer_size) == false) - return 0; - - return buffer_size; -} - -// HttpsServerState::HttpsServerState - Constructor /*{{{*/ -HttpsServerState::HttpsServerState(URI Srv,HttpsMethod * Owner) : ServerState(Srv, Owner), Hash(NULL) -{ - TimeOut = Owner->ConfigFindI("Timeout", TimeOut); - Reset(); -} - /*}}}*/ -bool HttpsServerState::InitHashes(HashStringList const &ExpectedHashes) /*{{{*/ -{ - delete Hash; - Hash = new Hashes(ExpectedHashes); - return true; -} - /*}}}*/ -APT_PURE Hashes * HttpsServerState::GetHashes() /*{{{*/ -{ - return Hash; -} - /*}}}*/ - -bool HttpsMethod::SetupProxy() /*{{{*/ -{ - URI ServerName = Queue->Uri; - - // Determine the proxy setting - AutoDetectProxy(ServerName); - - // Curl should never read proxy settings from the environment, as - // we determine which proxy to use. Do this for consistency among - // methods and prevent an environment variable overriding a - // no-proxy ("DIRECT") setting in apt.conf. 
- curl_easy_setopt(curl, CURLOPT_PROXY, ""); - - // Determine the proxy setting - try https first, fallback to http and use env at last - string UseProxy = ConfigFind("Proxy::" + ServerName.Host, ""); - if (UseProxy.empty() == true) - UseProxy = ConfigFind("Proxy", ""); - // User wants to use NO proxy, so nothing to setup - if (UseProxy == "DIRECT") - return true; - - // Parse no_proxy, a comma (,) separated list of domains we don't want to use - // a proxy for so we stop right here if it is in the list - if (getenv("no_proxy") != 0 && CheckDomainList(ServerName.Host,getenv("no_proxy")) == true) - return true; - - if (UseProxy.empty() == true) - { - const char* result = nullptr; - if (std::find(methodNames.begin(), methodNames.end(), "https") != methodNames.end()) - result = getenv("https_proxy"); - // FIXME: Fall back to http_proxy is to remain compatible with - // existing setups and behaviour of apt.conf. This should be - // deprecated in the future (including apt.conf). Most other - // programs do not fall back to http proxy settings and neither - // should Apt. - if (result == nullptr && std::find(methodNames.begin(), methodNames.end(), "http") != methodNames.end()) - result = getenv("http_proxy"); - UseProxy = result == nullptr ? 
"" : result; - } - - // Determine what host and port to use based on the proxy settings - if (UseProxy.empty() == false) - { - Proxy = UseProxy; - AddProxyAuth(Proxy, ServerName); - - if (Proxy.Access == "socks5h") - curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5_HOSTNAME); - else if (Proxy.Access == "socks5") - curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5); - else if (Proxy.Access == "socks4a") - curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4A); - else if (Proxy.Access == "socks") - curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4); - else if (Proxy.Access == "http" || Proxy.Access == "https") - curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_HTTP); - else - return false; - - if (Proxy.Port != 1) - curl_easy_setopt(curl, CURLOPT_PROXYPORT, Proxy.Port); - curl_easy_setopt(curl, CURLOPT_PROXY, Proxy.Host.c_str()); - if (Proxy.User.empty() == false || Proxy.Password.empty() == false) - { - curl_easy_setopt(curl, CURLOPT_PROXYUSERNAME, Proxy.User.c_str()); - curl_easy_setopt(curl, CURLOPT_PROXYPASSWORD, Proxy.Password.c_str()); - } - } - return true; -} /*}}}*/ -// HttpsMethod::Fetch - Fetch an item /*{{{*/ -// --------------------------------------------------------------------- -/* This adds an item to the pipeline. We keep the pipeline at a fixed - depth. */ -bool HttpsMethod::Fetch(FetchItem *Itm) -{ - struct stat SBuf; - struct curl_slist *headers=NULL; - char curl_errorstr[CURL_ERROR_SIZE]; - URI Uri = Itm->Uri; - setPostfixForMethodNames(Uri.Host.c_str()); - AllowRedirect = ConfigFindB("AllowRedirect", true); - Debug = DebugEnabled(); - - // TODO: - // - http::Pipeline-Depth - // - error checking/reporting - // - more debug options? (CURLOPT_DEBUGFUNCTION?) 
- { - auto const plus = Binary.find('+'); - if (plus != std::string::npos) - Uri.Access = Binary.substr(plus + 1); - } - - curl_easy_reset(curl); - if (SetupProxy() == false) - return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str()); - - maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc")); - if (Server == nullptr || Server->Comp(Itm->Uri) == false) - Server = CreateServerState(Itm->Uri); - - // The "+" is encoded as a workaround for a amazon S3 bug - // see LP bugs #1003633 and #1086997. (taken from http method) - Uri.Path = QuoteString(Uri.Path, "+~ "); - - FetchResult Res; - RequestState Req(this, Server.get()); - CURLUserPointer userp(this, &Res, Itm, &Req); - // callbacks - curl_easy_setopt(curl, CURLOPT_URL, static_cast<string>(Uri).c_str()); - curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, parse_header); - curl_easy_setopt(curl, CURLOPT_WRITEHEADER, &userp); - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, &userp); - // options - curl_easy_setopt(curl, CURLOPT_NOPROGRESS, true); - curl_easy_setopt(curl, CURLOPT_FILETIME, true); - curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 0); - - if (std::find(methodNames.begin(), methodNames.end(), "https") != methodNames.end()) - { - curl_easy_setopt(curl, CURLOPT_PROTOCOLS, CURLPROTO_HTTPS); - curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTPS); - - // File containing the list of trusted CA. - std::string const cainfo = ConfigFind("CaInfo", ""); - if(cainfo.empty() == false) - curl_easy_setopt(curl, CURLOPT_CAINFO, cainfo.c_str()); - // Check server certificate against previous CA list ... - curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, ConfigFindB("Verify-Peer", true) ? 1 : 0); - // ... and hostname against cert CN or subjectAltName - curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, ConfigFindB("Verify-Host", true) ?
2 : 0); - // Also enforce issuer of server certificate using its cert - std::string const issuercert = ConfigFind("IssuerCert", ""); - if(issuercert.empty() == false) - curl_easy_setopt(curl, CURLOPT_ISSUERCERT, issuercert.c_str()); - // For client authentication, certificate file ... - std::string const pem = ConfigFind("SslCert", ""); - if(pem.empty() == false) - curl_easy_setopt(curl, CURLOPT_SSLCERT, pem.c_str()); - // ... and associated key. - std::string const key = ConfigFind("SslKey", ""); - if(key.empty() == false) - curl_easy_setopt(curl, CURLOPT_SSLKEY, key.c_str()); - // Allow forcing SSL version to SSLv3 or TLSv1 - long final_version = CURL_SSLVERSION_DEFAULT; - std::string const sslversion = ConfigFind("SslForceVersion", ""); - if(sslversion == "TLSv1") - final_version = CURL_SSLVERSION_TLSv1; - else if(sslversion == "TLSv1.0") - final_version = CURL_SSLVERSION_TLSv1_0; - else if(sslversion == "TLSv1.1") - final_version = CURL_SSLVERSION_TLSv1_1; - else if(sslversion == "TLSv1.2") - final_version = CURL_SSLVERSION_TLSv1_2; - else if(sslversion == "SSLv3") - final_version = CURL_SSLVERSION_SSLv3; - curl_easy_setopt(curl, CURLOPT_SSLVERSION, final_version); - // CRL file - std::string const crlfile = ConfigFind("CrlFile", ""); - if(crlfile.empty() == false) - curl_easy_setopt(curl, CURLOPT_CRLFILE, crlfile.c_str()); - } - else - { - curl_easy_setopt(curl, CURLOPT_PROTOCOLS, CURLPROTO_HTTP); - curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTP); - } - // cache-control - if(ConfigFindB("No-Cache", false) == false) - { - // cache enabled - if (ConfigFindB("No-Store", false) == true) - headers = curl_slist_append(headers,"Cache-Control: no-store"); - std::string ss; - strprintf(ss, "Cache-Control: max-age=%u", ConfigFindI("Max-Age", 0)); - headers = curl_slist_append(headers, ss.c_str()); - } else { - // cache disabled by user - headers = curl_slist_append(headers, "Cache-Control: no-cache"); - headers = curl_slist_append(headers, "Pragma: 
no-cache"); - } - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); - // speed limit - int const dlLimit = ConfigFindI("Dl-Limit", 0) * 1024; - if (dlLimit > 0) - curl_easy_setopt(curl, CURLOPT_MAX_RECV_SPEED_LARGE, dlLimit); - - // set header - curl_easy_setopt(curl, CURLOPT_USERAGENT, ConfigFind("User-Agent", "Debian APT-CURL/1.0 (" PACKAGE_VERSION ")").c_str()); - - // set timeout - int const timeout = ConfigFindI("Timeout", 120); - curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, timeout); - //set really low lowspeed timeout (see #497983) - curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT, DL_MIN_SPEED); - curl_easy_setopt(curl, CURLOPT_LOW_SPEED_TIME, timeout); - - if(_config->FindB("Acquire::ForceIPv4", false) == true) - curl_easy_setopt(curl, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4); - else if(_config->FindB("Acquire::ForceIPv6", false) == true) - curl_easy_setopt(curl, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V6); - - // debug - if (Debug == true) - curl_easy_setopt(curl, CURLOPT_VERBOSE, true); - - // error handling - curl_errorstr[0] = '\0'; - curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_errorstr); - - // If we ask for uncompressed files servers might respond with content- - // negotiation which lets us end up with compressed files we do not support, - // see 657029, 657560 and co, so if we have no extension on the request - // ask for text only. As a sidenote: If there is nothing to negotate servers - // seem to be nice and ignore it. 
- if (ConfigFindB("SendAccept", true)) - { - size_t const filepos = Itm->Uri.find_last_of('/'); - string const file = Itm->Uri.substr(filepos + 1); - if (flExtension(file) == file) - headers = curl_slist_append(headers, "Accept: text/*"); - } - - // if we have the file send an if-range query with a range header - if (Server->RangesAllowed && stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0) - { - std::string Buf; - strprintf(Buf, "Range: bytes=%lli-", (long long) SBuf.st_size); - headers = curl_slist_append(headers, Buf.c_str()); - strprintf(Buf, "If-Range: %s", TimeRFC1123(SBuf.st_mtime, false).c_str()); - headers = curl_slist_append(headers, Buf.c_str()); - } - else if(Itm->LastModified > 0) - { - curl_easy_setopt(curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_IFMODSINCE); - curl_easy_setopt(curl, CURLOPT_TIMEVALUE, Itm->LastModified); - } - - if (Server->InitHashes(Itm->ExpectedHashes) == false) - return false; - - // keep apt updated - Res.Filename = Itm->DestFile; - - // get it! - CURLcode success = curl_easy_perform(curl); - - // If the server returns 200 OK but the If-Modified-Since condition is not - // met, CURLINFO_CONDITION_UNMET will be set to 1 - long curl_condition_unmet = 0; - curl_easy_getinfo(curl, CURLINFO_CONDITION_UNMET, &curl_condition_unmet); - if (curl_condition_unmet == 1) - Req.Result = 304; - - Req.File.Close(); - curl_slist_free_all(headers); - - // cleanup - if (success != CURLE_OK) - { -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wswitch" - switch (success) - { - case CURLE_COULDNT_RESOLVE_PROXY: - case CURLE_COULDNT_RESOLVE_HOST: - SetFailReason("ResolveFailure"); - break; - case CURLE_COULDNT_CONNECT: - SetFailReason("ConnectionRefused"); - break; - case CURLE_OPERATION_TIMEDOUT: - SetFailReason("Timeout"); - break; - } -#pragma GCC diagnostic pop - // only take curls technical errors if we haven't our own - // (e.g. 
for the maximum size limit we have and curls can be confusing) - if (_error->PendingError() == false) - _error->Error("%s", curl_errorstr); - else - _error->Warning("curl: %s", curl_errorstr); - return false; - } - - switch (DealWithHeaders(Res, Req)) - { - case BaseHttpMethod::IMS_HIT: - URIDone(Res); - break; - - case BaseHttpMethod::ERROR_WITH_CONTENT_PAGE: - // unlink, no need keep 401/404 page content in partial/ - RemoveFile(Binary.c_str(), Req.File.Name()); - // Fall through. - case BaseHttpMethod::ERROR_UNRECOVERABLE: - case BaseHttpMethod::ERROR_NOT_FROM_SERVER: - return false; - - case BaseHttpMethod::TRY_AGAIN_OR_REDIRECT: - Redirect(NextURI); - break; - - case BaseHttpMethod::FILE_IS_OPEN: - struct stat resultStat; - if (unlikely(stat(Req.File.Name().c_str(), &resultStat) != 0)) - { - _error->Errno("stat", "Unable to access file %s", Req.File.Name().c_str()); - return false; - } - Res.Size = resultStat.st_size; - - // Timestamp - curl_easy_getinfo(curl, CURLINFO_FILETIME, &Res.LastModified); - if (Res.LastModified != -1) - { - struct timeval times[2]; - times[0].tv_sec = Res.LastModified; - times[1].tv_sec = Res.LastModified; - times[0].tv_usec = times[1].tv_usec = 0; - utimes(Req.File.Name().c_str(), times); - } - else - Res.LastModified = resultStat.st_mtime; - - // take hashes - Res.TakeHashes(*(Server->GetHashes())); - - // keep apt updated - URIDone(Res); - break; - } - return true; -} - /*}}}*/ -std::unique_ptr<ServerState> HttpsMethod::CreateServerState(URI const &uri)/*{{{*/ -{ - return std::unique_ptr<ServerState>(new HttpsServerState(uri, this)); -} - /*}}}*/ -HttpsMethod::HttpsMethod(std::string &&pProg) : BaseHttpMethod(std::move(pProg),"1.2",Pipeline | SendConfig)/*{{{*/ -{ - auto addName = std::inserter(methodNames, methodNames.begin()); - addName = "http"; - auto const plus = Binary.find('+'); - if (plus != std::string::npos) - { - addName = Binary.substr(plus + 1); - auto base = Binary.substr(0, plus); - if (base != "https") - addName = base; - } - if
(std::find(methodNames.begin(), methodNames.end(), "https") != methodNames.end()) - curl_global_init(CURL_GLOBAL_SSL); - else - curl_global_init(CURL_GLOBAL_NOTHING); - curl = curl_easy_init(); -} - /*}}}*/ -HttpsMethod::~HttpsMethod() /*{{{*/ -{ - curl_easy_cleanup(curl); -} - /*}}}*/ -int main(int, const char *argv[]) /*{{{*/ -{ - std::string Binary = flNotDir(argv[0]); - if (Binary.find('+') == std::string::npos && Binary != "https") - Binary.append("+https"); - return HttpsMethod(std::move(Binary)).Run(); -} - /*}}}*/ diff --git a/methods/https.h b/methods/https.h deleted file mode 100644 index fbbf34501..000000000 --- a/methods/https.h +++ /dev/null @@ -1,88 +0,0 @@ -// -*- mode: cpp; mode: fold -*- -// Description /*{{{*/// $Id: http.h,v 1.12 2002/04/18 05:09:38 jgg Exp $ -// $Id: http.h,v 1.12 2002/04/18 05:09:38 jgg Exp $ -/* ###################################################################### - - HTTP Acquire Method - This is the HTTP acquire method for APT. - - ##################################################################### */ - /*}}}*/ - -#ifndef APT_HTTPS_H -#define APT_HTTPS_H - -#include -#include -#include -#include -#include - -#include "basehttp.h" - -using std::cout; -using std::endl; - -class Hashes; -class HttpsMethod; -class FileFd; - -class HttpsServerState : public ServerState -{ - Hashes * Hash; - - protected: - virtual bool ReadHeaderLines(std::string &/*Data*/) APT_OVERRIDE { return false; } - virtual bool LoadNextResponse(bool const /*ToFile*/, RequestState &/*Req*/) APT_OVERRIDE { return false; } - - public: - virtual bool WriteResponse(std::string const &/*Data*/) APT_OVERRIDE { return false; } - - /** \brief Transfer the data from the socket */ - virtual bool RunData(RequestState &) APT_OVERRIDE { return false; } - virtual bool RunDataToDevNull(RequestState &) APT_OVERRIDE { return false; } - - virtual bool Open() APT_OVERRIDE { return false; } - virtual bool IsOpen() APT_OVERRIDE { return false; } - virtual bool Close() 
APT_OVERRIDE { return false; } - virtual bool InitHashes(HashStringList const &ExpectedHashes) APT_OVERRIDE; - virtual Hashes * GetHashes() APT_OVERRIDE; - virtual bool Die(RequestState &/*Req*/) APT_OVERRIDE { return false; } - virtual bool Flush(FileFd * const /*File*/) APT_OVERRIDE { return false; } - virtual bool Go(bool /*ToFile*/, RequestState &/*Req*/) APT_OVERRIDE { return false; } - - HttpsServerState(URI Srv, HttpsMethod *Owner); - virtual ~HttpsServerState() {Close();}; -}; - -class HttpsMethod : public BaseHttpMethod -{ - // minimum speed in bytes/se that triggers download timeout handling - static const int DL_MIN_SPEED = 10; - - virtual bool Fetch(FetchItem *) APT_OVERRIDE; - - static size_t parse_header(void *buffer, size_t size, size_t nmemb, void *userp); - static size_t write_data(void *buffer, size_t size, size_t nmemb, void *userp); - static int progress_callback(void *clientp, double dltotal, double dlnow, - double ultotal, double ulnow); - bool SetupProxy(); - CURL *curl; - - // Used by BaseHttpMethods unused by https - virtual void SendReq(FetchItem *) APT_OVERRIDE { exit(42); } - virtual void RotateDNS() APT_OVERRIDE { exit(42); } - - public: - - virtual std::unique_ptr<ServerState> CreateServerState(URI const &uri) APT_OVERRIDE; - using pkgAcqMethod::FetchResult; - using pkgAcqMethod::FetchItem; - - explicit HttpsMethod(std::string &&pProg); - virtual ~HttpsMethod(); -}; - -#include -URI Proxy; - -#endif diff --git a/po/CMakeLists.txt b/po/CMakeLists.txt index 2630a2f89..743b2f0eb 100644 --- a/po/CMakeLists.txt +++ b/po/CMakeLists.txt @@ -11,7 +11,7 @@ apt_add_translation_domain( ) if (HAVE_CURL) - set(curl_methods https) + set(curl_methods curl) else() set(curl_methods) endif() diff --git a/test/integration/test-apt-update-failure-propagation b/test/integration/test-apt-update-failure-propagation index 9ca6e481f..fb2b92b8c 100755 --- a/test/integration/test-apt-update-failure-propagation +++ b/test/integration/test-apt-update-failure-propagation @@
-100,7 +100,7 @@ for FILE in rootdir/etc/apt/sources.list.d/*-stable-* ; do sed -i -e "s#:${APTHTTPSPORT}/#:666/#" "$FILE" done testwarning aptget update -o Dir::Bin::Methods::https="${OLDMETHODS}/https" -if grep -q WITH_CURL:BOOL=OFF $PROJECT_BINARY_DIR/CMakeCache.txt; then +if grep -q FORCE_CURL:BOOL=OFF $PROJECT_BINARY_DIR/CMakeCache.txt; then testequalor2 "W: Failed to fetch https://localhost:666/dists/stable/InRelease Failed to connect to localhost port 666: Connection refused W: Some index files failed to download. They have been ignored, or old ones used instead." "W: Failed to fetch https://localhost:666/dists/stable/InRelease Could not connect to localhost:666 (127.0.0.1). - connect (111: Connection refused) W: Some index files failed to download. They have been ignored, or old ones used instead." tail -n 2 rootdir/tmp/testwarning.output -- cgit v1.2.3