author     David Kalnischkies <david@kalnischkies.de>  2016-11-09 12:25:44 +0100
committer  David Kalnischkies <david@kalnischkies.de>  2016-12-31 02:29:21 +0100
commit     13a9f08de18dea0dfc1951992b0ddeda9c2fa2dd (patch)
tree       8ad39c2360ffe9cc7fee09baba04fa6fe3033dbd /methods
parent     cfc11b2e1d8480727208b9d3e9577172de9a4038 (diff)
separating state variables regarding server/request
Having a Reset(bool) method to partially reset certain variables like the download size was always strange, so this commit splits ServerState into an additional RequestState living on the stack for as long as we deal with this request, causing an automatic "reset".

There is still much to do to make this code look better, but this is a good first step: it compiles cleanly and passes all tests, so keeping it in the history might be beneficial. By avoiding explicit memory allocations it also ends up fixing a small memory leak in https.

Closes: #440057
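For illustration only (not part of the commit): a minimal, self-contained C++ sketch of the pattern this change adopts. Connection-lifetime state stays in a long-lived server object, while everything tied to a single request lives in a stack-allocated struct, so each request starts from freshly initialized values (the automatic "reset") and its file is released by the destructor without any explicit new/delete. All names, the std::ofstream stand-in, and the destination path are hypothetical; the real code uses APT's ServerState, RequestState and FileFd.

#include <fstream>
#include <string>

struct Connection            // persists across requests (hypothetical stand-in for ServerState)
{
   bool Persistent = false;
   bool Pipeline = false;
};

struct Request               // per-request data (hypothetical stand-in for RequestState)
{
   unsigned int Result = 0;
   unsigned long long DownloadSize = 0;
   std::ofstream File;       // owned by the request; closed automatically on destruction
   Connection &Server;       // back-reference to the shared connection state
   explicit Request(Connection &S) : Server(S) {}
};

bool FetchOnce(Connection &Srv, std::string const &Dest)
{
   Request Req(Srv);         // fresh state on every call -- no Reset(bool) needed
   Req.File.open(Dest);
   // ... perform the transfer here, filling Req.Result, Req.DownloadSize ...
   return Req.File.good();   // Req (and its file) is torn down when it goes out of scope
}

int main()
{
   Connection Srv;
   FetchOnce(Srv, "/tmp/example.deb");   // hypothetical destination path
}

Because the request object is destroyed at the end of every fetch, no code path can leave an allocated file object behind, which is how the commit also fixes the small memory leak in the https method.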
Diffstat (limited to 'methods')
-rw-r--r--  methods/http.cc    |  99
-rw-r--r--  methods/http.h     |  14
-rw-r--r--  methods/https.cc   |  90
-rw-r--r--  methods/https.h    |  10
-rw-r--r--  methods/server.cc  | 151
-rw-r--r--  methods/server.h   |  73
6 files changed, 211 insertions(+), 226 deletions(-)
diff --git a/methods/http.cc b/methods/http.cc
index 8d3c569c1..b460644dd 100644
--- a/methods/http.cc
+++ b/methods/http.cc
@@ -555,12 +555,12 @@ bool HttpServerState::Close()
}
/*}}}*/
// HttpServerState::RunData - Transfer the data from the socket /*{{{*/
-bool HttpServerState::RunData(FileFd * const File)
+bool HttpServerState::RunData(RequestState &Req)
{
- State = Data;
+ Req.State = RequestState::Data;
// Chunked transfer encoding is fun..
- if (Encoding == Chunked)
+ if (Req.Encoding == RequestState::Chunked)
{
while (1)
{
@@ -573,7 +573,7 @@ bool HttpServerState::RunData(FileFd * const File)
if (In.WriteTillEl(Data,true) == true)
break;
}
- while ((Last = Go(false, File)) == true);
+ while ((Last = Go(false, Req)) == true);
if (Last == false)
return false;
@@ -591,7 +591,7 @@ bool HttpServerState::RunData(FileFd * const File)
if (In.WriteTillEl(Data,true) == true && Data.length() <= 2)
break;
}
- while ((Last = Go(false, File)) == true);
+ while ((Last = Go(false, Req)) == true);
if (Last == false)
return false;
return !_error->PendingError();
@@ -599,7 +599,7 @@ bool HttpServerState::RunData(FileFd * const File)
// Transfer the block
In.Limit(Len);
- while (Go(true, File) == true)
+ while (Go(true, Req) == true)
if (In.IsLimit() == true)
break;
@@ -615,7 +615,7 @@ bool HttpServerState::RunData(FileFd * const File)
if (In.WriteTillEl(Data,true) == true)
break;
}
- while ((Last = Go(false, File)) == true);
+ while ((Last = Go(false, Req)) == true);
if (Last == false)
return false;
}
@@ -624,10 +624,10 @@ bool HttpServerState::RunData(FileFd * const File)
{
/* Closes encoding is used when the server did not specify a size, the
loss of the connection means we are done */
- if (JunkSize != 0)
- In.Limit(JunkSize);
- else if (DownloadSize != 0)
- In.Limit(DownloadSize);
+ if (Req.JunkSize != 0)
+ In.Limit(Req.JunkSize);
+ else if (Req.DownloadSize != 0)
+ In.Limit(Req.DownloadSize);
else if (Persistent == false)
In.Limit(-1);
@@ -640,19 +640,19 @@ bool HttpServerState::RunData(FileFd * const File)
In.Limit(-1);
return !_error->PendingError();
}
- while (Go(true, File) == true);
+ while (Go(true, Req) == true);
}
- return Owner->Flush() && !_error->PendingError();
+ return Flush(&Req.File) && !_error->PendingError();
}
/*}}}*/
-bool HttpServerState::RunDataToDevNull() /*{{{*/
+bool HttpServerState::RunDataToDevNull(RequestState &Req) /*{{{*/
{
// no need to clean up if we discard the connection anyhow
if (Persistent == false)
return true;
- FileFd DevNull("/dev/null", FileFd::WriteOnly);
- return RunData(&DevNull);
+ Req.File.Open("/dev/null", FileFd::WriteOnly);
+ return RunData(Req);
}
/*}}}*/
bool HttpServerState::ReadHeaderLines(std::string &Data) /*{{{*/
@@ -660,9 +660,9 @@ bool HttpServerState::ReadHeaderLines(std::string &Data) /*{{{*/
return In.WriteTillEl(Data);
}
/*}}}*/
-bool HttpServerState::LoadNextResponse(bool const ToFile, FileFd * const File)/*{{{*/
+bool HttpServerState::LoadNextResponse(bool const ToFile, RequestState &Req)/*{{{*/
{
- return Go(ToFile, File);
+ return Go(ToFile, Req);
}
/*}}}*/
bool HttpServerState::WriteResponse(const std::string &Data) /*{{{*/
@@ -682,11 +682,10 @@ bool HttpServerState::InitHashes(HashStringList const &ExpectedHashes) /*{{{*/
return true;
}
/*}}}*/
-void HttpServerState::Reset(bool const Everything) /*{{{*/
+void HttpServerState::Reset() /*{{{*/
{
- ServerState::Reset(Everything);
- if (Everything)
- ServerFd = -1;
+ ServerState::Reset();
+ ServerFd = -1;
}
/*}}}*/
@@ -696,22 +695,22 @@ APT_PURE Hashes * HttpServerState::GetHashes() /*{{{*/
}
/*}}}*/
// HttpServerState::Die - The server has closed the connection. /*{{{*/
-bool HttpServerState::Die(FileFd * const File)
+bool HttpServerState::Die(RequestState &Req)
{
unsigned int LErrno = errno;
// Dump the buffer to the file
- if (State == ServerState::Data)
+ if (Req.State == RequestState::Data)
{
- if (File == nullptr)
+ if (Req.File.IsOpen() == false)
return true;
// on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
// can't be set
- if (File->Name() != "/dev/null")
- SetNonBlock(File->Fd(),false);
+ if (Req.File.Name() != "/dev/null")
+ SetNonBlock(Req.File.Fd(),false);
while (In.WriteSpace() == true)
{
- if (In.Write(File->Fd()) == false)
+ if (In.Write(Req.File.Fd()) == false)
return _error->Errno("write",_("Error writing to the file"));
// Done
@@ -721,7 +720,7 @@ bool HttpServerState::Die(FileFd * const File)
}
// See if this is because the server finished the data stream
- if (In.IsLimit() == false && State != HttpServerState::Header &&
+ if (In.IsLimit() == false && Req.State != RequestState::Header &&
Persistent == true)
{
Close();
@@ -752,7 +751,7 @@ bool HttpServerState::Die(FileFd * const File)
into the file */
bool HttpServerState::Flush(FileFd * const File)
{
- if (File != NULL)
+ if (File != nullptr)
{
// on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
// can't be set
@@ -779,7 +778,7 @@ bool HttpServerState::Flush(FileFd * const File)
// ---------------------------------------------------------------------
/* This runs the select loop over the server FDs, Output file FDs and
stdin. */
-bool HttpServerState::Go(bool ToFile, FileFd * const File)
+bool HttpServerState::Go(bool ToFile, RequestState &Req)
{
// Server has closed the connection
if (ServerFd == -1 && (In.WriteSpace() == false ||
@@ -800,8 +799,8 @@ bool HttpServerState::Go(bool ToFile, FileFd * const File)
// Add the file
int FileFD = -1;
- if (File != NULL)
- FileFD = File->Fd();
+ if (Req.File.IsOpen())
+ FileFD = Req.File.Fd();
if (In.WriteSpace() == true && ToFile == true && FileFD != -1)
FD_SET(FileFD,&wfds);
@@ -830,7 +829,7 @@ bool HttpServerState::Go(bool ToFile, FileFd * const File)
if (Res == 0)
{
_error->Error(_("Connection timed out"));
- return Die(File);
+ return Die(Req);
}
// Handle server IO
@@ -838,14 +837,14 @@ bool HttpServerState::Go(bool ToFile, FileFd * const File)
{
errno = 0;
if (In.Read(ServerFd) == false)
- return Die(File);
+ return Die(Req);
}
if (ServerFd != -1 && FD_ISSET(ServerFd,&wfds))
{
errno = 0;
if (Out.Write(ServerFd) == false)
- return Die(File);
+ return Die(Req);
}
// Send data to the file
@@ -855,11 +854,11 @@ bool HttpServerState::Go(bool ToFile, FileFd * const File)
return _error->Errno("write",_("Error writing to output file"));
}
- if (MaximumSize > 0 && File && File->Tell() > MaximumSize)
+ if (Req.MaximumSize > 0 && Req.File.IsOpen() && Req.File.Failed() == false && Req.File.Tell() > Req.MaximumSize)
{
Owner->SetFailReason("MaximumSizeExceeded");
return _error->Error("Writing more data than expected (%llu > %llu)",
- File->Tell(), MaximumSize);
+ Req.File.Tell(), Req.MaximumSize);
}
// Handle commands from APT
@@ -978,32 +977,28 @@ void HttpMethod::RotateDNS() /*{{{*/
::RotateDNS();
}
/*}}}*/
-ServerMethod::DealWithHeadersResult HttpMethod::DealWithHeaders(FetchResult &Res)/*{{{*/
+ServerMethod::DealWithHeadersResult HttpMethod::DealWithHeaders(FetchResult &Res, RequestState &Req)/*{{{*/
{
- auto ret = ServerMethod::DealWithHeaders(Res);
+ auto ret = ServerMethod::DealWithHeaders(Res, Req);
if (ret != ServerMethod::FILE_IS_OPEN)
return ret;
-
- // Open the file
- delete File;
- File = new FileFd(Queue->DestFile,FileFd::WriteAny);
- if (_error->PendingError() == true)
+ if (Req.File.Open(Queue->DestFile, FileFd::WriteAny) == false)
return ERROR_NOT_FROM_SERVER;
FailFile = Queue->DestFile;
FailFile.c_str(); // Make sure we don't do a malloc in the signal handler
- FailFd = File->Fd();
- FailTime = Server->Date;
+ FailFd = Req.File.Fd();
+ FailTime = Req.Date;
- if (Server->InitHashes(Queue->ExpectedHashes) == false || Server->AddPartialFileToHashes(*File) == false)
+ if (Server->InitHashes(Queue->ExpectedHashes) == false || Req.AddPartialFileToHashes(Req.File) == false)
{
_error->Errno("read",_("Problem hashing file"));
return ERROR_NOT_FROM_SERVER;
}
- if (Server->StartPos > 0)
- Res.ResumePoint = Server->StartPos;
+ if (Req.StartPos > 0)
+ Res.ResumePoint = Req.StartPos;
- SetNonBlock(File->Fd(),true);
+ SetNonBlock(Req.File.Fd(),true);
return FILE_IS_OPEN;
}
/*}}}*/
@@ -1015,7 +1010,5 @@ HttpMethod::HttpMethod(std::string &&pProg) : ServerMethod(pProg.c_str(), "1.2",
auto const plus = Binary.find('+');
if (plus != std::string::npos)
addName = Binary.substr(0, plus);
- File = 0;
- Server = 0;
}
/*}}}*/
diff --git a/methods/http.h b/methods/http.h
index b7341f5f8..4b0e77524 100644
--- a/methods/http.h
+++ b/methods/http.h
@@ -99,23 +99,23 @@ struct HttpServerState: public ServerState
protected:
virtual bool ReadHeaderLines(std::string &Data) APT_OVERRIDE;
- virtual bool LoadNextResponse(bool const ToFile, FileFd * const File) APT_OVERRIDE;
+ virtual bool LoadNextResponse(bool const ToFile, RequestState &Req) APT_OVERRIDE;
virtual bool WriteResponse(std::string const &Data) APT_OVERRIDE;
public:
- virtual void Reset(bool const Everything = true) APT_OVERRIDE;
+ virtual void Reset() APT_OVERRIDE;
- virtual bool RunData(FileFd * const File) APT_OVERRIDE;
- virtual bool RunDataToDevNull() APT_OVERRIDE;
+ virtual bool RunData(RequestState &Req) APT_OVERRIDE;
+ virtual bool RunDataToDevNull(RequestState &Req) APT_OVERRIDE;
virtual bool Open() APT_OVERRIDE;
virtual bool IsOpen() APT_OVERRIDE;
virtual bool Close() APT_OVERRIDE;
virtual bool InitHashes(HashStringList const &ExpectedHashes) APT_OVERRIDE;
virtual Hashes * GetHashes() APT_OVERRIDE;
- virtual bool Die(FileFd * const File) APT_OVERRIDE;
+ virtual bool Die(RequestState &Req) APT_OVERRIDE;
virtual bool Flush(FileFd * const File) APT_OVERRIDE;
- virtual bool Go(bool ToFile, FileFd * const File) APT_OVERRIDE;
+ virtual bool Go(bool ToFile, RequestState &Req) APT_OVERRIDE;
HttpServerState(URI Srv, HttpMethod *Owner);
virtual ~HttpServerState() {Close();};
@@ -128,7 +128,7 @@ class HttpMethod : public ServerMethod
virtual std::unique_ptr<ServerState> CreateServerState(URI const &uri) APT_OVERRIDE;
virtual void RotateDNS() APT_OVERRIDE;
- virtual DealWithHeadersResult DealWithHeaders(FetchResult &Res) APT_OVERRIDE;
+ virtual DealWithHeadersResult DealWithHeaders(FetchResult &Res, RequestState &Req) APT_OVERRIDE;
protected:
std::string AutoDetectProxyCmd;
diff --git a/methods/https.cc b/methods/https.cc
index 1bdd394ad..c473e474d 100644
--- a/methods/https.cc
+++ b/methods/https.cc
@@ -43,8 +43,9 @@ struct APT_HIDDEN CURLUserPointer {
HttpsMethod * const https;
HttpsMethod::FetchResult * const Res;
HttpsMethod::FetchItem const * const Itm;
+ RequestState * const Req;
CURLUserPointer(HttpsMethod * const https, HttpsMethod::FetchResult * const Res,
- HttpsMethod::FetchItem const * const Itm) : https(https), Res(Res), Itm(Itm) {}
+ HttpsMethod::FetchItem const * const Itm, RequestState * const Req) : https(https), Res(Res), Itm(Itm), Req(Req) {}
};
size_t
@@ -63,55 +64,58 @@ HttpsMethod::parse_header(void *buffer, size_t size, size_t nmemb, void *userp)
if (line.empty() == true)
{
- me->https->Server->JunkSize = 0;
- if (me->https->Server->Result != 416 && me->https->Server->StartPos != 0)
+ if (me->Req->File.Open(me->Itm->DestFile, FileFd::WriteAny) == false)
+ return ERROR_NOT_FROM_SERVER;
+
+ me->Req->JunkSize = 0;
+ if (me->Req->Result != 416 && me->Req->StartPos != 0)
;
- else if (me->https->Server->Result == 416)
+ else if (me->Req->Result == 416)
{
bool partialHit = false;
if (me->Itm->ExpectedHashes.usable() == true)
{
Hashes resultHashes(me->Itm->ExpectedHashes);
FileFd file(me->Itm->DestFile, FileFd::ReadOnly);
- me->https->Server->TotalFileSize = file.FileSize();
- me->https->Server->Date = file.ModificationTime();
+ me->Req->TotalFileSize = file.FileSize();
+ me->Req->Date = file.ModificationTime();
resultHashes.AddFD(file);
HashStringList const hashList = resultHashes.GetHashStringList();
partialHit = (me->Itm->ExpectedHashes == hashList);
}
- else if (me->https->Server->Result == 416 && me->https->Server->TotalFileSize == me->https->File->FileSize())
+ else if (me->Req->Result == 416 && me->Req->TotalFileSize == me->Req->File.FileSize())
partialHit = true;
if (partialHit == true)
{
- me->https->Server->Result = 200;
- me->https->Server->StartPos = me->https->Server->TotalFileSize;
+ me->Req->Result = 200;
+ me->Req->StartPos = me->Req->TotalFileSize;
// the actual size is not important for https as curl will deal with it
// by itself and e.g. doesn't bother us with transport-encoding…
- me->https->Server->JunkSize = std::numeric_limits<unsigned long long>::max();
+ me->Req->JunkSize = std::numeric_limits<unsigned long long>::max();
}
else
- me->https->Server->StartPos = 0;
+ me->Req->StartPos = 0;
}
else
- me->https->Server->StartPos = 0;
+ me->Req->StartPos = 0;
- me->Res->LastModified = me->https->Server->Date;
- me->Res->Size = me->https->Server->TotalFileSize;
- me->Res->ResumePoint = me->https->Server->StartPos;
+ me->Res->LastModified = me->Req->Date;
+ me->Res->Size = me->Req->TotalFileSize;
+ me->Res->ResumePoint = me->Req->StartPos;
// we expect valid data, so tell our caller we get the file now
- if (me->https->Server->Result >= 200 && me->https->Server->Result < 300)
+ if (me->Req->Result >= 200 && me->Req->Result < 300)
{
if (me->Res->Size != 0 && me->Res->Size > me->Res->ResumePoint)
me->https->URIStart(*me->Res);
- if (me->https->Server->AddPartialFileToHashes(*(me->https->File)) == false)
+ if (me->Req->AddPartialFileToHashes(me->Req->File) == false)
return 0;
}
else
- me->https->Server->JunkSize = std::numeric_limits<decltype(me->https->Server->JunkSize)>::max();
+ me->Req->JunkSize = std::numeric_limits<decltype(me->Req->JunkSize)>::max();
}
- else if (me->https->Server->HeaderLine(line) == false)
+ else if (me->Req->HeaderLine(line) == false)
return 0;
return size*nmemb;
@@ -120,29 +124,29 @@ HttpsMethod::parse_header(void *buffer, size_t size, size_t nmemb, void *userp)
size_t
HttpsMethod::write_data(void *buffer, size_t size, size_t nmemb, void *userp)
{
- HttpsMethod *me = static_cast<HttpsMethod *>(userp);
+ CURLUserPointer *me = static_cast<CURLUserPointer *>(userp);
size_t buffer_size = size * nmemb;
// we don't need to count the junk here, just drop anything we get as
// we don't always know how long it would be, e.g. in chunked encoding.
- if (me->Server->JunkSize != 0)
+ if (me->Req->JunkSize != 0)
return buffer_size;
- if(me->File->Write(buffer, buffer_size) != true)
+ if(me->Req->File.Write(buffer, buffer_size) != true)
return 0;
- if(me->Queue->MaximumSize > 0)
+ if(me->https->Queue->MaximumSize > 0)
{
- unsigned long long const TotalWritten = me->File->Tell();
- if (TotalWritten > me->Queue->MaximumSize)
+ unsigned long long const TotalWritten = me->Req->File.Tell();
+ if (TotalWritten > me->https->Queue->MaximumSize)
{
- me->SetFailReason("MaximumSizeExceeded");
+ me->https->SetFailReason("MaximumSizeExceeded");
_error->Error("Writing more data than expected (%llu > %llu)",
- TotalWritten, me->Queue->MaximumSize);
+ TotalWritten, me->https->Queue->MaximumSize);
return 0;
}
}
- if (me->Server->GetHashes()->Add((unsigned char const * const)buffer, buffer_size) == false)
+ if (me->https->Server->GetHashes()->Add((unsigned char const * const)buffer, buffer_size) == false)
return 0;
return buffer_size;
@@ -268,15 +272,18 @@ bool HttpsMethod::Fetch(FetchItem *Itm)
return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str());
maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc"));
+ if (Server == nullptr || Server->Comp(Itm->Uri) == false)
+ Server = CreateServerState(Itm->Uri);
FetchResult Res;
- CURLUserPointer userp(this, &Res, Itm);
+ RequestState Req(this, Server.get());
+ CURLUserPointer userp(this, &Res, Itm, &Req);
// callbacks
curl_easy_setopt(curl, CURLOPT_URL, static_cast<string>(Uri).c_str());
curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, parse_header);
curl_easy_setopt(curl, CURLOPT_WRITEHEADER, &userp);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
- curl_easy_setopt(curl, CURLOPT_WRITEDATA, this);
+ curl_easy_setopt(curl, CURLOPT_WRITEDATA, &userp);
// options
curl_easy_setopt(curl, CURLOPT_NOPROGRESS, true);
curl_easy_setopt(curl, CURLOPT_FILETIME, true);
@@ -387,13 +394,6 @@ bool HttpsMethod::Fetch(FetchItem *Itm)
headers = curl_slist_append(headers, "Accept: text/*");
}
- // go for it - if the file exists, append on it
- File = new FileFd(Itm->DestFile, FileFd::WriteAny);
- if (Server == nullptr || Server->Comp(Itm->Uri) == false)
- Server = CreateServerState(Itm->Uri);
- else
- Server->Reset(false);
-
// if we have the file send an if-range query with a range header
if (Server->RangesAllowed && stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
{
@@ -423,9 +423,9 @@ bool HttpsMethod::Fetch(FetchItem *Itm)
long curl_condition_unmet = 0;
curl_easy_getinfo(curl, CURLINFO_CONDITION_UNMET, &curl_condition_unmet);
if (curl_condition_unmet == 1)
- Server->Result = 304;
+ Req.Result = 304;
- File->Close();
+ Req.File.Close();
curl_slist_free_all(headers);
// cleanup
@@ -456,7 +456,7 @@ bool HttpsMethod::Fetch(FetchItem *Itm)
return false;
}
- switch (DealWithHeaders(Res))
+ switch (DealWithHeaders(Res, Req))
{
case ServerMethod::IMS_HIT:
URIDone(Res);
@@ -464,7 +464,7 @@ bool HttpsMethod::Fetch(FetchItem *Itm)
case ServerMethod::ERROR_WITH_CONTENT_PAGE:
// unlink, no need keep 401/404 page content in partial/
- RemoveFile(Binary.c_str(), File->Name());
+ RemoveFile(Binary.c_str(), Req.File.Name());
case ServerMethod::ERROR_UNRECOVERABLE:
case ServerMethod::ERROR_NOT_FROM_SERVER:
return false;
@@ -475,9 +475,9 @@ bool HttpsMethod::Fetch(FetchItem *Itm)
case ServerMethod::FILE_IS_OPEN:
struct stat resultStat;
- if (unlikely(stat(File->Name().c_str(), &resultStat) != 0))
+ if (unlikely(stat(Req.File.Name().c_str(), &resultStat) != 0))
{
- _error->Errno("stat", "Unable to access file %s", File->Name().c_str());
+ _error->Errno("stat", "Unable to access file %s", Req.File.Name().c_str());
return false;
}
Res.Size = resultStat.st_size;
@@ -490,7 +490,7 @@ bool HttpsMethod::Fetch(FetchItem *Itm)
times[0].tv_sec = Res.LastModified;
times[1].tv_sec = Res.LastModified;
times[0].tv_usec = times[1].tv_usec = 0;
- utimes(File->Name().c_str(), times);
+ utimes(Req.File.Name().c_str(), times);
}
else
Res.LastModified = resultStat.st_mtime;
@@ -502,8 +502,6 @@ bool HttpsMethod::Fetch(FetchItem *Itm)
URIDone(Res);
break;
}
-
- delete File;
return true;
}
/*}}}*/
diff --git a/methods/https.h b/methods/https.h
index 04e72e815..3b99b3abe 100644
--- a/methods/https.h
+++ b/methods/https.h
@@ -32,23 +32,23 @@ class HttpsServerState : public ServerState
protected:
virtual bool ReadHeaderLines(std::string &/*Data*/) APT_OVERRIDE { return false; }
- virtual bool LoadNextResponse(bool const /*ToFile*/, FileFd * const /*File*/) APT_OVERRIDE { return false; }
+ virtual bool LoadNextResponse(bool const /*ToFile*/, RequestState &/*Req*/) APT_OVERRIDE { return false; }
public:
virtual bool WriteResponse(std::string const &/*Data*/) APT_OVERRIDE { return false; }
/** \brief Transfer the data from the socket */
- virtual bool RunData(FileFd * const /*File*/) APT_OVERRIDE { return false; }
- virtual bool RunDataToDevNull() APT_OVERRIDE { return false; }
+ virtual bool RunData(RequestState &) APT_OVERRIDE { return false; }
+ virtual bool RunDataToDevNull(RequestState &) APT_OVERRIDE { return false; }
virtual bool Open() APT_OVERRIDE { return false; }
virtual bool IsOpen() APT_OVERRIDE { return false; }
virtual bool Close() APT_OVERRIDE { return false; }
virtual bool InitHashes(HashStringList const &ExpectedHashes) APT_OVERRIDE;
virtual Hashes * GetHashes() APT_OVERRIDE;
- virtual bool Die(FileFd * const /*File*/) APT_OVERRIDE { return false; }
+ virtual bool Die(RequestState &/*Req*/) APT_OVERRIDE { return false; }
virtual bool Flush(FileFd * const /*File*/) APT_OVERRIDE { return false; }
- virtual bool Go(bool /*ToFile*/, FileFd * const /*File*/) APT_OVERRIDE { return false; }
+ virtual bool Go(bool /*ToFile*/, RequestState &/*Req*/) APT_OVERRIDE { return false; }
HttpsServerState(URI Srv, HttpsMethod *Owner);
virtual ~HttpsServerState() {Close();};
diff --git a/methods/server.cc b/methods/server.cc
index 0408dddfd..29419e5cf 100644
--- a/methods/server.cc
+++ b/methods/server.cc
@@ -43,12 +43,10 @@ time_t ServerMethod::FailTime = 0;
// ---------------------------------------------------------------------
/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header
parse error occurred */
-ServerState::RunHeadersResult ServerState::RunHeaders(FileFd * const File,
+ServerState::RunHeadersResult ServerState::RunHeaders(RequestState &Req,
const std::string &Uri)
{
- Reset(false);
Owner->Status(_("Waiting for headers"));
-
do
{
string Data;
@@ -57,35 +55,32 @@ ServerState::RunHeadersResult ServerState::RunHeaders(FileFd * const File,
if (Owner->Debug == true)
clog << "Answer for: " << Uri << endl << Data;
-
+
for (string::const_iterator I = Data.begin(); I < Data.end(); ++I)
{
string::const_iterator J = I;
for (; J != Data.end() && *J != '\n' && *J != '\r'; ++J);
- if (HeaderLine(string(I,J)) == false)
+ if (Req.HeaderLine(string(I,J)) == false)
return RUN_HEADERS_PARSE_ERROR;
I = J;
}
// 100 Continue is a Nop...
- if (Result == 100)
+ if (Req.Result == 100)
continue;
// Tidy up the connection persistence state.
- if (Encoding == Closes && HaveContent == true)
+ if (Req.Encoding == RequestState::Closes && Req.HaveContent == true)
Persistent = false;
return RUN_HEADERS_OK;
}
- while (LoadNextResponse(false, File) == true);
+ while (LoadNextResponse(false, Req) == true);
return RUN_HEADERS_IO_ERROR;
}
/*}}}*/
-// ServerState::HeaderLine - Process a header line /*{{{*/
-// ---------------------------------------------------------------------
-/* */
-bool ServerState::HeaderLine(string Line)
+bool RequestState::HeaderLine(string const &Line) /*{{{*/
{
if (Line.empty() == true)
return true;
@@ -116,18 +111,18 @@ bool ServerState::HeaderLine(string Line)
/* Check the HTTP response header to get the default persistence
state. */
if (Major < 1)
- Persistent = false;
+ Server->Persistent = false;
else
{
if (Major == 1 && Minor == 0)
{
- Persistent = false;
+ Server->Persistent = false;
}
else
{
- Persistent = true;
- if (PipelineAllowed)
- Pipeline = true;
+ Server->Persistent = true;
+ if (Server->PipelineAllowed)
+ Server->Pipeline = true;
}
}
@@ -209,16 +204,16 @@ bool ServerState::HeaderLine(string Line)
{
if (stringcasecmp(Val,"close") == 0)
{
- Persistent = false;
- Pipeline = false;
+ Server->Persistent = false;
+ Server->Pipeline = false;
/* Some servers send error pages (as they are dynamically generated)
for simplicity via a connection close instead of e.g. chunked,
so assuming an always closing server only if we get a file + close */
if (Result >= 200 && Result < 300)
- PipelineAllowed = false;
+ Server->PipelineAllowed = false;
}
else if (stringcasecmp(Val,"keep-alive") == 0)
- Persistent = true;
+ Server->Persistent = true;
return true;
}
@@ -240,7 +235,7 @@ bool ServerState::HeaderLine(string Line)
std::string ranges = ',' + Val + ',';
ranges.erase(std::remove(ranges.begin(), ranges.end(), ' '), ranges.end());
if (ranges.find(",bytes,") == std::string::npos)
- RangesAllowed = false;
+ Server->RangesAllowed = false;
return true;
}
@@ -249,28 +244,23 @@ bool ServerState::HeaderLine(string Line)
/*}}}*/
// ServerState::ServerState - Constructor /*{{{*/
ServerState::ServerState(URI Srv, ServerMethod *Owner) :
- DownloadSize(0), ServerName(Srv), TimeOut(120), Owner(Owner)
+ ServerName(Srv), TimeOut(120), Owner(Owner)
{
Reset();
}
/*}}}*/
-bool ServerState::AddPartialFileToHashes(FileFd &File) /*{{{*/
+bool RequestState::AddPartialFileToHashes(FileFd &File) /*{{{*/
{
File.Truncate(StartPos);
- return GetHashes()->AddFD(File, StartPos);
+ return Server->GetHashes()->AddFD(File, StartPos);
}
/*}}}*/
-void ServerState::Reset(bool const Everything) /*{{{*/
+void ServerState::Reset() /*{{{*/
{
- Major = 0; Minor = 0; Result = 0; Code[0] = '\0';
- TotalFileSize = 0; JunkSize = 0; StartPos = 0;
- Encoding = Closes; time(&Date); HaveContent = false;
- State = Header; MaximumSize = 0;
- if (Everything)
- {
- Persistent = false; Pipeline = false; PipelineAllowed = true;
- RangesAllowed = true;
- }
+ Persistent = false;
+ Pipeline = false;
+ PipelineAllowed = true;
+ RangesAllowed = true;
}
/*}}}*/
@@ -280,10 +270,10 @@ void ServerState::Reset(bool const Everything) /*{{{*/
to do. Returns DealWithHeadersResult (see http.h for details).
*/
ServerMethod::DealWithHeadersResult
-ServerMethod::DealWithHeaders(FetchResult &Res)
+ServerMethod::DealWithHeaders(FetchResult &Res, RequestState &Req)
{
// Not Modified
- if (Server->Result == 304)
+ if (Req.Result == 304)
{
RemoveFile("server", Queue->DestFile);
Res.IMSHit = true;
@@ -300,26 +290,26 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
* redirect. Pass on those codes so the error handling kicks in.
*/
if (AllowRedirect
- && (Server->Result > 300 && Server->Result < 400)
- && (Server->Result != 300 // Multiple Choices
- && Server->Result != 304 // Not Modified
- && Server->Result != 306)) // (Not part of HTTP/1.1, reserved)
+ && (Req.Result > 300 && Req.Result < 400)
+ && (Req.Result != 300 // Multiple Choices
+ && Req.Result != 304 // Not Modified
+ && Req.Result != 306)) // (Not part of HTTP/1.1, reserved)
{
- if (Server->Location.empty() == true)
+ if (Req.Location.empty() == true)
;
- else if (Server->Location[0] == '/' && Queue->Uri.empty() == false)
+ else if (Req.Location[0] == '/' && Queue->Uri.empty() == false)
{
URI Uri = Queue->Uri;
if (Uri.Host.empty() == false)
NextURI = URI::SiteOnly(Uri);
else
NextURI.clear();
- NextURI.append(DeQuoteString(Server->Location));
+ NextURI.append(DeQuoteString(Req.Location));
if (Queue->Uri == NextURI)
{
SetFailReason("RedirectionLoop");
_error->Error("Redirection loop encountered");
- if (Server->HaveContent == true)
+ if (Req.HaveContent == true)
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
@@ -327,12 +317,12 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
}
else
{
- NextURI = DeQuoteString(Server->Location);
+ NextURI = DeQuoteString(Req.Location);
URI tmpURI = NextURI;
if (tmpURI.Access.find('+') != std::string::npos)
{
_error->Error("Server tried to trick us into using a specific implementation: %s", tmpURI.Access.c_str());
- if (Server->HaveContent == true)
+ if (Req.HaveContent == true)
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
@@ -358,7 +348,7 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
{
SetFailReason("RedirectionLoop");
_error->Error("Redirection loop encountered");
- if (Server->HaveContent == true)
+ if (Req.HaveContent == true)
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
@@ -390,7 +380,7 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
/* else pass through for error message */
}
// retry after an invalid range response without partial data
- else if (Server->Result == 416)
+ else if (Req.Result == 416)
{
struct stat SBuf;
if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
@@ -400,25 +390,25 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
{
Hashes resultHashes(Queue->ExpectedHashes);
FileFd file(Queue->DestFile, FileFd::ReadOnly);
- Server->TotalFileSize = file.FileSize();
- Server->Date = file.ModificationTime();
+ Req.TotalFileSize = file.FileSize();
+ Req.Date = file.ModificationTime();
resultHashes.AddFD(file);
HashStringList const hashList = resultHashes.GetHashStringList();
partialHit = (Queue->ExpectedHashes == hashList);
}
- else if ((unsigned long long)SBuf.st_size == Server->TotalFileSize)
+ else if ((unsigned long long)SBuf.st_size == Req.TotalFileSize)
partialHit = true;
if (partialHit == true)
{
// the file is completely downloaded, but was not moved
- if (Server->HaveContent == true)
+ if (Req.HaveContent == true)
{
// nuke the sent error page
- Server->RunDataToDevNull();
- Server->HaveContent = false;
+ Server->RunDataToDevNull(Req);
+ Req.HaveContent = false;
}
- Server->StartPos = Server->TotalFileSize;
- Server->Result = 200;
+ Req.StartPos = Req.TotalFileSize;
+ Req.Result = 200;
}
else if (RemoveFile("server", Queue->DestFile))
{
@@ -430,23 +420,23 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
/* We have a reply we don't handle. This should indicate a perm server
failure */
- if (Server->Result < 200 || Server->Result >= 300)
+ if (Req.Result < 200 || Req.Result >= 300)
{
if (_error->PendingError() == false)
{
std::string err;
- strprintf(err, "HttpError%u", Server->Result);
+ strprintf(err, "HttpError%u", Req.Result);
SetFailReason(err);
- _error->Error("%u %s", Server->Result, Server->Code);
+ _error->Error("%u %s", Req.Result, Req.Code);
}
- if (Server->HaveContent == true)
+ if (Req.HaveContent == true)
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
// This is some sort of 2xx 'data follows' reply
- Res.LastModified = Server->Date;
- Res.Size = Server->TotalFileSize;
+ Res.LastModified = Req.Date;
+ Res.Size = Req.TotalFileSize;
return FILE_IS_OPEN;
}
/*}}}*/
@@ -605,9 +595,10 @@ int ServerMethod::Loop()
// Fill the pipeline.
Fetch(0);
-
+
+ RequestState Req(this, Server.get());
// Fetch the next URL header data from the server.
- switch (Server->RunHeaders(File, Queue->Uri))
+ switch (Server->RunHeaders(Req, Queue->Uri))
{
case ServerState::RUN_HEADERS_OK:
break;
@@ -646,7 +637,7 @@ int ServerMethod::Loop()
// Decide what to do.
FetchResult Res;
Res.Filename = Queue->DestFile;
- switch (DealWithHeaders(Res))
+ switch (DealWithHeaders(Res, Req))
{
// Ok, the file is Open
case FILE_IS_OPEN:
@@ -660,24 +651,23 @@ int ServerMethod::Loop()
// we could do "Server->MaximumSize = Queue->MaximumSize" here
// but that would break the clever pipeline messup detection
// so instead we use the size of the biggest item in the queue
- Server->MaximumSize = FindMaximumObjectSizeInQueue();
+ Req.MaximumSize = FindMaximumObjectSizeInQueue();
- if (Server->HaveContent)
- Result = Server->RunData(File);
+ if (Req.HaveContent)
+ Result = Server->RunData(Req);
/* If the server is sending back sizeless responses then fill in
the size now */
if (Res.Size == 0)
- Res.Size = File->Size();
-
+ Res.Size = Req.File.Size();
+
// Close the file, destroy the FD object and timestamp it
FailFd = -1;
- delete File;
- File = 0;
-
+ Req.File.Close();
+
// Timestamp
struct timeval times[2];
- times[0].tv_sec = times[1].tv_sec = Server->Date;
+ times[0].tv_sec = times[1].tv_sec = Req.Date;
times[0].tv_usec = times[1].tv_usec = 0;
utimes(Queue->DestFile.c_str(), times);
@@ -758,9 +748,6 @@ int ServerMethod::Loop()
// Hard internal error, kill the connection and fail
case ERROR_NOT_FROM_SERVER:
{
- delete File;
- File = 0;
-
Fail();
RotateDNS();
Server->Close();
@@ -770,7 +757,7 @@ int ServerMethod::Loop()
// We need to flush the data, the header is like a 404 w/ error text
case ERROR_WITH_CONTENT_PAGE:
{
- Server->RunDataToDevNull();
+ Server->RunDataToDevNull(Req);
Fail();
break;
}
@@ -779,8 +766,8 @@ int ServerMethod::Loop()
case TRY_AGAIN_OR_REDIRECT:
{
// Clear rest of response if there is content
- if (Server->HaveContent)
- Server->RunDataToDevNull();
+ if (Req.HaveContent)
+ Server->RunDataToDevNull(Req);
Redirect(NextURI);
break;
}
@@ -805,7 +792,7 @@ unsigned long long ServerMethod::FindMaximumObjectSizeInQueue() const /*{{{*/
}
/*}}}*/
ServerMethod::ServerMethod(std::string &&Binary, char const * const Ver,unsigned long const Flags) :/*{{{*/
- aptMethod(std::move(Binary), Ver, Flags), Server(nullptr), File(NULL), PipelineDepth(10),
+ aptMethod(std::move(Binary), Ver, Flags), Server(nullptr), PipelineDepth(10),
AllowRedirect(false), Debug(false)
{
}
diff --git a/methods/server.h b/methods/server.h
index c3adba87a..6b12c7c7a 100644
--- a/methods/server.h
+++ b/methods/server.h
@@ -12,6 +12,7 @@
#define APT_SERVER_H
#include <apt-pkg/strutl.h>
+#include <apt-pkg/fileutl.h>
#include "aptmethod.h"
#include <time.h>
@@ -24,52 +25,62 @@ using std::endl;
class Hashes;
class ServerMethod;
-class FileFd;
+struct ServerState;
-struct ServerState
+struct RequestState
{
- // This is the last parsed Header Line
- unsigned int Major;
- unsigned int Minor;
- unsigned int Result;
+ unsigned int Major = 0;
+ unsigned int Minor = 0;
+ unsigned int Result = 0;
char Code[360];
- // These are some statistics from the last parsed header lines
-
// total size of the usable content (aka: the file)
- unsigned long long TotalFileSize;
+ unsigned long long TotalFileSize = 0;
// size we actually download (can be smaller than Size if we have partial content)
- unsigned long long DownloadSize;
+ unsigned long long DownloadSize = 0;
// size of junk content (aka: server error pages)
- unsigned long long JunkSize;
+ unsigned long long JunkSize = 0;
// The start of the data (for partial content)
- unsigned long long StartPos;
+ unsigned long long StartPos = 0;
+
+ unsigned long long MaximumSize = 0;
time_t Date;
- bool HaveContent;
- enum {Chunked,Stream,Closes} Encoding;
- enum {Header, Data} State;
+ bool HaveContent = false;
+ enum {Chunked,Stream,Closes} Encoding = Closes;
+ enum {Header, Data} State = Header;
+ std::string Location;
+
+ FileFd File;
+
+ ServerMethod * const Owner;
+ ServerState * const Server;
+
+ bool HeaderLine(std::string const &Line);
+ bool AddPartialFileToHashes(FileFd &File);
+
+ RequestState(ServerMethod * const Owner, ServerState * const Server) :
+ Owner(Owner), Server(Server) { time(&Date); }
+};
+
+struct ServerState
+{
bool Persistent;
bool PipelineAllowed;
bool RangesAllowed;
- std::string Location;
- // This is a Persistent attribute of the server itself.
bool Pipeline;
URI ServerName;
URI Proxy;
unsigned long TimeOut;
- unsigned long long MaximumSize;
-
protected:
ServerMethod *Owner;
virtual bool ReadHeaderLines(std::string &Data) = 0;
- virtual bool LoadNextResponse(bool const ToFile, FileFd * const File) = 0;
+ virtual bool LoadNextResponse(bool const ToFile, RequestState &Req) = 0;
public:
- bool HeaderLine(std::string Line);
/** \brief Result of the header acquire */
enum RunHeadersResult {
@@ -81,25 +92,24 @@ struct ServerState
RUN_HEADERS_PARSE_ERROR
};
/** \brief Get the headers before the data */
- RunHeadersResult RunHeaders(FileFd * const File, const std::string &Uri);
- bool AddPartialFileToHashes(FileFd &File);
+ RunHeadersResult RunHeaders(RequestState &Req, const std::string &Uri);
bool Comp(URI Other) const {return Other.Host == ServerName.Host && Other.Port == ServerName.Port;};
- virtual void Reset(bool const Everything = true);
+ virtual void Reset();
virtual bool WriteResponse(std::string const &Data) = 0;
/** \brief Transfer the data from the socket */
- virtual bool RunData(FileFd * const File) = 0;
- virtual bool RunDataToDevNull() = 0;
+ virtual bool RunData(RequestState &Req) = 0;
+ virtual bool RunDataToDevNull(RequestState &Req) = 0;
virtual bool Open() = 0;
virtual bool IsOpen() = 0;
virtual bool Close() = 0;
virtual bool InitHashes(HashStringList const &ExpectedHashes) = 0;
- virtual Hashes * GetHashes() = 0;
- virtual bool Die(FileFd * const File) = 0;
+ virtual bool Die(RequestState &Req) = 0;
virtual bool Flush(FileFd * const File) = 0;
- virtual bool Go(bool ToFile, FileFd * const File) = 0;
+ virtual bool Go(bool ToFile, RequestState &Req) = 0;
+ virtual Hashes * GetHashes() = 0;
ServerState(URI Srv, ServerMethod *Owner);
virtual ~ServerState() {};
@@ -112,7 +122,6 @@ class ServerMethod : public aptMethod
std::unique_ptr<ServerState> Server;
std::string NextURI;
- FileFd *File;
unsigned long PipelineDepth;
bool AllowRedirect;
@@ -140,7 +149,7 @@ class ServerMethod : public aptMethod
TRY_AGAIN_OR_REDIRECT
};
/** \brief Handle the retrieved header data */
- virtual DealWithHeadersResult DealWithHeaders(FetchResult &Res);
+ virtual DealWithHeadersResult DealWithHeaders(FetchResult &Res, RequestState &Req);
// In the event of a fatal signal this file will be closed and timestamped.
static std::string FailFile;
@@ -148,8 +157,6 @@ class ServerMethod : public aptMethod
static time_t FailTime;
static APT_NORETURN void SigTerm(int);
- virtual bool Flush() { return Server->Flush(File); };
-
int Loop();
virtual void SendReq(FetchItem *Itm) = 0;