Diffstat (limited to 'methods')
-rw-r--r--  methods/bzip2.cc   | 177
-rw-r--r--  methods/ftp.cc     |   3
-rw-r--r--  methods/ftp.h      |   2
-rw-r--r--  methods/gpgv.cc    | 100
-rw-r--r--  methods/gzip.cc    |  63
-rw-r--r--  methods/http.cc    |  59
-rw-r--r--  methods/http.h     |  39
-rw-r--r--  methods/makefile   |  20
-rw-r--r--  methods/mirror.cc  | 128
-rw-r--r--  methods/mirror.h   |   5
-rw-r--r--  methods/rred.cc    |  19
-rw-r--r--  methods/rsh.cc     |   3
12 files changed, 373 insertions, 245 deletions
diff --git a/methods/bzip2.cc b/methods/bzip2.cc
new file mode 100644
index 000000000..5da214bfc
--- /dev/null
+++ b/methods/bzip2.cc
@@ -0,0 +1,177 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+/* ######################################################################
+
+ Bzip2 method - Take a file URI in and decompress it into the target
+ file.
+
+ While the method is named "bzip2", it also handles other compression
+ types: it execs a binary chosen from the name of the method, so it
+ can equally serve as the gzip, lzma or other decompression method if
+ installed under the matching name.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/acquire-method.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/hashes.h>
+
+#include <sys/stat.h>
+#include <unistd.h>
+#include <utime.h>
+#include <stdio.h>
+#include <errno.h>
+#include <apti18n.h>
+ /*}}}*/
+
+const char *Prog;
+
+class Bzip2Method : public pkgAcqMethod
+{
+ virtual bool Fetch(FetchItem *Itm);
+
+ public:
+
+ Bzip2Method() : pkgAcqMethod("1.1",SingleInstance | SendConfig) {};
+};
+
+
+// Bzip2Method::Fetch - Decompress the passed URI /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool Bzip2Method::Fetch(FetchItem *Itm)
+{
+ URI Get = Itm->Uri;
+ string Path = Get.Host + Get.Path; // To account for relative paths
+
+ string GzPathOption = "Dir::bin::"+string(Prog);
+
+ FetchResult Res;
+ Res.Filename = Itm->DestFile;
+ URIStart(Res);
+
+ // Open the source and destination files
+ FileFd From(Path,FileFd::ReadOnly);
+
+ // if the file is empty, just rename it and return
+ if(From.Size() == 0)
+ {
+ rename(Path.c_str(), Itm->DestFile.c_str());
+ return true;
+ }
+
+ int GzOut[2];
+ if (pipe(GzOut) < 0)
+ return _error->Errno("pipe",_("Couldn't open pipe for %s"),Prog);
+
+ // Fork bzip2
+ pid_t Process = ExecFork();
+ if (Process == 0)
+ {
+ close(GzOut[0]);
+ dup2(From.Fd(),STDIN_FILENO);
+ dup2(GzOut[1],STDOUT_FILENO);
+ From.Close();
+ close(GzOut[1]);
+ SetCloseExec(STDIN_FILENO,false);
+ SetCloseExec(STDOUT_FILENO,false);
+
+ const char *Args[3];
+ string Tmp = _config->Find(GzPathOption,Prog);
+ Args[0] = Tmp.c_str();
+ Args[1] = "-d";
+ Args[2] = 0;
+ execvp(Args[0],(char **)Args);
+ _exit(100);
+ }
+ From.Close();
+ close(GzOut[1]);
+
+ FileFd FromGz(GzOut[0]); // For autoclose
+ FileFd To(Itm->DestFile,FileFd::WriteEmpty);
+ To.EraseOnFailure();
+ if (_error->PendingError() == true)
+ return false;
+
+ // Read data from bzip2, generate checksums and write
+ Hashes Hash;
+ bool Failed = false;
+ while (1)
+ {
+ unsigned char Buffer[4*1024];
+ ssize_t Count; // read() may return -1, so keep it signed
+
+ Count = read(GzOut[0],Buffer,sizeof(Buffer));
+ if (Count < 0 && errno == EINTR)
+ continue;
+
+ if (Count < 0)
+ {
+ _error->Errno("read", _("Read error from %s process"),Prog);
+ Failed = true;
+ break;
+ }
+
+ if (Count == 0)
+ break;
+
+ Hash.Add(Buffer,Count);
+ if (To.Write(Buffer,Count) == false)
+ {
+ Failed = true;
+ FromGz.Close();
+ break;
+ }
+ }
+
+ // Wait for bzip2 to finish
+ if (ExecWait(Process,_config->Find(GzPathOption,Prog).c_str(),false) == false)
+ {
+ To.OpFail();
+ return false;
+ }
+
+ To.Close();
+
+ if (Failed == true)
+ return false;
+
+ // Transfer the modification times
+ struct stat Buf;
+ if (stat(Path.c_str(),&Buf) != 0)
+ return _error->Errno("stat",_("Failed to stat"));
+
+ struct utimbuf TimeBuf;
+ TimeBuf.actime = Buf.st_atime;
+ TimeBuf.modtime = Buf.st_mtime;
+ if (utime(Itm->DestFile.c_str(),&TimeBuf) != 0)
+ return _error->Errno("utime",_("Failed to set modification time"));
+
+ if (stat(Itm->DestFile.c_str(),&Buf) != 0)
+ return _error->Errno("stat",_("Failed to stat"));
+
+ // Return a Done response
+ Res.LastModified = Buf.st_mtime;
+ Res.Size = Buf.st_size;
+ Res.TakeHashes(Hash);
+
+ URIDone(Res);
+
+ return true;
+}
+ /*}}}*/
+
+int main(int argc, char *argv[])
+{
+ setlocale(LC_ALL, "");
+
+ Bzip2Method Mth;
+
+ Prog = strrchr(argv[0],'/');
+ Prog = Prog == NULL ? argv[0] : Prog + 1;
+
+ return Mth.Run();
+}
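
A note on the dispatch trick the header comment describes: the compressor to exec is chosen purely from argv[0] plus the Dir::bin::<name> option, which is why the lzma symlink installed by the makefile further down ends up running "lzma -d" through this same binary. A minimal sketch of that lookup, assuming apt-pkg's global _config object and linking against -lapt-pkg; the option and program names follow Fetch() and main() above:

    #include <apt-pkg/configuration.h>
    #include <cstring>
    #include <iostream>

    int main(int argc, char *argv[])
    {
       // Same derivation as in main(): the method name is the basename of
       // the executable, and Dir::bin::<name> may override the binary path.
       const char *Prog = strrchr(argv[0], '/');
       Prog = (Prog == NULL) ? argv[0] : Prog + 1;

       std::string Binary = _config->Find("Dir::bin::" + std::string(Prog), Prog);
       std::cout << "would exec: " << Binary << " -d" << std::endl;
       return 0;
    }
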
diff --git a/methods/ftp.cc b/methods/ftp.cc
index 3e1725823..97248f900 100644
--- a/methods/ftp.cc
+++ b/methods/ftp.cc
@@ -661,8 +661,7 @@ bool FTPConn::ModTime(const char *Path, time_t &Time)
return true;
// Parse it
- StrToTime(Msg,Time);
- return true;
+ return FTPMDTMStrToTime(Msg.c_str(), Time);
}
/*}}}*/
// FTPConn::CreateDataFd - Get a data connection /*{{{*/
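
For reference, the reply that ModTime() now feeds to FTPMDTMStrToTime() is the RFC 3659 MDTM format: a bare "YYYYMMDDHHMMSS" timestamp in UTC. A minimal sketch of what such a parser does, assuming POSIX strptime() and the common timegm() extension; this is an illustration, not apt's actual strutl implementation:

    #include <ctime>
    #include <cstring>

    // Parse an FTP MDTM reply such as "20100315104500" into a UTC time_t.
    bool MDTMStrToTime(const char *str, time_t &time)    // hypothetical name
    {
       struct tm Tm;
       memset(&Tm, 0, sizeof(Tm));
       if (strptime(str, "%Y%m%d%H%M%S", &Tm) == NULL)
          return false;
       time = timegm(&Tm);    // MDTM timestamps are always UTC
       return true;
    }
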
diff --git a/methods/ftp.h b/methods/ftp.h
index 1bcea41b6..d7f1f7fbe 100644
--- a/methods/ftp.h
+++ b/methods/ftp.h
@@ -40,7 +40,7 @@ class FTPConn
public:
- bool Comp(URI Other) {return Other.Host == ServerName.Host && Other.Port == ServerName.Port;};
+ bool Comp(URI Other) {return Other.Host == ServerName.Host && Other.Port == ServerName.Port && Other.User == ServerName.User && Other.Password == ServerName.Password; };
// Raw connection IO
bool ReadResp(unsigned int &Ret,string &Text);
diff --git a/methods/gpgv.cc b/methods/gpgv.cc
index c58e6cc45..018e4f622 100644
--- a/methods/gpgv.cc
+++ b/methods/gpgv.cc
@@ -2,6 +2,7 @@
#include <apt-pkg/acquire-method.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/fileutl.h>
+#include <apt-pkg/indexcopy.h>
#include <apti18n.h>
#include <utime.h>
@@ -12,6 +13,8 @@
#include <iostream>
#include <sstream>
+#include <vector>
+
#define GNUPGPREFIX "[GNUPG:]"
#define GNUPGBADSIG "[GNUPG:] BADSIG"
#define GNUPGNOPUBKEY "[GNUPG:] NO_PUBKEY"
@@ -52,107 +55,29 @@ string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile,
if (Debug == true)
std::clog << "inside VerifyGetSigners" << std::endl;
- pid_t pid;
int fd[2];
- FILE *pipein;
- int status;
- string const gpgvpath = _config->Find("Dir::Bin::gpg", "/usr/bin/gpgv");
- // FIXME: remove support for deprecated APT::GPGV setting
- string const trustedFile = _config->FindFile("Dir::Etc::Trusted",
- _config->Find("APT::GPGV::TrustedKeyring", "/etc/apt/trusted.gpg").c_str());
- string const trustedPath = _config->FindDir("Dir::Etc::TrustedParts", "/etc/apt/trusted.gpg.d");
- if (Debug == true)
- {
- std::clog << "gpgv path: " << gpgvpath << std::endl;
- std::clog << "Keyring file: " << trustedFile << std::endl;
- std::clog << "Keyring path: " << trustedPath << std::endl;
- }
-
- vector<string> keyrings = GetListOfFilesInDir(trustedPath, "gpg", false);
- if (FileExists(trustedFile) == true)
- keyrings.push_back(trustedFile);
-
- if (keyrings.empty() == true)
- {
- // TRANSLATOR: %s is the trusted keyring parts directory
- ioprintf(ret, _("No keyring installed in %s."), trustedPath.c_str());
- return ret.str();
- }
if (pipe(fd) < 0)
return "Couldn't create pipe";
- pid = fork();
+ pid_t pid = fork();
if (pid < 0)
return string("Couldn't spawn new process") + strerror(errno);
else if (pid == 0)
{
- const char *Args[400];
- unsigned int i = 0;
-
- Args[i++] = gpgvpath.c_str();
- Args[i++] = "--status-fd";
- Args[i++] = "3";
- Args[i++] = "--ignore-time-conflict";
- for (vector<string>::const_iterator K = keyrings.begin();
- K != keyrings.end(); ++K)
- {
- Args[i++] = "--keyring";
- Args[i++] = K->c_str();
- // check overflow (minus a bit of extra space at the end)
- if(i >= sizeof(Args)/sizeof(char*)-5) {
- std::clog << _("E: Too many keyrings should be passed to gpgv. Exiting.") << std::endl;
- exit(111);
- }
- }
-
- Configuration::Item const *Opts;
- Opts = _config->Tree("Acquire::gpgv::Options");
- if (Opts != 0)
- {
- Opts = Opts->Child;
- for (; Opts != 0; Opts = Opts->Next)
- {
- if (Opts->Value.empty() == true)
- continue;
- Args[i++] = Opts->Value.c_str();
- // check overflow (minus a bit of extra space at the end)
- if(i >= sizeof(Args)/sizeof(char*)-5) {
- std::clog << _("E: Argument list from Acquire::gpgv::Options too long. Exiting.") << std::endl;
- exit(111);
- }
- }
- }
- Args[i++] = file;
- Args[i++] = outfile;
- Args[i++] = NULL;
-
- if (Debug == true)
+ if (SigVerify::RunGPGV(outfile, file, 3, fd) == false)
{
- std::clog << "Preparing to exec: " << gpgvpath;
- for(unsigned int j=0;Args[j] != NULL; j++)
- std::clog << " " << Args[j];
- std::clog << std::endl;
+ // TRANSLATOR: %s is the trusted keyring parts directory
+ ioprintf(ret, _("No keyring installed in %s."),
+ _config->FindDir("Dir::Etc::TrustedParts", "/etc/apt/trusted.gpg.d").c_str());
+ return ret.str();
}
- int const nullfd = open("/dev/null", O_RDONLY);
- close(fd[0]);
- // Redirect output to /dev/null; we read from the status fd
- dup2(nullfd, STDOUT_FILENO);
- dup2(nullfd, STDERR_FILENO);
- // Redirect the pipe to the status fd (3)
- dup2(fd[1], 3);
-
- putenv((char *)"LANG=");
- putenv((char *)"LC_ALL=");
- putenv((char *)"LC_MESSAGES=");
- execvp(gpgvpath.c_str(), (char **)Args);
-
exit(111);
}
close(fd[1]);
- pipein = fdopen(fd[0], "r");
-
+ FILE *pipein = fdopen(fd[0], "r");
+
// Loop over the output of gpgv, and check the signatures.
size_t buffersize = 64;
char *buffer = (char *) malloc(buffersize);
@@ -225,6 +150,7 @@ string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile,
}
fclose(pipein);
+ int status;
waitpid(pid, &status, 0);
if (Debug == true)
{
@@ -243,7 +169,7 @@ string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile,
}
else if (WEXITSTATUS(status) == 111)
{
- ioprintf(ret, _("Could not execute '%s' to verify signature (is gpgv installed?)"), gpgvpath.c_str());
+ ioprintf(ret, _("Could not execute 'gpgv' to verify signature (is gpgv installed?)"));
return ret.str();
}
else
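
The parsing loop further down in VerifyGetSigners() (unchanged by this diff) still reads gpgv's --status-fd output line by line and matches it against the GNUPG* prefixes defined at the top of the file. A small, hypothetical illustration of that classification step; GOODSIG is assumed alongside the BADSIG/NO_PUBKEY prefixes visible above, and the surrounding I/O is omitted:

    #include <cstring>
    #include <string>

    enum SigResult { SIG_GOOD, SIG_BAD, SIG_NOPUBKEY, SIG_OTHER };

    // Classify one status line, e.g. "[GNUPG:] GOODSIG <longkeyid> <user id>".
    static SigResult ClassifyStatusLine(const std::string &line)
    {
       if (line.compare(0, strlen("[GNUPG:] GOODSIG"), "[GNUPG:] GOODSIG") == 0)
          return SIG_GOOD;
       if (line.compare(0, strlen("[GNUPG:] BADSIG"), "[GNUPG:] BADSIG") == 0)
          return SIG_BAD;
       if (line.compare(0, strlen("[GNUPG:] NO_PUBKEY"), "[GNUPG:] NO_PUBKEY") == 0)
          return SIG_NOPUBKEY;
       return SIG_OTHER;
    }
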
diff --git a/methods/gzip.cc b/methods/gzip.cc
index f732c0b86..72e3ac909 100644
--- a/methods/gzip.cc
+++ b/methods/gzip.cc
@@ -23,8 +23,6 @@
#include <apti18n.h>
/*}}}*/
-const char *Prog;
-
class GzipMethod : public pkgAcqMethod
{
virtual bool Fetch(FetchItem *Itm);
@@ -43,14 +41,12 @@ bool GzipMethod::Fetch(FetchItem *Itm)
URI Get = Itm->Uri;
string Path = Get.Host + Get.Path; // To account for relative paths
- string GzPathOption = "Dir::bin::"+string(Prog);
-
FetchResult Res;
Res.Filename = Itm->DestFile;
URIStart(Res);
// Open the source and destination files
- FileFd From(Path,FileFd::ReadOnly);
+ FileFd From(Path,FileFd::ReadOnlyGzip);
// if the file is empty, just rename it and return
if(From.Size() == 0)
@@ -59,40 +55,12 @@ bool GzipMethod::Fetch(FetchItem *Itm)
return true;
}
- int GzOut[2];
- if (pipe(GzOut) < 0)
- return _error->Errno("pipe",_("Couldn't open pipe for %s"),Prog);
-
- // Fork gzip
- pid_t Process = ExecFork();
- if (Process == 0)
- {
- close(GzOut[0]);
- dup2(From.Fd(),STDIN_FILENO);
- dup2(GzOut[1],STDOUT_FILENO);
- From.Close();
- close(GzOut[1]);
- SetCloseExec(STDIN_FILENO,false);
- SetCloseExec(STDOUT_FILENO,false);
-
- const char *Args[3];
- string Tmp = _config->Find(GzPathOption,Prog);
- Args[0] = Tmp.c_str();
- Args[1] = "-d";
- Args[2] = 0;
- execvp(Args[0],(char **)Args);
- _exit(100);
- }
- From.Close();
- close(GzOut[1]);
-
- FileFd FromGz(GzOut[0]); // For autoclose
FileFd To(Itm->DestFile,FileFd::WriteEmpty);
To.EraseOnFailure();
if (_error->PendingError() == true)
return false;
- // Read data from gzip, generate checksums and write
+ // Read data from source, generate checksums and write
Hashes Hash;
bool Failed = false;
while (1)
@@ -100,36 +68,23 @@ bool GzipMethod::Fetch(FetchItem *Itm)
unsigned char Buffer[4*1024];
unsigned long Count;
- Count = read(GzOut[0],Buffer,sizeof(Buffer));
- if (Count < 0 && errno == EINTR)
- continue;
-
- if (Count < 0)
+ if (!From.Read(Buffer,sizeof(Buffer),&Count))
{
- _error->Errno("read", _("Read error from %s process"),Prog);
- Failed = true;
- break;
+ To.OpFail();
+ return false;
}
-
if (Count == 0)
break;
-
+
Hash.Add(Buffer,Count);
if (To.Write(Buffer,Count) == false)
{
Failed = true;
- FromGz.Close();
break;
}
}
- // Wait for gzip to finish
- if (ExecWait(Process,_config->Find(GzPathOption,Prog).c_str(),false) == false)
- {
- To.OpFail();
- return false;
- }
-
+ From.Close();
To.Close();
if (Failed == true)
@@ -165,9 +120,5 @@ int main(int argc, char *argv[])
setlocale(LC_ALL, "");
GzipMethod Mth;
-
- Prog = strrchr(argv[0],'/');
- Prog++;
-
return Mth.Run();
}
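
The net effect of this hunk: the gzip method no longer forks "gzip -d" at all; opening the source with FileFd::ReadOnlyGzip makes FileFd decompress transparently, so the plain Read()/Write() loop is enough. A minimal sketch of that read pattern using the FileFd API exactly as it appears in this diff (the function name and file are made up):

    #include <apt-pkg/fileutl.h>
    #include <apt-pkg/error.h>

    // Count the uncompressed bytes of a .gz file; decompression happens
    // inside FileFd::Read() when the file was opened with ReadOnlyGzip.
    bool CountUncompressed(const char *path, unsigned long long &total)
    {
       FileFd From(path, FileFd::ReadOnlyGzip);
       if (_error->PendingError() == true)
          return false;

       unsigned char Buffer[4*1024];
       unsigned long Count = 0;
       total = 0;
       do
       {
          if (From.Read(Buffer, sizeof(Buffer), &Count) == false)
             return false;          // read or decompression error
          total += Count;
       } while (Count != 0);
       return true;
    }
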
diff --git a/methods/http.cc b/methods/http.cc
index 904030e0a..9fa74bffa 100644
--- a/methods/http.cc
+++ b/methods/http.cc
@@ -67,7 +67,7 @@ unsigned long CircleBuf::BwReadLimit=0;
unsigned long CircleBuf::BwTickReadData=0;
struct timeval CircleBuf::BwReadTick={0,0};
const unsigned int CircleBuf::BW_HZ=10;
-
+
// CircleBuf::CircleBuf - Circular input buffer /*{{{*/
// ---------------------------------------------------------------------
/* */
@@ -378,7 +378,7 @@ bool ServerState::Close()
// ---------------------------------------------------------------------
/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header
parse error occurred */
-int ServerState::RunHeaders()
+ServerState::RunHeadersResult ServerState::RunHeaders()
{
State = Header;
@@ -407,7 +407,7 @@ int ServerState::RunHeaders()
string::const_iterator J = I;
for (; J != Data.end() && *J != '\n' && *J != '\r';J++);
if (HeaderLine(string(I,J)) == false)
- return 2;
+ return RUN_HEADERS_PARSE_ERROR;
I = J;
}
@@ -419,11 +419,11 @@ int ServerState::RunHeaders()
if (Encoding == Closes && HaveContent == true)
Persistent = false;
- return 0;
+ return RUN_HEADERS_OK;
}
while (Owner->Go(false,this) == true);
- return 1;
+ return RUN_HEADERS_IO_ERROR;
}
/*}}}*/
// ServerState::RunData - Transfer the data from the socket /*{{{*/
@@ -631,7 +631,7 @@ bool ServerState::HeaderLine(string Line)
if (stringcasecmp(Tag,"Last-Modified:") == 0)
{
- if (StrToTime(Val,Date) == false)
+ if (RFC1123StrToTime(Val.c_str(), Date) == false)
return _error->Error(_("Unknown date format"));
return true;
}
@@ -734,7 +734,7 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out)
Base64Encode(Uri.User + ":" + Uri.Password) + "\r\n";
}
Req += "User-Agent: " + _config->Find("Acquire::http::User-Agent",
- "Ubuntu APT-HTTP/1.3 ("VERSION")") + "\r\n\r\n";
+ "Debian APT-HTTP/1.3 ("VERSION")") + "\r\n\r\n";
if (Debug == true)
cerr << Req << endl;
@@ -914,15 +914,10 @@ bool HttpMethod::ServerDie(ServerState *Srv)
// HttpMethod::DealWithHeaders - Handle the retrieved header data /*{{{*/
// ---------------------------------------------------------------------
/* We look at the header data we got back from the server and decide what
- to do. Returns
- 0 - File is open,
- 1 - IMS hit
- 3 - Unrecoverable error
- 4 - Error with error content page
- 5 - Unrecoverable non-server error (close the connection)
- 6 - Try again with a new or changed URI
+ to do. Returns DealWithHeadersResult (see http.h for details).
*/
-int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
+HttpMethod::DealWithHeadersResult
+HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
{
// Not Modified
if (Srv->Result == 304)
@@ -930,7 +925,7 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
unlink(Queue->DestFile.c_str());
Res.IMSHit = true;
Res.LastModified = Queue->LastModified;
- return 1;
+ return IMS_HIT;
}
/* Redirect
@@ -949,7 +944,7 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
if (!Srv->Location.empty())
{
NextURI = Srv->Location;
- return 6;
+ return TRY_AGAIN_OR_REDIRECT;
}
/* else pass through for error message */
}
@@ -963,8 +958,8 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
SetFailReason(err);
_error->Error("%u %s",Srv->Result,Srv->Code);
if (Srv->HaveContent == true)
- return 4;
- return 3;
+ return ERROR_WITH_CONTENT_PAGE;
+ return ERROR_UNRECOVERABLE;
}
// This is some sort of 2xx 'data follows' reply
@@ -975,7 +970,7 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
delete File;
File = new FileFd(Queue->DestFile,FileFd::WriteAny);
if (_error->PendingError() == true)
- return 5;
+ return ERROR_NOT_FROM_SERVER;
FailFile = Queue->DestFile;
FailFile.c_str(); // Make sure we dont do a malloc in the signal handler
@@ -1003,13 +998,13 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
if (Srv->In.Hash->AddFD(File->Fd(),Srv->StartPos) == false)
{
_error->Errno("read",_("Problem hashing file"));
- return 5;
+ return ERROR_NOT_FROM_SERVER;
}
lseek(File->Fd(),0,SEEK_END);
}
SetNonBlock(File->Fd(),true);
- return 0;
+ return FILE_IS_OPEN;
}
/*}}}*/
// HttpMethod::SigTerm - Handle a fatal signal /*{{{*/
@@ -1037,7 +1032,7 @@ void HttpMethod::SigTerm(int)
depth. */
bool HttpMethod::Fetch(FetchItem *)
{
- if (Server == 0)
+ if (Server == 0)
return true;
// Queue the requests
@@ -1150,11 +1145,11 @@ int HttpMethod::Loop()
// Fetch the next URL header data from the server.
switch (Server->RunHeaders())
{
- case 0:
+ case ServerState::RUN_HEADERS_OK:
break;
// The header data is bad
- case 2:
+ case ServerState::RUN_HEADERS_PARSE_ERROR:
{
_error->Error(_("Bad header data"));
Fail(true);
@@ -1164,7 +1159,7 @@ int HttpMethod::Loop()
// The server closed a connection during the header get..
default:
- case 1:
+ case ServerState::RUN_HEADERS_IO_ERROR:
{
FailCounter++;
_error->Discard();
@@ -1188,7 +1183,7 @@ int HttpMethod::Loop()
switch (DealWithHeaders(Res,Server))
{
// Ok, the file is Open
- case 0:
+ case FILE_IS_OPEN:
{
URIStart(Res);
@@ -1241,21 +1236,21 @@ int HttpMethod::Loop()
}
// IMS hit
- case 1:
+ case IMS_HIT:
{
URIDone(Res);
break;
}
// Hard server error, not found or something
- case 3:
+ case ERROR_UNRECOVERABLE:
{
Fail();
break;
}
// Hard internal error, kill the connection and fail
- case 5:
+ case ERROR_NOT_FROM_SERVER:
{
delete File;
File = 0;
@@ -1267,7 +1262,7 @@ int HttpMethod::Loop()
}
// We need to flush the data, the header is like a 404 w/ error text
- case 4:
+ case ERROR_WITH_CONTENT_PAGE:
{
Fail();
@@ -1280,7 +1275,7 @@ int HttpMethod::Loop()
}
// Try again with a new URL
- case 6:
+ case TRY_AGAIN_OR_REDIRECT:
{
// Clear rest of response if there is content
if (Server->HaveContent)
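
Two small behaviour notes on this file: the User-Agent string goes back to the Debian default, and Last-Modified headers are now parsed with RFC1123StrToTime() instead of the old, more permissive StrToTime(). For orientation, a sketch of what parsing the canonical RFC 1123 form ("Sun, 06 Nov 1994 08:49:37 GMT") amounts to; the real helper may also accept the older RFC 850 and asctime() forms that HTTP/1.1 allows:

    #include <ctime>
    #include <cstring>

    // Parse an RFC 1123 HTTP date (C locale assumed for %a/%b).
    bool ParseHttpDate(const char *str, time_t &time)    // hypothetical name
    {
       struct tm Tm;
       memset(&Tm, 0, sizeof(Tm));
       if (strptime(str, "%a, %d %b %Y %H:%M:%S GMT", &Tm) == NULL)
          return false;
       time = timegm(&Tm);    // HTTP dates are always GMT
       return true;
    }
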
diff --git a/methods/http.h b/methods/http.h
index bac94e177..0bc019e77 100644
--- a/methods/http.h
+++ b/methods/http.h
@@ -13,7 +13,7 @@
#define MAXLEN 360
-
+#include <apt-pkg/hashes.h>
using std::cout;
using std::endl;
@@ -117,7 +117,19 @@ struct ServerState
void Reset() {Major = 0; Minor = 0; Result = 0; Size = 0; StartPos = 0;
Encoding = Closes; time(&Date); ServerFd = -1;
Pipeline = true;};
- int RunHeaders();
+
+ /** \brief Result of the header acquire */
+ enum RunHeadersResult {
+ /** \brief Header ok */
+ RUN_HEADERS_OK,
+ /** \brief IO error while retrieving */
+ RUN_HEADERS_IO_ERROR,
+ /** \brief Parse error after retrieving */
+ RUN_HEADERS_PARSE_ERROR,
+ };
+ /** \brief Get the headers before the data */
+ RunHeadersResult RunHeaders();
+ /** \brief Transfer the data from the socket */
bool RunData();
bool Open();
@@ -133,7 +145,26 @@ class HttpMethod : public pkgAcqMethod
bool Go(bool ToFile,ServerState *Srv);
bool Flush(ServerState *Srv);
bool ServerDie(ServerState *Srv);
- int DealWithHeaders(FetchResult &Res,ServerState *Srv);
+
+ /** \brief Result of the header parsing */
+ enum DealWithHeadersResult {
+ /** \brief The file is open and ready */
+ FILE_IS_OPEN,
+ /** \brief We got a IMS hit, the file has not changed */
+ IMS_HIT,
+ /** \brief The server reported a unrecoverable error */
+ ERROR_UNRECOVERABLE,
+ /** \brief The server reported a error with a error content page */
+ ERROR_WITH_CONTENT_PAGE,
+ /** \brief A error on the client side */
+ ERROR_NOT_FROM_SERVER,
+ /** \brief A redirect or retry request */
+ TRY_AGAIN_OR_REDIRECT
+ };
+ /** \brief Handle the retrieved header data */
+ DealWithHeadersResult DealWithHeaders(FetchResult &Res,ServerState *Srv);
+
+ /** \brief Try to AutoDetect the proxy */
bool AutoDetectProxy();
virtual bool Configuration(string Message);
@@ -149,7 +180,7 @@ class HttpMethod : public pkgAcqMethod
string NextURI;
string AutoDetectProxyCmd;
-
+
public:
friend class ServerState;
diff --git a/methods/makefile b/methods/makefile
index eabe85cfd..d94a85340 100644
--- a/methods/makefile
+++ b/methods/makefile
@@ -86,9 +86,16 @@ LIB_MAKES = apt-pkg/makefile
SOURCE = mirror.cc http.cc rfc2553emu.cc connect.cc
include $(PROGRAM_H)
-# SSH and bzip2 method symlink
-binary: $(BIN)/ssh $(BIN)/bzip2 $(BIN)/lzma
-veryclean: clean-$(BIN)/ssh clean-$(BIN)/bzip2 clean-$(BIN)/lzma
+# The bzip2 method
+PROGRAM=bzip2
+SLIBS = -lapt-pkg $(INTLLIBS)
+LIB_MAKES = apt-pkg/makefile
+SOURCE = bzip2.cc
+include $(PROGRAM_H)
+
+# SSH and lzma method symlink
+binary: $(BIN)/ssh $(BIN)/lzma
+veryclean: clean-$(BIN)/ssh clean-$(BIN)/lzma
$(BIN)/ssh:
echo "Installing ssh method link"
@@ -96,13 +103,8 @@ $(BIN)/ssh:
clean-$(BIN)/ssh:
-rm $(BIN)/ssh
-$(BIN)/bzip2:
- echo "Installing bzip2 method link"
- ln -fs gzip $(BIN)/bzip2
$(BIN)/lzma:
echo "Installing lzma method link"
- ln -fs gzip $(BIN)/lzma
-clean-$(BIN)/bzip2:
- -rm $(BIN)/bzip2
+ ln -fs bzip2 $(BIN)/lzma
clean-$(BIN)/lzma:
-rm $(BIN)/lzma
diff --git a/methods/mirror.cc b/methods/mirror.cc
index b3a956b95..e8873d97b 100644
--- a/methods/mirror.cc
+++ b/methods/mirror.cc
@@ -25,6 +25,8 @@
using namespace std;
+#include<sstream>
+
#include "mirror.h"
#include "http.h"
#include "apti18n.h"
@@ -104,7 +106,7 @@ bool MirrorMethod::Clean(string Dir)
for(I=list.begin(); I != list.end(); I++)
{
string uri = (*I)->GetURI();
- if(uri.substr(0,strlen("mirror://")) != string("mirror://"))
+ if(uri.find("mirror://") != 0)
continue;
string BaseUri = uri.substr(0,uri.size()-1);
if (URItoFileName(BaseUri) == Dir->d_name)
@@ -126,28 +128,6 @@ bool MirrorMethod::DownloadMirrorFile(string mirror_uri_str)
if(Debug)
clog << "MirrorMethod::DownloadMirrorFile(): " << endl;
- // check the file, if it is not older than RefreshInterval just use it
- // otherwise try to get a new one
- if(FileExists(MirrorFile))
- {
- struct stat buf;
- time_t t,now,refresh;
- if(stat(MirrorFile.c_str(), &buf) != 0)
- return false;
- t = std::max(buf.st_mtime, buf.st_ctime);
- now = time(NULL);
- refresh = 60*_config->FindI("Acquire::Mirror::RefreshInterval",360);
- if(t + refresh > now)
- {
- if(Debug)
- clog << "Mirror file is in RefreshInterval" << endl;
- DownloadedMirrorFile = true;
- return true;
- }
- if(Debug)
- clog << "Mirror file " << MirrorFile << " older than " << refresh << "min, re-download it" << endl;
- }
-
// not that great to use pkgAcquire here, but we do not have
// any other way right now
string fetch = BaseUri;
@@ -162,7 +142,55 @@ bool MirrorMethod::DownloadMirrorFile(string mirror_uri_str)
return res;
}
-bool MirrorMethod::SelectMirror()
+/* Convert the Queue->Uri back to the mirror base uri and look
+ * at all mirrors we have for it; this is needed as Queue->Uri
+ * may point to a different mirror (if TryNextMirror() was run)
+ */
+void MirrorMethod::CurrentQueueUriToMirror()
+{
+ // already in mirror:// style so nothing to do
+ if(Queue->Uri.find("mirror://") == 0)
+ return;
+
+ // find the mirror the uri currently points to and map it back to BaseUri
+ for (vector<string>::const_iterator mirror = AllMirrors.begin();
+ mirror != AllMirrors.end(); ++mirror)
+ {
+ if (Queue->Uri.find(*mirror) == 0)
+ {
+ Queue->Uri.replace(0, mirror->length(), BaseUri);
+ return;
+ }
+ }
+ _error->Error("Internal error: Failed to convert %s back to %s",
+ Queue->Uri.c_str(), BaseUri.c_str());
+}
+
+bool MirrorMethod::TryNextMirror()
+{
+ // find current mirror and select next one
+ for (vector<string>::const_iterator mirror = AllMirrors.begin();
+ mirror != AllMirrors.end(); ++mirror)
+ {
+ if (Queue->Uri.find(*mirror) != 0)
+ continue;
+
+ vector<string>::const_iterator nextmirror = mirror + 1;
+ if (nextmirror == AllMirrors.end())
+ break;
+ Queue->Uri.replace(0, mirror->length(), *nextmirror);
+ if (Debug)
+ clog << "TryNextMirror: " << Queue->Uri << endl;
+ return true;
+ }
+
+ if (Debug)
+ clog << "TryNextMirror could not find another mirror to try" << endl;
+
+ return false;
+}
+
+bool MirrorMethod::InitMirrors()
{
// if we do not have a MirrorFile, fallback
if(!FileExists(MirrorFile))
@@ -179,10 +207,14 @@ bool MirrorMethod::SelectMirror()
// get into sync issues (got indexfiles from mirror A,
// but packages from mirror B - one might be out of date etc)
ifstream in(MirrorFile.c_str());
- getline(in, Mirror);
- if(Debug)
- cerr << "Using mirror: " << Mirror << endl;
-
+ string s;
+ while (!in.eof())
+ {
+ getline(in, s);
+ if (s.size() > 0)
+ AllMirrors.push_back(s);
+ }
+ Mirror = AllMirrors[0];
UsedMirror = Mirror;
return true;
}
@@ -274,23 +306,20 @@ bool MirrorMethod::Fetch(FetchItem *Itm)
DownloadMirrorFile(Itm->Uri);
}
- if(Mirror.empty()) {
- if(!SelectMirror()) {
+ if(AllMirrors.empty()) {
+ if(!InitMirrors()) {
// no valid mirror selected, something went wrong downloading
// from the master mirror site most likely and there is
// no old mirror file availalbe
return false;
}
}
- if(Debug)
- clog << "selected mirror: " << Mirror << endl;
+ if(Itm->Uri.find("mirror://") != string::npos)
+ Itm->Uri.replace(0,BaseUri.size(), Mirror);
- for (FetchItem *I = Queue; I != 0; I = I->Next)
- {
- if(I->Uri.find("mirror://") != string::npos)
- I->Uri.replace(0,BaseUri.size(), Mirror);
- }
+ if(Debug)
+ clog << "Fetch: " << Itm->Uri << endl << endl;
// now run the real fetcher
return HttpMethod::Fetch(Itm);
@@ -298,22 +327,35 @@ bool MirrorMethod::Fetch(FetchItem *Itm)
void MirrorMethod::Fail(string Err,bool Transient)
{
- if(Queue->Uri.find("http://") != string::npos)
- Queue->Uri.replace(0,Mirror.size(), BaseUri);
+ // FIXME: TryNextMirror is not ideal for indexfile as we may
+ // run into auth issues
+
+ if (Debug)
+ clog << "Failure to get " << Queue->Uri << endl;
+
+ // try the next mirror on failure (unless it is an expected failure,
+ // e.g. missing translations are ok to ignore)
+ if (!Queue->FailIgnore && TryNextMirror())
+ return;
+
+ // all mirrors failed, so bail out
+ string s;
+ strprintf(s, _("[Mirror: %s]"), Mirror.c_str());
+ SetIP(s);
+
+ CurrentQueueUriToMirror();
pkgAcqMethod::Fail(Err, Transient);
}
void MirrorMethod::URIStart(FetchResult &Res)
{
- if(Queue->Uri.find("http://") != string::npos)
- Queue->Uri.replace(0,Mirror.size(), BaseUri);
+ CurrentQueueUriToMirror();
pkgAcqMethod::URIStart(Res);
}
void MirrorMethod::URIDone(FetchResult &Res,FetchResult *Alt)
{
- if(Queue->Uri.find("http://") != string::npos)
- Queue->Uri.replace(0,Mirror.size(), BaseUri);
+ CurrentQueueUriToMirror();
pkgAcqMethod::URIDone(Res, Alt);
}
diff --git a/methods/mirror.h b/methods/mirror.h
index ed817806b..0a3ea6e92 100644
--- a/methods/mirror.h
+++ b/methods/mirror.h
@@ -26,6 +26,7 @@ class MirrorMethod : public HttpMethod
// we simply transform between BaseUri and Mirror
string BaseUri; // the original mirror://... url
string Mirror; // the selected mirror uri (http://...)
+ vector<string> AllMirrors; // all available mirrors
string MirrorFile; // the file that contains the list of mirrors
bool DownloadedMirrorFile; // already downloaded this session
@@ -34,7 +35,9 @@ class MirrorMethod : public HttpMethod
protected:
bool DownloadMirrorFile(string uri);
string GetMirrorFileName(string uri);
- bool SelectMirror();
+ bool InitMirrors();
+ bool TryNextMirror();
+ void CurrentQueueUriToMirror();
bool Clean(string dir);
// we need to overwrite those to transform the url back
diff --git a/methods/rred.cc b/methods/rred.cc
index 262c78cab..f42c7a072 100644
--- a/methods/rred.cc
+++ b/methods/rred.cc
@@ -477,23 +477,26 @@ bool RredMethod::Fetch(FetchItem *Itm) /*{{{*/
Patch.Close();
To.Close();
- // Transfer the modification times
- struct stat Buf;
- if (stat(Path.c_str(),&Buf) != 0)
+ /* Transfer the modification time from the patch file so we can
+ see which state the resulting file should be in, and keep the
+ access time of the "old" file */
+ struct stat BufBase, BufPatch;
+ if (stat(Path.c_str(),&BufBase) != 0 ||
+ stat(string(Path+".ed").c_str(),&BufPatch) != 0)
return _error->Errno("stat",_("Failed to stat"));
struct utimbuf TimeBuf;
- TimeBuf.actime = Buf.st_atime;
- TimeBuf.modtime = Buf.st_mtime;
+ TimeBuf.actime = BufBase.st_atime;
+ TimeBuf.modtime = BufPatch.st_mtime;
if (utime(Itm->DestFile.c_str(),&TimeBuf) != 0)
return _error->Errno("utime",_("Failed to set modification time"));
- if (stat(Itm->DestFile.c_str(),&Buf) != 0)
+ if (stat(Itm->DestFile.c_str(),&BufBase) != 0)
return _error->Errno("stat",_("Failed to stat"));
// return done
- Res.LastModified = Buf.st_mtime;
- Res.Size = Buf.st_size;
+ Res.LastModified = BufBase.st_mtime;
+ Res.Size = BufBase.st_size;
Res.TakeHashes(Hash);
URIDone(Res);
diff --git a/methods/rsh.cc b/methods/rsh.cc
index f0ccfc42d..97b4ef151 100644
--- a/methods/rsh.cc
+++ b/methods/rsh.cc
@@ -278,8 +278,7 @@ bool RSHConn::ModTime(const char *Path, time_t &Time)
return false;
// Parse it
- StrToTime(Msg,Time);
- return true;
+ return FTPMDTMStrToTime(Msg.c_str(), Time);
}
/*}}}*/
// RSHConn::Get - Get a file /*{{{*/