Diffstat (limited to 'methods')
-rw-r--r--  methods/bzip2.cc       177
-rw-r--r--  methods/connect.cc      23
-rw-r--r--  methods/copy.cc          1
-rw-r--r--  methods/ftp.cc           3
-rw-r--r--  methods/ftp.h            2
-rw-r--r--  methods/gpgv.cc        100
-rw-r--r--  methods/gzip.cc         63
-rw-r--r--  methods/http.cc         69
-rw-r--r--  methods/http.h          43
-rw-r--r--  methods/http_main.cc    20
-rw-r--r--  methods/makefile        30
-rw-r--r--  methods/mirror.cc      372
-rw-r--r--  methods/mirror.h        55
-rw-r--r--  methods/rred.cc         19
-rw-r--r--  methods/rsh.cc           3
15 files changed, 756 insertions, 224 deletions
diff --git a/methods/bzip2.cc b/methods/bzip2.cc
new file mode 100644
index 000000000..5da214bfc
--- /dev/null
+++ b/methods/bzip2.cc
@@ -0,0 +1,177 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+/* ######################################################################
+
+ Bzip2 method - Take a file URI in and decompress it into the target
+ file.
+
+ While the method is named "bzip2", it also handles other compression
+ types: it calls the decompressor binary based on the name the method
+ is invoked under, so it can also be used to handle gzip, lzma and
+ others if named accordingly.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/acquire-method.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/hashes.h>
+
+#include <sys/stat.h>
+#include <unistd.h>
+#include <utime.h>
+#include <stdio.h>
+#include <errno.h>
+#include <apti18n.h>
+ /*}}}*/
+
+const char *Prog;
+
+class Bzip2Method : public pkgAcqMethod
+{
+ virtual bool Fetch(FetchItem *Itm);
+
+ public:
+
+ Bzip2Method() : pkgAcqMethod("1.1",SingleInstance | SendConfig) {};
+};
+
+
+// Bzip2Method::Fetch - Decompress the passed URI /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool Bzip2Method::Fetch(FetchItem *Itm)
+{
+ URI Get = Itm->Uri;
+ string Path = Get.Host + Get.Path; // To account for relative paths
+
+ string GzPathOption = "Dir::bin::"+string(Prog);
+
+ FetchResult Res;
+ Res.Filename = Itm->DestFile;
+ URIStart(Res);
+
+ // Open the source and destination files
+ FileFd From(Path,FileFd::ReadOnly);
+
+ // if the file is empty, just rename it and return
+ if(From.Size() == 0)
+ {
+ rename(Path.c_str(), Itm->DestFile.c_str());
+ return true;
+ }
+
+ int GzOut[2];
+ if (pipe(GzOut) < 0)
+ return _error->Errno("pipe",_("Couldn't open pipe for %s"),Prog);
+
+ // Fork bzip2
+ pid_t Process = ExecFork();
+ if (Process == 0)
+ {
+ close(GzOut[0]);
+ dup2(From.Fd(),STDIN_FILENO);
+ dup2(GzOut[1],STDOUT_FILENO);
+ From.Close();
+ close(GzOut[1]);
+ SetCloseExec(STDIN_FILENO,false);
+ SetCloseExec(STDOUT_FILENO,false);
+
+ const char *Args[3];
+ string Tmp = _config->Find(GzPathOption,Prog);
+ Args[0] = Tmp.c_str();
+ Args[1] = "-d";
+ Args[2] = 0;
+ execvp(Args[0],(char **)Args);
+ _exit(100);
+ }
+ From.Close();
+ close(GzOut[1]);
+
+ FileFd FromGz(GzOut[0]); // For autoclose
+ FileFd To(Itm->DestFile,FileFd::WriteEmpty);
+ To.EraseOnFailure();
+ if (_error->PendingError() == true)
+ return false;
+
+ // Read data from bzip2, generate checksums and write
+ Hashes Hash;
+ bool Failed = false;
+ while (1)
+ {
+ unsigned char Buffer[4*1024];
+ ssize_t Count; // signed: read() returns -1 on error
+
+ Count = read(GzOut[0],Buffer,sizeof(Buffer));
+ if (Count < 0 && errno == EINTR)
+ continue;
+
+ if (Count < 0)
+ {
+ _error->Errno("read", _("Read error from %s process"),Prog);
+ Failed = true;
+ break;
+ }
+
+ if (Count == 0)
+ break;
+
+ Hash.Add(Buffer,Count);
+ if (To.Write(Buffer,Count) == false)
+ {
+ Failed = true;
+ FromGz.Close();
+ break;
+ }
+ }
+
+ // Wait for bzip2 to finish
+ if (ExecWait(Process,_config->Find(GzPathOption,Prog).c_str(),false) == false)
+ {
+ To.OpFail();
+ return false;
+ }
+
+ To.Close();
+
+ if (Failed == true)
+ return false;
+
+ // Transfer the modification times
+ struct stat Buf;
+ if (stat(Path.c_str(),&Buf) != 0)
+ return _error->Errno("stat",_("Failed to stat"));
+
+ struct utimbuf TimeBuf;
+ TimeBuf.actime = Buf.st_atime;
+ TimeBuf.modtime = Buf.st_mtime;
+ if (utime(Itm->DestFile.c_str(),&TimeBuf) != 0)
+ return _error->Errno("utime",_("Failed to set modification time"));
+
+ if (stat(Itm->DestFile.c_str(),&Buf) != 0)
+ return _error->Errno("stat",_("Failed to stat"));
+
+ // Return a Done response
+ Res.LastModified = Buf.st_mtime;
+ Res.Size = Buf.st_size;
+ Res.TakeHashes(Hash);
+
+ URIDone(Res);
+
+ return true;
+}
+ /*}}}*/
+
+int main(int argc, char *argv[])
+{
+ setlocale(LC_ALL, "");
+
+ Bzip2Method Mth;
+
+ Prog = strrchr(argv[0],'/');
+ Prog++;
+
+ return Mth.Run();
+}
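The new bzip2.cc is deliberately generic: the forked child simply execs whatever binary matches the basename the method was started under (overridable via the Dir::Bin::<name> key), which is why the makefile change further down can point the lzma symlink at this one binary. A minimal standalone sketch of that dispatch, assuming only POSIX and none of the apt-pkg plumbing:

    // Sketch only: basename-driven decompressor dispatch, as in the forked
    // child of Bzip2Method::Fetch().  The real method also consults the
    // Dir::Bin::<name> configuration key and wires the pipe ends to
    // stdin/stdout before exec'ing.
    #include <cstring>
    #include <unistd.h>

    int main(int argc, char *argv[])
    {
       (void)argc;
       const char *Prog = strrchr(argv[0], '/');
       Prog = (Prog == NULL) ? argv[0] : Prog + 1;   // "bzip2", "lzma", ...

       execlp(Prog, Prog, "-d", (char *)NULL);       // decompress stdin to stdout
       return 100;                                   // exec failed, like _exit(100) above
    }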
diff --git a/methods/connect.cc b/methods/connect.cc
index 2f6b4833e..a5af1f1a6 100644
--- a/methods/connect.cc
+++ b/methods/connect.cc
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
+#include <sstream>
#include<set>
#include<string>
@@ -70,19 +71,17 @@ static bool DoConnect(struct addrinfo *Addr,string Host,
Owner->Status(_("Connecting to %s (%s)"),Host.c_str(),Name);
// if that addr did timeout before, we do not try it again
- if(bad_addr.find(string(Name)) != bad_addr.end())
+ if(bad_addr.find(string(Name)) != bad_addr.end())
return false;
/* If this is an IP rotation store the IP we are using.. If something goes
wrong this will get tacked onto the end of the error message */
if (LastHostAddr->ai_next != 0)
{
- char Name2[NI_MAXHOST + NI_MAXSERV + 10];
- snprintf(Name2,sizeof(Name2),_("[IP: %s %s]"),Name,Service);
- Owner->SetFailExtraMsg(string(Name2));
- }
- else
- Owner->SetFailExtraMsg("");
+ std::stringstream ss;
+ ioprintf(ss, _("[IP: %s %s]"),Name,Service);
+ Owner->SetIP(ss.str());
+ }
// Get a socket
if ((Fd = socket(Addr->ai_family,Addr->ai_socktype,
@@ -100,7 +99,7 @@ static bool DoConnect(struct addrinfo *Addr,string Host,
nonblocking */
if (WaitFd(Fd,true,TimeOut) == false) {
bad_addr.insert(bad_addr.begin(), string(Name));
- Owner->SetFailExtraMsg("\nFailReason: Timeout");
+ Owner->SetFailReason("Timeout");
return _error->Error(_("Could not connect to %s:%s (%s), "
"connection timed out"),Host.c_str(),Service,Name);
}
@@ -115,9 +114,9 @@ static bool DoConnect(struct addrinfo *Addr,string Host,
{
errno = Err;
if(errno == ECONNREFUSED)
- Owner->SetFailExtraMsg("\nFailReason: ConnectionRefused");
+ Owner->SetFailReason("ConnectionRefused");
else if (errno == ETIMEDOUT)
- Owner->SetFailExtraMsg("\nFailReason: ConnectionTimedOut");
+ Owner->SetFailReason("ConnectionTimedOut");
bad_addr.insert(bad_addr.begin(), string(Name));
return _error->Errno("connect",_("Could not connect to %s:%s (%s)."),Host.c_str(),
Service,Name);
@@ -184,13 +183,13 @@ bool Connect(string Host,int Port,const char *Service,int DefPort,int &Fd,
continue;
}
bad_addr.insert(bad_addr.begin(), Host);
- Owner->SetFailExtraMsg("\nFailReason: ResolveFailure");
+ Owner->SetFailReason("ResolveFailure");
return _error->Error(_("Could not resolve '%s'"),Host.c_str());
}
if (Res == EAI_AGAIN)
{
- Owner->SetFailExtraMsg("\nFailReason: TmpResolveFailure");
+ Owner->SetFailReason("TmpResolveFailure");
return _error->Error(_("Temporary failure resolving '%s'"),
Host.c_str());
}
diff --git a/methods/copy.cc b/methods/copy.cc
index 72896b4c0..027b59f46 100644
--- a/methods/copy.cc
+++ b/methods/copy.cc
@@ -84,6 +84,7 @@ bool CopyMethod::Fetch(FetchItem *Itm)
FileFd Fd(Res.Filename, FileFd::ReadOnly);
Hash.AddFD(Fd.Fd(), Fd.Size());
Res.TakeHashes(Hash);
+
URIDone(Res);
return true;
}
diff --git a/methods/ftp.cc b/methods/ftp.cc
index 3e1725823..97248f900 100644
--- a/methods/ftp.cc
+++ b/methods/ftp.cc
@@ -661,8 +661,7 @@ bool FTPConn::ModTime(const char *Path, time_t &Time)
return true;
// Parse it
- StrToTime(Msg,Time);
- return true;
+ return FTPMDTMStrToTime(Msg.c_str(), Time);
}
/*}}}*/
// FTPConn::CreateDataFd - Get a data connection /*{{{*/
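Both ftp.cc and rsh.cc (further down) switch from the lenient StrToTime() to FTPMDTMStrToTime(), which parses the timestamp carried by an FTP MDTM reply and, unlike the old call, propagates parse failures. A hedged usage sketch, assuming the apt-pkg headers are available and that the reply uses the usual YYYYMMDDhhmmss form; the sample timestamp is made up:

    // Usage sketch for FTPMDTMStrToTime() from apt-pkg/contrib/strutl.h.
    #include <apt-pkg/strutl.h>
    #include <ctime>
    #include <iostream>

    int main()
    {
       time_t Time;
       if (FTPMDTMStrToTime("20100310142400", Time) == false)
          std::cerr << "unparsable MDTM timestamp" << std::endl;
       else
          std::cout << "mtime (seconds since epoch): " << Time << std::endl;
       return 0;
    }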
diff --git a/methods/ftp.h b/methods/ftp.h
index 1bcea41b6..d7f1f7fbe 100644
--- a/methods/ftp.h
+++ b/methods/ftp.h
@@ -40,7 +40,7 @@ class FTPConn
public:
- bool Comp(URI Other) {return Other.Host == ServerName.Host && Other.Port == ServerName.Port;};
+ bool Comp(URI Other) {return Other.Host == ServerName.Host && Other.Port == ServerName.Port && Other.User == ServerName.User && Other.Password == ServerName.Password; };
// Raw connection IO
bool ReadResp(unsigned int &Ret,string &Text);
diff --git a/methods/gpgv.cc b/methods/gpgv.cc
index c58e6cc45..018e4f622 100644
--- a/methods/gpgv.cc
+++ b/methods/gpgv.cc
@@ -2,6 +2,7 @@
#include <apt-pkg/acquire-method.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/fileutl.h>
+#include <apt-pkg/indexcopy.h>
#include <apti18n.h>
#include <utime.h>
@@ -12,6 +13,8 @@
#include <iostream>
#include <sstream>
+#include <vector>
+
#define GNUPGPREFIX "[GNUPG:]"
#define GNUPGBADSIG "[GNUPG:] BADSIG"
#define GNUPGNOPUBKEY "[GNUPG:] NO_PUBKEY"
@@ -52,107 +55,29 @@ string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile,
if (Debug == true)
std::clog << "inside VerifyGetSigners" << std::endl;
- pid_t pid;
int fd[2];
- FILE *pipein;
- int status;
- string const gpgvpath = _config->Find("Dir::Bin::gpg", "/usr/bin/gpgv");
- // FIXME: remove support for deprecated APT::GPGV setting
- string const trustedFile = _config->FindFile("Dir::Etc::Trusted",
- _config->Find("APT::GPGV::TrustedKeyring", "/etc/apt/trusted.gpg").c_str());
- string const trustedPath = _config->FindDir("Dir::Etc::TrustedParts", "/etc/apt/trusted.gpg.d");
- if (Debug == true)
- {
- std::clog << "gpgv path: " << gpgvpath << std::endl;
- std::clog << "Keyring file: " << trustedFile << std::endl;
- std::clog << "Keyring path: " << trustedPath << std::endl;
- }
-
- vector<string> keyrings = GetListOfFilesInDir(trustedPath, "gpg", false);
- if (FileExists(trustedFile) == true)
- keyrings.push_back(trustedFile);
-
- if (keyrings.empty() == true)
- {
- // TRANSLATOR: %s is the trusted keyring parts directory
- ioprintf(ret, _("No keyring installed in %s."), trustedPath.c_str());
- return ret.str();
- }
if (pipe(fd) < 0)
return "Couldn't create pipe";
- pid = fork();
+ pid_t pid = fork();
if (pid < 0)
return string("Couldn't spawn new process") + strerror(errno);
else if (pid == 0)
{
- const char *Args[400];
- unsigned int i = 0;
-
- Args[i++] = gpgvpath.c_str();
- Args[i++] = "--status-fd";
- Args[i++] = "3";
- Args[i++] = "--ignore-time-conflict";
- for (vector<string>::const_iterator K = keyrings.begin();
- K != keyrings.end(); ++K)
- {
- Args[i++] = "--keyring";
- Args[i++] = K->c_str();
- // check overflow (minus a bit of extra space at the end)
- if(i >= sizeof(Args)/sizeof(char*)-5) {
- std::clog << _("E: Too many keyrings should be passed to gpgv. Exiting.") << std::endl;
- exit(111);
- }
- }
-
- Configuration::Item const *Opts;
- Opts = _config->Tree("Acquire::gpgv::Options");
- if (Opts != 0)
- {
- Opts = Opts->Child;
- for (; Opts != 0; Opts = Opts->Next)
- {
- if (Opts->Value.empty() == true)
- continue;
- Args[i++] = Opts->Value.c_str();
- // check overflow (minus a bit of extra space at the end)
- if(i >= sizeof(Args)/sizeof(char*)-5) {
- std::clog << _("E: Argument list from Acquire::gpgv::Options too long. Exiting.") << std::endl;
- exit(111);
- }
- }
- }
- Args[i++] = file;
- Args[i++] = outfile;
- Args[i++] = NULL;
-
- if (Debug == true)
+ if (SigVerify::RunGPGV(outfile, file, 3, fd) == false)
{
- std::clog << "Preparing to exec: " << gpgvpath;
- for(unsigned int j=0;Args[j] != NULL; j++)
- std::clog << " " << Args[j];
- std::clog << std::endl;
+ // TRANSLATOR: %s is the trusted keyring parts directory
+ ioprintf(ret, _("No keyring installed in %s."),
+ _config->FindDir("Dir::Etc::TrustedParts", "/etc/apt/trusted.gpg.d").c_str());
+ return ret.str();
}
- int const nullfd = open("/dev/null", O_RDONLY);
- close(fd[0]);
- // Redirect output to /dev/null; we read from the status fd
- dup2(nullfd, STDOUT_FILENO);
- dup2(nullfd, STDERR_FILENO);
- // Redirect the pipe to the status fd (3)
- dup2(fd[1], 3);
-
- putenv((char *)"LANG=");
- putenv((char *)"LC_ALL=");
- putenv((char *)"LC_MESSAGES=");
- execvp(gpgvpath.c_str(), (char **)Args);
-
exit(111);
}
close(fd[1]);
- pipein = fdopen(fd[0], "r");
-
+ FILE *pipein = fdopen(fd[0], "r");
+
// Loop over the output of gpgv, and check the signatures.
size_t buffersize = 64;
char *buffer = (char *) malloc(buffersize);
@@ -225,6 +150,7 @@ string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile,
}
fclose(pipein);
+ int status;
waitpid(pid, &status, 0);
if (Debug == true)
{
@@ -243,7 +169,7 @@ string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile,
}
else if (WEXITSTATUS(status) == 111)
{
- ioprintf(ret, _("Could not execute '%s' to verify signature (is gpgv installed?)"), gpgvpath.c_str());
+ ioprintf(ret, _("Could not execute 'gpgv' to verify signature (is gpgv installed?)"));
return ret.str();
}
else
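The gpgv refactor moves keyring discovery and the gpgv exec into SigVerify::RunGPGV() (apt-pkg/indexcopy.h); the method keeps only the parent side, which reads gpgv's --status-fd output from the pipe and classifies the lines. A small self-contained illustration of that classification, using only the two status prefixes defined at the top of gpgv.cc (the fork/pipe setup around it is omitted):

    // Illustration only: recognising the gpgv status lines the method cares
    // about.  Prefixes match the GNUPGBADSIG / GNUPGNOPUBKEY defines above;
    // feed it gpgv's status-fd output on stdin.
    #include <cstdio>
    #include <cstring>

    int main()
    {
       char line[1024];
       while (fgets(line, sizeof(line), stdin) != NULL)
       {
          if (strncmp(line, "[GNUPG:] BADSIG", strlen("[GNUPG:] BADSIG")) == 0)
             printf("bad signature: %s", line);
          else if (strncmp(line, "[GNUPG:] NO_PUBKEY", strlen("[GNUPG:] NO_PUBKEY")) == 0)
             printf("missing public key: %s", line);
          else
             printf("other status: %s", line);
       }
       return 0;
    }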
diff --git a/methods/gzip.cc b/methods/gzip.cc
index f732c0b86..72e3ac909 100644
--- a/methods/gzip.cc
+++ b/methods/gzip.cc
@@ -23,8 +23,6 @@
#include <apti18n.h>
/*}}}*/
-const char *Prog;
-
class GzipMethod : public pkgAcqMethod
{
virtual bool Fetch(FetchItem *Itm);
@@ -43,14 +41,12 @@ bool GzipMethod::Fetch(FetchItem *Itm)
URI Get = Itm->Uri;
string Path = Get.Host + Get.Path; // To account for relative paths
- string GzPathOption = "Dir::bin::"+string(Prog);
-
FetchResult Res;
Res.Filename = Itm->DestFile;
URIStart(Res);
// Open the source and destination files
- FileFd From(Path,FileFd::ReadOnly);
+ FileFd From(Path,FileFd::ReadOnlyGzip);
// if the file is empty, just rename it and return
if(From.Size() == 0)
@@ -59,40 +55,12 @@ bool GzipMethod::Fetch(FetchItem *Itm)
return true;
}
- int GzOut[2];
- if (pipe(GzOut) < 0)
- return _error->Errno("pipe",_("Couldn't open pipe for %s"),Prog);
-
- // Fork gzip
- pid_t Process = ExecFork();
- if (Process == 0)
- {
- close(GzOut[0]);
- dup2(From.Fd(),STDIN_FILENO);
- dup2(GzOut[1],STDOUT_FILENO);
- From.Close();
- close(GzOut[1]);
- SetCloseExec(STDIN_FILENO,false);
- SetCloseExec(STDOUT_FILENO,false);
-
- const char *Args[3];
- string Tmp = _config->Find(GzPathOption,Prog);
- Args[0] = Tmp.c_str();
- Args[1] = "-d";
- Args[2] = 0;
- execvp(Args[0],(char **)Args);
- _exit(100);
- }
- From.Close();
- close(GzOut[1]);
-
- FileFd FromGz(GzOut[0]); // For autoclose
FileFd To(Itm->DestFile,FileFd::WriteEmpty);
To.EraseOnFailure();
if (_error->PendingError() == true)
return false;
- // Read data from gzip, generate checksums and write
+ // Read data from source, generate checksums and write
Hashes Hash;
bool Failed = false;
while (1)
@@ -100,36 +68,23 @@ bool GzipMethod::Fetch(FetchItem *Itm)
unsigned char Buffer[4*1024];
unsigned long Count;
- Count = read(GzOut[0],Buffer,sizeof(Buffer));
- if (Count < 0 && errno == EINTR)
- continue;
-
- if (Count < 0)
+ if (!From.Read(Buffer,sizeof(Buffer),&Count))
{
- _error->Errno("read", _("Read error from %s process"),Prog);
- Failed = true;
- break;
+ To.OpFail();
+ return false;
}
-
if (Count == 0)
break;
-
+
Hash.Add(Buffer,Count);
if (To.Write(Buffer,Count) == false)
{
Failed = true;
- FromGz.Close();
break;
}
}
- // Wait for gzip to finish
- if (ExecWait(Process,_config->Find(GzPathOption,Prog).c_str(),false) == false)
- {
- To.OpFail();
- return false;
- }
-
+ From.Close();
To.Close();
if (Failed == true)
@@ -165,9 +120,5 @@ int main(int argc, char *argv[])
setlocale(LC_ALL, "");
GzipMethod Mth;
-
- Prog = strrchr(argv[0],'/');
- Prog++;
-
return Mth.Run();
}
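With FileFd::ReadOnlyGzip the method no longer forks gzip at all: FileFd decompresses transparently, and the fetch loop reduces to read / hash / write. A sketch of that loop outside the method, assuming the libapt-pkg headers and using placeholder file names:

    // Sketch of the new read path (file names are placeholders).
    #include <apt-pkg/fileutl.h>
    #include <apt-pkg/hashes.h>

    int main()
    {
       FileFd From("Packages.gz", FileFd::ReadOnlyGzip);   // transparent decompression
       FileFd To("Packages", FileFd::WriteEmpty);
       Hashes Hash;

       unsigned char Buffer[4*1024];
       unsigned long Count = 0;
       while (From.Read(Buffer, sizeof(Buffer), &Count) == true && Count != 0)
       {
          Hash.Add(Buffer, Count);                  // checksum while copying
          if (To.Write(Buffer, Count) == false)
             return 1;
       }
       From.Close();
       To.Close();
       return 0;
    }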
diff --git a/methods/http.cc b/methods/http.cc
index b05444691..9fa74bffa 100644
--- a/methods/http.cc
+++ b/methods/http.cc
@@ -67,7 +67,7 @@ unsigned long CircleBuf::BwReadLimit=0;
unsigned long CircleBuf::BwTickReadData=0;
struct timeval CircleBuf::BwReadTick={0,0};
const unsigned int CircleBuf::BW_HZ=10;
-
+
// CircleBuf::CircleBuf - Circular input buffer /*{{{*/
// ---------------------------------------------------------------------
/* */
@@ -378,7 +378,7 @@ bool ServerState::Close()
// ---------------------------------------------------------------------
/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header
parse error occurred */
-int ServerState::RunHeaders()
+ServerState::RunHeadersResult ServerState::RunHeaders()
{
State = Header;
@@ -407,7 +407,7 @@ int ServerState::RunHeaders()
string::const_iterator J = I;
for (; J != Data.end() && *J != '\n' && *J != '\r';J++);
if (HeaderLine(string(I,J)) == false)
- return 2;
+ return RUN_HEADERS_PARSE_ERROR;
I = J;
}
@@ -419,11 +419,11 @@ int ServerState::RunHeaders()
if (Encoding == Closes && HaveContent == true)
Persistent = false;
- return 0;
+ return RUN_HEADERS_OK;
}
while (Owner->Go(false,this) == true);
- return 1;
+ return RUN_HEADERS_IO_ERROR;
}
/*}}}*/
// ServerState::RunData - Transfer the data from the socket /*{{{*/
@@ -631,7 +631,7 @@ bool ServerState::HeaderLine(string Line)
if (stringcasecmp(Tag,"Last-Modified:") == 0)
{
- if (StrToTime(Val,Date) == false)
+ if (RFC1123StrToTime(Val.c_str(), Date) == false)
return _error->Error(_("Unknown date format"));
return true;
}
@@ -914,15 +914,10 @@ bool HttpMethod::ServerDie(ServerState *Srv)
// HttpMethod::DealWithHeaders - Handle the retrieved header data /*{{{*/
// ---------------------------------------------------------------------
/* We look at the header data we got back from the server and decide what
- to do. Returns
- 0 - File is open,
- 1 - IMS hit
- 3 - Unrecoverable error
- 4 - Error with error content page
- 5 - Unrecoverable non-server error (close the connection)
- 6 - Try again with a new or changed URI
+ to do. Returns DealWithHeadersResult (see http.h for details).
*/
-int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
+HttpMethod::DealWithHeadersResult
+HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
{
// Not Modified
if (Srv->Result == 304)
@@ -930,7 +925,7 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
unlink(Queue->DestFile.c_str());
Res.IMSHit = true;
Res.LastModified = Queue->LastModified;
- return 1;
+ return IMS_HIT;
}
/* Redirect
@@ -949,7 +944,7 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
if (!Srv->Location.empty())
{
NextURI = Srv->Location;
- return 6;
+ return TRY_AGAIN_OR_REDIRECT;
}
/* else pass through for error message */
}
@@ -958,10 +953,13 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
failure */
if (Srv->Result < 200 || Srv->Result >= 300)
{
+ char err[255];
+ snprintf(err,sizeof(err)-1,"HttpError%i",Srv->Result);
+ SetFailReason(err);
_error->Error("%u %s",Srv->Result,Srv->Code);
if (Srv->HaveContent == true)
- return 4;
- return 3;
+ return ERROR_WITH_CONTENT_PAGE;
+ return ERROR_UNRECOVERABLE;
}
// This is some sort of 2xx 'data follows' reply
@@ -972,7 +970,7 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
delete File;
File = new FileFd(Queue->DestFile,FileFd::WriteAny);
if (_error->PendingError() == true)
- return 5;
+ return ERROR_NOT_FROM_SERVER;
FailFile = Queue->DestFile;
FailFile.c_str(); // Make sure we dont do a malloc in the signal handler
@@ -1000,13 +998,13 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
if (Srv->In.Hash->AddFD(File->Fd(),Srv->StartPos) == false)
{
_error->Errno("read",_("Problem hashing file"));
- return 5;
+ return ERROR_NOT_FROM_SERVER;
}
lseek(File->Fd(),0,SEEK_END);
}
SetNonBlock(File->Fd(),true);
- return 0;
+ return FILE_IS_OPEN;
}
/*}}}*/
// HttpMethod::SigTerm - Handle a fatal signal /*{{{*/
@@ -1147,11 +1145,11 @@ int HttpMethod::Loop()
// Fetch the next URL header data from the server.
switch (Server->RunHeaders())
{
- case 0:
+ case ServerState::RUN_HEADERS_OK:
break;
// The header data is bad
- case 2:
+ case ServerState::RUN_HEADERS_PARSE_ERROR:
{
_error->Error(_("Bad header data"));
Fail(true);
@@ -1161,7 +1159,7 @@ int HttpMethod::Loop()
// The server closed a connection during the header get..
default:
- case 1:
+ case ServerState::RUN_HEADERS_IO_ERROR:
{
FailCounter++;
_error->Discard();
@@ -1185,7 +1183,7 @@ int HttpMethod::Loop()
switch (DealWithHeaders(Res,Server))
{
// Ok, the file is Open
- case 0:
+ case FILE_IS_OPEN:
{
URIStart(Res);
@@ -1238,21 +1236,21 @@ int HttpMethod::Loop()
}
// IMS hit
- case 1:
+ case IMS_HIT:
{
URIDone(Res);
break;
}
// Hard server error, not found or something
- case 3:
+ case ERROR_UNRECOVERABLE:
{
Fail();
break;
}
// Hard internal error, kill the connection and fail
- case 5:
+ case ERROR_NOT_FROM_SERVER:
{
delete File;
File = 0;
@@ -1264,7 +1262,7 @@ int HttpMethod::Loop()
}
// We need to flush the data, the header is like a 404 w/ error text
- case 4:
+ case ERROR_WITH_CONTENT_PAGE:
{
Fail();
@@ -1277,7 +1275,7 @@ int HttpMethod::Loop()
}
// Try again with a new URL
- case 6:
+ case TRY_AGAIN_OR_REDIRECT:
{
// Clear rest of response if there is content
if (Server->HaveContent)
@@ -1371,15 +1369,4 @@ bool HttpMethod::AutoDetectProxy()
}
/*}}}*/
-int main()
-{
- setlocale(LC_ALL, "");
- // ignore SIGPIPE, this can happen on write() if the socket
- // closes the connection (this is dealt with via ServerDie())
- signal(SIGPIPE, SIG_IGN);
-
- HttpMethod Mth;
- return Mth.Loop();
-}
-
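Alongside the enum clean-up, the header parser now uses RFC1123StrToTime() for Last-Modified, which reports unparsable dates instead of silently accepting them. A hedged usage sketch (apt-pkg/contrib/strutl.h; the sample header value is arbitrary):

    // Usage sketch for RFC1123StrToTime().
    #include <apt-pkg/strutl.h>
    #include <ctime>
    #include <iostream>

    int main()
    {
       time_t Date;
       if (RFC1123StrToTime("Thu, 01 Jan 1970 00:00:00 GMT", Date) == false)
          std::cerr << "Unknown date format" << std::endl;
       else
          std::cout << Date << std::endl;   // prints 0 for the epoch
       return 0;
    }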
diff --git a/methods/http.h b/methods/http.h
index ceee36cbe..0bc019e77 100644
--- a/methods/http.h
+++ b/methods/http.h
@@ -13,7 +13,7 @@
#define MAXLEN 360
-#include <iostream>
+#include <apt-pkg/hashes.h>
using std::cout;
using std::endl;
@@ -117,7 +117,19 @@ struct ServerState
void Reset() {Major = 0; Minor = 0; Result = 0; Size = 0; StartPos = 0;
Encoding = Closes; time(&Date); ServerFd = -1;
Pipeline = true;};
- int RunHeaders();
+
+ /** \brief Result of the header acquire */
+ enum RunHeadersResult {
+ /** \brief Header ok */
+ RUN_HEADERS_OK,
+ /** \brief IO error while retrieving */
+ RUN_HEADERS_IO_ERROR,
+ /** \brief Parse error after retrieving */
+ RUN_HEADERS_PARSE_ERROR,
+ };
+ /** \brief Get the headers before the data */
+ RunHeadersResult RunHeaders();
+ /** \brief Transfer the data from the socket */
bool RunData();
bool Open();
@@ -133,10 +145,28 @@ class HttpMethod : public pkgAcqMethod
bool Go(bool ToFile,ServerState *Srv);
bool Flush(ServerState *Srv);
bool ServerDie(ServerState *Srv);
- int DealWithHeaders(FetchResult &Res,ServerState *Srv);
+
+ /** \brief Result of the header parsing */
+ enum DealWithHeadersResult {
+ /** \brief The file is open and ready */
+ FILE_IS_OPEN,
+ /** \brief We got an IMS hit, the file has not changed */
+ IMS_HIT,
+ /** \brief The server reported an unrecoverable error */
+ ERROR_UNRECOVERABLE,
+ /** \brief The server reported an error with an error content page */
+ ERROR_WITH_CONTENT_PAGE,
+ /** \brief An error on the client side */
+ ERROR_NOT_FROM_SERVER,
+ /** \brief A redirect or retry request */
+ TRY_AGAIN_OR_REDIRECT
+ };
+ /** \brief Handle the retrieved header data */
+ DealWithHeadersResult DealWithHeaders(FetchResult &Res,ServerState *Srv);
+
+ /** \brief Try to AutoDetect the proxy */
bool AutoDetectProxy();
- virtual bool Fetch(FetchItem *);
virtual bool Configuration(string Message);
// In the event of a fatal signal this file will be closed and timestamped.
@@ -144,10 +174,13 @@ class HttpMethod : public pkgAcqMethod
static int FailFd;
static time_t FailTime;
static void SigTerm(int);
+
+ protected:
+ virtual bool Fetch(FetchItem *);
string NextURI;
string AutoDetectProxyCmd;
-
+
public:
friend class ServerState;
diff --git a/methods/http_main.cc b/methods/http_main.cc
new file mode 100644
index 000000000..7815c2fc1
--- /dev/null
+++ b/methods/http_main.cc
@@ -0,0 +1,20 @@
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/acquire-method.h>
+#include <signal.h>
+
+#include "connect.h"
+#include "rfc2553emu.h"
+#include "http.h"
+
+
+int main()
+{
+ setlocale(LC_ALL, "");
+
+ // ignore SIGPIPE, this can happen on write() if the socket
+ // closes the connection (this is dealt with via ServerDie())
+ signal(SIGPIPE, SIG_IGN);
+
+ HttpMethod Mth;
+ return Mth.Loop();
+}
diff --git a/methods/makefile b/methods/makefile
index 7bcae6b9b..d94a85340 100644
--- a/methods/makefile
+++ b/methods/makefile
@@ -48,7 +48,7 @@ include $(PROGRAM_H)
PROGRAM=http
SLIBS = -lapt-pkg $(SOCKETLIBS) $(INTLLIBS)
LIB_MAKES = apt-pkg/makefile
-SOURCE = http.cc rfc2553emu.cc connect.cc
+SOURCE = http.cc http_main.cc rfc2553emu.cc connect.cc
include $(PROGRAM_H)
# The https method
@@ -79,22 +79,32 @@ LIB_MAKES = apt-pkg/makefile
SOURCE = rsh.cc
include $(PROGRAM_H)
-# SSH and bzip2 method symlink
-binary: $(BIN)/ssh $(BIN)/bzip2 $(BIN)/lzma
-veryclean: clean-$(BIN)/ssh clean-$(BIN)/bzip2 clean-$(BIN)/lzma
+# The mirror method
+PROGRAM=mirror
+SLIBS = -lapt-pkg $(SOCKETLIBS)
+LIB_MAKES = apt-pkg/makefile
+SOURCE = mirror.cc http.cc rfc2553emu.cc connect.cc
+include $(PROGRAM_H)
+
+# The bzip2 method
+PROGRAM=bzip2
+SLIBS = -lapt-pkg $(INTLLIBS)
+LIB_MAKES = apt-pkg/makefile
+SOURCE = bzip2.cc
+include $(PROGRAM_H)
+
+# SSH and lzma method symlink
+binary: $(BIN)/ssh $(BIN)/lzma
+veryclean: clean-$(BIN)/ssh clean-$(BIN)/lzma
+
$(BIN)/ssh:
echo "Installing ssh method link"
ln -fs rsh $(BIN)/ssh
clean-$(BIN)/ssh:
-rm $(BIN)/ssh
-$(BIN)/bzip2:
- echo "Installing bzip2 method link"
- ln -fs gzip $(BIN)/bzip2
$(BIN)/lzma:
echo "Installing lzma method link"
- ln -fs gzip $(BIN)/lzma
-clean-$(BIN)/bzip2:
- -rm $(BIN)/bzip2
+ ln -fs bzip2 $(BIN)/lzma
clean-$(BIN)/lzma:
-rm $(BIN)/lzma
diff --git a/methods/mirror.cc b/methods/mirror.cc
new file mode 100644
index 000000000..e8873d97b
--- /dev/null
+++ b/methods/mirror.cc
@@ -0,0 +1,372 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: mirror.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $
+/* ######################################################################
+
+ Mirror Acquire Method - This is the Mirror acquire method for APT.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/acquire-method.h>
+#include <apt-pkg/acquire-item.h>
+#include <apt-pkg/acquire.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/hashes.h>
+#include <apt-pkg/sourcelist.h>
+
+#include <fstream>
+#include <iostream>
+#include <stdarg.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <dirent.h>
+
+using namespace std;
+
+#include<sstream>
+
+#include "mirror.h"
+#include "http.h"
+#include "apti18n.h"
+ /*}}}*/
+
+/* Done:
+ * - works with http (only!)
+ * - always picks the first mirror from the list
+ * - call out to problem reporting script
+ * - supports "deb mirror://host/path/to/mirror-list/// dist component"
+ * - uses pkgAcqMethod::FailReason() to have a string representation
+ * of the failure that is also sent to LP
+ *
+ * TODO:
+ * - deal with running as non-root because we can't write to the lists
+ dir then -> use the cached mirror file
+ * - better method to download than having a pkgAcquire interface here
+ * and better error handling there!
+ * - support more than http
+ * - testing :)
+ */
+
+MirrorMethod::MirrorMethod()
+ : HttpMethod(), DownloadedMirrorFile(false)
+{
+};
+
+// HttpMethod::Configuration - Handle a configuration message /*{{{*/
+// ---------------------------------------------------------------------
+/* We stash the desired pipeline depth */
+bool MirrorMethod::Configuration(string Message)
+{
+ if (pkgAcqMethod::Configuration(Message) == false)
+ return false;
+ Debug = _config->FindB("Debug::Acquire::mirror",false);
+
+ return true;
+}
+ /*}}}*/
+
+// clean the mirrors dir based on ttl information
+bool MirrorMethod::Clean(string Dir)
+{
+ vector<metaIndex *>::const_iterator I;
+
+ if(Debug)
+ clog << "MirrorMethod::Clean(): " << Dir << endl;
+
+ if(Dir == "/")
+ return _error->Error("will not clean: '/'");
+
+ // read sources.list
+ pkgSourceList list;
+ list.ReadMainList();
+
+ DIR *D = opendir(Dir.c_str());
+ if (D == 0)
+ return _error->Errno("opendir",_("Unable to read %s"),Dir.c_str());
+
+ string StartDir = SafeGetCWD();
+ if (chdir(Dir.c_str()) != 0)
+ {
+ closedir(D);
+ return _error->Errno("chdir",_("Unable to change to %s"),Dir.c_str());
+ }
+
+ for (struct dirent *Dir = readdir(D); Dir != 0; Dir = readdir(D))
+ {
+ // Skip some files..
+ if (strcmp(Dir->d_name,"lock") == 0 ||
+ strcmp(Dir->d_name,"partial") == 0 ||
+ strcmp(Dir->d_name,".") == 0 ||
+ strcmp(Dir->d_name,"..") == 0)
+ continue;
+
+ // see if we have that uri
+ for(I=list.begin(); I != list.end(); I++)
+ {
+ string uri = (*I)->GetURI();
+ if(uri.find("mirror://") != 0)
+ continue;
+ string BaseUri = uri.substr(0,uri.size()-1);
+ if (URItoFileName(BaseUri) == Dir->d_name)
+ break;
+ }
+ // nothing found, nuke it
+ if (I == list.end())
+ unlink(Dir->d_name);
+ };
+
+ chdir(StartDir.c_str());
+ closedir(D);
+ return true;
+}
+
+
+bool MirrorMethod::DownloadMirrorFile(string mirror_uri_str)
+{
+ if(Debug)
+ clog << "MirrorMethod::DownloadMirrorFile(): " << endl;
+
+ // not that great to use pkgAcquire here, but we do not have
+ // any other way right now
+ string fetch = BaseUri;
+ fetch.replace(0,strlen("mirror://"),"http://");
+
+ pkgAcquire Fetcher;
+ new pkgAcqFile(&Fetcher, fetch, "", 0, "", "", "", MirrorFile);
+ bool res = (Fetcher.Run() == pkgAcquire::Continue);
+ if(res)
+ DownloadedMirrorFile = true;
+ Fetcher.Shutdown();
+ return res;
+}
+
+/* convert the Queue->Uri back to the mirror base uri and look
+ * at all mirrors we have for this; this is needed as Queue->Uri
+ * may point to different mirrors (if TryNextMirror() was run)
+ */
+void MirrorMethod::CurrentQueueUriToMirror()
+{
+ // already in mirror:// style so nothing to do
+ if(Queue->Uri.find("mirror://") == 0)
+ return;
+
+ // find current mirror and select next one
+ for (vector<string>::const_iterator mirror = AllMirrors.begin();
+ mirror != AllMirrors.end(); ++mirror)
+ {
+ if (Queue->Uri.find(*mirror) == 0)
+ {
+ Queue->Uri.replace(0, mirror->length(), BaseUri);
+ return;
+ }
+ }
+ _error->Error("Internal error: Failed to convert %s back to %s",
+ Queue->Uri.c_str(), BaseUri.c_str());
+}
+
+bool MirrorMethod::TryNextMirror()
+{
+ // find current mirror and select next one
+ for (vector<string>::const_iterator mirror = AllMirrors.begin();
+ mirror != AllMirrors.end(); ++mirror)
+ {
+ if (Queue->Uri.find(*mirror) != 0)
+ continue;
+
+ vector<string>::const_iterator nextmirror = mirror + 1;
+ if (nextmirror == AllMirrors.end())
+ break;
+ Queue->Uri.replace(0, mirror->length(), *nextmirror);
+ if (Debug)
+ clog << "TryNextMirror: " << Queue->Uri << endl;
+ return true;
+ }
+
+ if (Debug)
+ clog << "TryNextMirror could not find another mirror to try" << endl;
+
+ return false;
+}
+
+bool MirrorMethod::InitMirrors()
+{
+ // if we do not have a MirrorFile, fallback
+ if(!FileExists(MirrorFile))
+ {
+ // FIXME: fallback to a default mirror here instead
+ // and provide a config option to define that default
+ return _error->Error(_("No mirror file '%s' found "), MirrorFile.c_str());
+ }
+
+ // FIXME: make the mirror selection more clever, do not
+ // just use the first one!
+ // BUT: we can not make this random, the mirror has to be
+ // stable across sessions, because otherwise we can
+ // get into sync issues (got indexfiles from mirror A,
+ // but packages from mirror B - one might be out of date etc)
+ ifstream in(MirrorFile.c_str());
+ string s;
+ while (!in.eof())
+ {
+ getline(in, s);
+ if (s.size() > 0)
+ AllMirrors.push_back(s);
+ }
+ Mirror = AllMirrors[0];
+ UsedMirror = Mirror;
+ return true;
+}
+
+string MirrorMethod::GetMirrorFileName(string mirror_uri_str)
+{
+ /*
+ - a mirror_uri_str looks like this:
+ mirror://people.ubuntu.com/~mvo/apt/mirror/mirrors/dists/feisty/Release.gpg
+
+ - the matching source.list entry
+ deb mirror://people.ubuntu.com/~mvo/apt/mirror/mirrors feisty main
+
+ - we actually want to go after:
+ http://people.ubuntu.com/~mvo/apt/mirror/mirrors
+
+ And we need to save the BaseUri for later:
+ - mirror://people.ubuntu.com/~mvo/apt/mirror/mirrors
+
+ FIXME: what if we have two similar prefixes?
+ mirror://people.ubuntu.com/~mvo/mirror
+ mirror://people.ubuntu.com/~mvo/mirror2
+ then mirror_uri_str looks like:
+ mirror://people.ubuntu.com/~mvo/apt/mirror/dists/feisty/Release.gpg
+ mirror://people.ubuntu.com/~mvo/apt/mirror2/dists/feisty/Release.gpg
+ we search sources.list and find:
+ mirror://people.ubuntu.com/~mvo/apt/mirror
+ in both cases! So we need to apply some domain knowledge here :( and
+ check for /dists/ or /Release.gpg as suffixes
+ */
+ string name;
+ if(Debug)
+ std::cerr << "GetMirrorFileName: " << mirror_uri_str << std::endl;
+
+ // read sources.list and find match
+ vector<metaIndex *>::const_iterator I;
+ pkgSourceList list;
+ list.ReadMainList();
+ for(I=list.begin(); I != list.end(); I++)
+ {
+ string uristr = (*I)->GetURI();
+ if(Debug)
+ std::cerr << "Checking: " << uristr << std::endl;
+ if(uristr.substr(0,strlen("mirror://")) != string("mirror://"))
+ continue;
+ // find matching uri in sources.list
+ if(mirror_uri_str.substr(0,uristr.size()) == uristr)
+ {
+ if(Debug)
+ std::cerr << "found BaseURI: " << uristr << std::endl;
+ BaseUri = uristr.substr(0,uristr.size()-1);
+ }
+ }
+ // get new file
+ name = _config->FindDir("Dir::State::mirrors") + URItoFileName(BaseUri);
+
+ if(Debug)
+ {
+ cerr << "base-uri: " << BaseUri << endl;
+ cerr << "mirror-file: " << name << endl;
+ }
+ return name;
+}
+
+// MirrorMethod::Fetch - Fetch an item /*{{{*/
+// ---------------------------------------------------------------------
+/* This adds an item to the pipeline. We keep the pipeline at a fixed
+ depth. */
+bool MirrorMethod::Fetch(FetchItem *Itm)
+{
+ if(Debug)
+ clog << "MirrorMethod::Fetch()" << endl;
+
+ // the http method uses Fetch(0) as a way to update the pipeline,
+ // just let it do its work in this case - Fetch() with a valid
+ // Itm will always run before the first Fetch(0)
+ if(Itm == NULL)
+ return HttpMethod::Fetch(Itm);
+
+ // if we don't have the name of the mirror file on disk yet,
+ // calculate it now (can be derived from the uri)
+ if(MirrorFile.empty())
+ MirrorFile = GetMirrorFileName(Itm->Uri);
+
+ // download mirror file once (if we are after index files)
+ if(Itm->IndexFile && !DownloadedMirrorFile)
+ {
+ Clean(_config->FindDir("Dir::State::mirrors"));
+ DownloadMirrorFile(Itm->Uri);
+ }
+
+ if(AllMirrors.empty()) {
+ if(!InitMirrors()) {
+ // no valid mirror selected, something went wrong downloading
+ // from the master mirror site most likely and there is
+ // no old mirror file available
+ return false;
+ }
+ }
+
+ if(Itm->Uri.find("mirror://") != string::npos)
+ Itm->Uri.replace(0,BaseUri.size(), Mirror);
+
+ if(Debug)
+ clog << "Fetch: " << Itm->Uri << endl << endl;
+
+ // now run the real fetcher
+ return HttpMethod::Fetch(Itm);
+};
+
+void MirrorMethod::Fail(string Err,bool Transient)
+{
+ // FIXME: TryNextMirror is not ideal for indexfile as we may
+ // run into auth issues
+
+ if (Debug)
+ clog << "Failure to get " << Queue->Uri << endl;
+
+ // try the next mirror on fail (if it's not an expected failure,
+ // e.g. translations are ok to ignore)
+ if (!Queue->FailIgnore && TryNextMirror())
+ return;
+
+ // all mirrors failed, so bail out
+ string s;
+ strprintf(s, _("[Mirror: %s]"), Mirror.c_str());
+ SetIP(s);
+
+ CurrentQueueUriToMirror();
+ pkgAcqMethod::Fail(Err, Transient);
+}
+
+void MirrorMethod::URIStart(FetchResult &Res)
+{
+ CurrentQueueUriToMirror();
+ pkgAcqMethod::URIStart(Res);
+}
+
+void MirrorMethod::URIDone(FetchResult &Res,FetchResult *Alt)
+{
+ CurrentQueueUriToMirror();
+ pkgAcqMethod::URIDone(Res, Alt);
+}
+
+
+int main()
+{
+ setlocale(LC_ALL, "");
+
+ MirrorMethod Mth;
+
+ return Mth.Loop();
+}
+
+
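The heart of mirror.cc is plain prefix rewriting: Fetch() swaps the stored mirror:// BaseUri for the currently selected http:// mirror before handing the item to HttpMethod, and CurrentQueueUriToMirror() swaps it back for progress and failure reporting. A minimal illustration with made-up data (the BaseUri is the example from the GetMirrorFileName() comment; the http mirror is an assumed pick from the mirror list):

    // Illustration only: the two-way URI rewrite done by MirrorMethod.
    #include <iostream>
    #include <string>

    int main()
    {
       std::string BaseUri = "mirror://people.ubuntu.com/~mvo/apt/mirror/mirrors";
       std::string Mirror  = "http://mirror.example.org/ubuntu";   // assumed entry from the mirror file
       std::string Uri     = BaseUri + "/dists/feisty/Release.gpg";

       Uri.replace(0, BaseUri.size(), Mirror);   // outgoing: mirror:// -> selected mirror
       std::cout << Uri << std::endl;

       Uri.replace(0, Mirror.size(), BaseUri);   // reporting: back to mirror://
       std::cout << Uri << std::endl;
       return 0;
    }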
diff --git a/methods/mirror.h b/methods/mirror.h
new file mode 100644
index 000000000..0a3ea6e92
--- /dev/null
+++ b/methods/mirror.h
@@ -0,0 +1,55 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: http.h,v 1.12 2002/04/18 05:09:38 jgg Exp $
+/* ######################################################################
+
+ MIRROR Acquire Method - This is the MIRROR acquire method for APT.
+
+ ##################################################################### */
+ /*}}}*/
+
+#ifndef APT_MIRROR_H
+#define APT_MIRROR_H
+
+
+#include <iostream>
+
+using std::cout;
+using std::cerr;
+using std::endl;
+
+#include "http.h"
+
+class MirrorMethod : public HttpMethod
+{
+ FetchResult Res;
+ // we simply transform between BaseUri and Mirror
+ string BaseUri; // the original mirror://... url
+ string Mirror; // the selected mirror uri (http://...)
+ vector<string> AllMirrors; // all available mirrors
+ string MirrorFile; // the file that contains the list of mirrors
+ bool DownloadedMirrorFile; // already downloaded this session
+
+ bool Debug;
+
+ protected:
+ bool DownloadMirrorFile(string uri);
+ string GetMirrorFileName(string uri);
+ bool InitMirrors();
+ bool TryNextMirror();
+ void CurrentQueueUriToMirror();
+ bool Clean(string dir);
+
+ // we need to overwrite those to transform the url back
+ virtual void Fail(string Why, bool Transient = false);
+ virtual void URIStart(FetchResult &Res);
+ virtual void URIDone(FetchResult &Res,FetchResult *Alt = 0);
+ virtual bool Configuration(string Message);
+
+ public:
+ MirrorMethod();
+ virtual bool Fetch(FetchItem *Itm);
+};
+
+
+#endif
diff --git a/methods/rred.cc b/methods/rred.cc
index 262c78cab..f42c7a072 100644
--- a/methods/rred.cc
+++ b/methods/rred.cc
@@ -477,23 +477,26 @@ bool RredMethod::Fetch(FetchItem *Itm) /*{{{*/
Patch.Close();
To.Close();
- // Transfer the modification times
- struct stat Buf;
- if (stat(Path.c_str(),&Buf) != 0)
+ /* Transfer the modification times from the patch file
+ to be able to see in which state the file should be
+ and use the access time from the "old" file */
+ struct stat BufBase, BufPatch;
+ if (stat(Path.c_str(),&BufBase) != 0 ||
+ stat(string(Path+".ed").c_str(),&BufPatch) != 0)
return _error->Errno("stat",_("Failed to stat"));
struct utimbuf TimeBuf;
- TimeBuf.actime = Buf.st_atime;
- TimeBuf.modtime = Buf.st_mtime;
+ TimeBuf.actime = BufBase.st_atime;
+ TimeBuf.modtime = BufPatch.st_mtime;
if (utime(Itm->DestFile.c_str(),&TimeBuf) != 0)
return _error->Errno("utime",_("Failed to set modification time"));
- if (stat(Itm->DestFile.c_str(),&Buf) != 0)
+ if (stat(Itm->DestFile.c_str(),&BufBase) != 0)
return _error->Errno("stat",_("Failed to stat"));
// return done
- Res.LastModified = Buf.st_mtime;
- Res.Size = Buf.st_size;
+ Res.LastModified = BufBase.st_mtime;
+ Res.Size = BufBase.st_size;
Res.TakeHashes(Hash);
URIDone(Res);
diff --git a/methods/rsh.cc b/methods/rsh.cc
index f0ccfc42d..97b4ef151 100644
--- a/methods/rsh.cc
+++ b/methods/rsh.cc
@@ -278,8 +278,7 @@ bool RSHConn::ModTime(const char *Path, time_t &Time)
return false;
// Parse it
- StrToTime(Msg,Time);
- return true;
+ return FTPMDTMStrToTime(Msg.c_str(), Time);
}
/*}}}*/
// RSHConn::Get - Get a file /*{{{*/