Diffstat (limited to 'methods')
-rw-r--r--  methods/cdrom.cc  |  12
-rw-r--r--  methods/copy.cc   |   5
-rw-r--r--  methods/ftp.cc    |   2
-rw-r--r--  methods/gpgv.cc   |  32
-rw-r--r--  methods/gzip.cc   |   8
-rw-r--r--  methods/http.cc   |  33
-rw-r--r--  methods/http.h    |   4
-rw-r--r--  methods/https.cc  | 273
-rw-r--r--  methods/https.h   |  48
-rw-r--r--  methods/makefile  |  27
-rw-r--r--  methods/rred.cc   | 262
11 files changed, 662 insertions, 44 deletions
diff --git a/methods/cdrom.cc b/methods/cdrom.cc
index d6b8eae75..601bc11c9 100644
--- a/methods/cdrom.cc
+++ b/methods/cdrom.cc
@@ -30,7 +30,7 @@ class CDROMMethod : public pkgAcqMethod
::Configuration Database;
string CurrentID;
string CDROM;
- bool Mounted;
+ bool MountedByApt;
virtual bool Fetch(FetchItem *Itm);
string GetID(string Name);
@@ -48,7 +48,7 @@ CDROMMethod::CDROMMethod() : pkgAcqMethod("1.0",SingleInstance | LocalOnly |
SendConfig | NeedsCleanup |
Removable),
DatabaseLoaded(false),
- Mounted(false)
+ MountedByApt(false)
{
};
/*}}}*/
@@ -57,7 +57,7 @@ CDROMMethod::CDROMMethod() : pkgAcqMethod("1.0",SingleInstance | LocalOnly |
/* */
void CDROMMethod::Exit()
{
- if (Mounted == true)
+ if (MountedByApt == true)
UnmountCdrom(CDROM);
}
/*}}}*/
@@ -139,7 +139,8 @@ bool CDROMMethod::Fetch(FetchItem *Itm)
while (CurrentID.empty() == true)
{
bool Hit = false;
- Mounted = MountCdrom(CDROM);
+ if(!IsMounted(CDROM))
+ MountedByApt = MountCdrom(CDROM);
for (unsigned int Version = 2; Version != 0; Version--)
{
if (IdentCdrom(CDROM,NewID,Version) == false)
@@ -160,7 +161,8 @@ bool CDROMMethod::Fetch(FetchItem *Itm)
break;
// I suppose this should prompt somehow?
- if (UnmountCdrom(CDROM) == false)
+ if (_config->FindB("APT::CDROM::NoMount",false) == false &&
+ UnmountCdrom(CDROM) == false)
return _error->Error(_("Unable to unmount the CD-ROM in %s, it may still be in use."),
CDROM.c_str());
if (MediaFail(Get.Host,CDROM) == false)
diff --git a/methods/copy.cc b/methods/copy.cc
index d737e3c33..72896b4c0 100644
--- a/methods/copy.cc
+++ b/methods/copy.cc
@@ -12,6 +12,7 @@
#include <apt-pkg/fileutl.h>
#include <apt-pkg/acquire-method.h>
#include <apt-pkg/error.h>
+#include <apt-pkg/hashes.h>
#include <sys/stat.h>
#include <utime.h>
@@ -79,6 +80,10 @@ bool CopyMethod::Fetch(FetchItem *Itm)
return _error->Errno("utime",_("Failed to set modification time"));
}
+ Hashes Hash;
+ FileFd Fd(Res.Filename, FileFd::ReadOnly);
+ Hash.AddFD(Fd.Fd(), Fd.Size());
+ Res.TakeHashes(Hash);
URIDone(Res);
return true;
}
diff --git a/methods/ftp.cc b/methods/ftp.cc
index 0c2aa00a7..554a24cf5 100644
--- a/methods/ftp.cc
+++ b/methods/ftp.cc
@@ -1101,7 +1101,7 @@ int main(int argc,const char *argv[])
char S[300];
snprintf(S,sizeof(S),"http_proxy=%s",getenv("ftp_proxy"));
putenv(S);
- putenv("no_proxy=");
+ putenv((char *)"no_proxy=");
// Run the http method
string Path = flNotFile(argv[0]) + "http";
diff --git a/methods/gpgv.cc b/methods/gpgv.cc
index ba7389cba..1ed26a30a 100644
--- a/methods/gpgv.cc
+++ b/methods/gpgv.cc
@@ -17,6 +17,7 @@
#define GNUPGBADSIG "[GNUPG:] BADSIG"
#define GNUPGNOPUBKEY "[GNUPG:] NO_PUBKEY"
#define GNUPGVALIDSIG "[GNUPG:] VALIDSIG"
+#define GNUPGNODATA "[GNUPG:] NODATA"
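+// Status lines arrive on fd 3 and look like, e.g., "[GNUPG:] VALIDSIG <fingerprint> ..."
+// (illustrative only; gnupg's doc/DETAILS documents the exact fields).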
class GPGVMethod : public pkgAcqMethod
{
@@ -82,6 +83,7 @@ string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile,
Args[i++] = gpgvpath.c_str();
Args[i++] = "--status-fd";
Args[i++] = "3";
+ Args[i++] = "--ignore-time-conflict";
Args[i++] = "--keyring";
Args[i++] = pubringpath.c_str();
@@ -120,9 +122,9 @@ string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile,
// Redirect the pipe to the status fd (3)
dup2(fd[1], 3);
- putenv("LANG=");
- putenv("LC_ALL=");
- putenv("LC_MESSAGES=");
+ putenv((char *)"LANG=");
+ putenv((char *)"LC_ALL=");
+ putenv((char *)"LC_MESSAGES=");
execvp(gpgvpath.c_str(), (char **)Args);
exit(111);
@@ -171,7 +173,12 @@ string GPGVMethod::VerifyGetSigners(const char *file, const char *outfile,
std::cerr << "Got NO_PUBKEY " << std::endl;
NoPubKeySigners.push_back(string(buffer+sizeof(GNUPGPREFIX)));
}
-
+ if (strncmp(buffer, GNUPGNODATA, sizeof(GNUPGNODATA)-1) == 0)
+ {
+ if (_config->FindB("Debug::Acquire::gpgv", false))
+ std::cerr << "Got NODATA! " << std::endl;
+ BadSigners.push_back(string(buffer+sizeof(GNUPGPREFIX)));
+ }
if (strncmp(buffer, GNUPGVALIDSIG, sizeof(GNUPGVALIDSIG)-1) == 0)
{
char *sig = buffer + sizeof(GNUPGPREFIX);
@@ -260,23 +267,6 @@ bool GPGVMethod::Fetch(FetchItem *Itm)
return _error->Error(errmsg.c_str());
}
- // Transfer the modification times
- struct stat Buf;
- if (stat(Path.c_str(),&Buf) != 0)
- return _error->Errno("stat",_("Failed to stat %s"), Path.c_str());
-
- struct utimbuf TimeBuf;
- TimeBuf.actime = Buf.st_atime;
- TimeBuf.modtime = Buf.st_mtime;
- if (utime(Itm->DestFile.c_str(),&TimeBuf) != 0)
- return _error->Errno("utime",_("Failed to set modification time"));
-
- if (stat(Itm->DestFile.c_str(),&Buf) != 0)
- return _error->Errno("stat",_("Failed to stat"));
-
- // Return a Done response
- Res.LastModified = Buf.st_mtime;
- Res.Size = Buf.st_size;
// Just pass the raw output up, because passing it as a real data
// structure is too difficult with the method stuff. We keep it
// as three separate vectors for future extensibility.
diff --git a/methods/gzip.cc b/methods/gzip.cc
index 809afc0fc..f732c0b86 100644
--- a/methods/gzip.cc
+++ b/methods/gzip.cc
@@ -52,6 +52,13 @@ bool GzipMethod::Fetch(FetchItem *Itm)
// Open the source and destination files
FileFd From(Path,FileFd::ReadOnly);
+ // if the file is empty, just rename it and return
+ if(From.Size() == 0)
+ {
+ rename(Path.c_str(), Itm->DestFile.c_str());
+ return true;
+ }
+
int GzOut[2];
if (pipe(GzOut) < 0)
return _error->Errno("pipe",_("Couldn't open pipe for %s"),Prog);
@@ -111,6 +118,7 @@ bool GzipMethod::Fetch(FetchItem *Itm)
if (To.Write(Buffer,Count) == false)
{
Failed = true;
+ FromGz.Close();
break;
}
}
diff --git a/methods/http.cc b/methods/http.cc
index 341de94e3..d4e231fbe 100644
--- a/methods/http.cc
+++ b/methods/http.cc
@@ -3,7 +3,7 @@
// $Id: http.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $
/* ######################################################################
- HTTP Aquire Method - This is the HTTP aquire method for APT.
+ HTTP Acquire Method - This is the HTTP acquire method for APT.
It uses HTTP/1.1 and many of the fancy options there-in, such as
pipelining, range, if-range and so on.
@@ -44,6 +44,7 @@
// Internet stuff
#include <netdb.h>
+#include "config.h"
#include "connect.h"
#include "rfc2553emu.h"
#include "http.h"
@@ -57,7 +58,7 @@ time_t HttpMethod::FailTime = 0;
unsigned long PipelineDepth = 10;
unsigned long TimeOut = 120;
bool Debug = false;
-
+URI Proxy;
unsigned long CircleBuf::BwReadLimit=0;
unsigned long CircleBuf::BwTickReadData=0;
@@ -657,7 +658,7 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out)
will glitch HTTP/1.0 proxies because they do not filter it out and
pass it on, HTTP/1.1 says the connection should default to keep alive
and we expect the proxy to do this */
- if (Proxy.empty() == true)
+ if (Proxy.empty() == true || Proxy.Host.empty())
sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\nConnection: keep-alive\r\n",
QuoteString(Uri.Path,"~").c_str(),ProperHost.c_str());
else
@@ -714,7 +715,7 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out)
Req += string("Authorization: Basic ") +
Base64Encode(Uri.User + ":" + Uri.Password) + "\r\n";
- Req += "User-Agent: Debian APT-HTTP/1.3\r\n\r\n";
+ Req += "User-Agent: Debian APT-HTTP/1.3 ("VERSION")\r\n\r\n";
if (Debug == true)
cerr << Req << endl;
@@ -995,7 +996,6 @@ bool HttpMethod::Fetch(FetchItem *)
// Queue the requests
int Depth = -1;
- bool Tail = false;
for (FetchItem *I = Queue; I != 0 && Depth < (signed)PipelineDepth;
I = I->Next, Depth++)
{
@@ -1007,8 +1007,6 @@ bool HttpMethod::Fetch(FetchItem *)
if (Server->Comp(I->Uri) == false)
break;
if (QueueBack == I)
- Tail = true;
- if (Tail == true)
{
QueueBack = I->Next;
SendReq(I,Server->Out);
@@ -1070,7 +1068,6 @@ int HttpMethod::Loop()
delete Server;
Server = new ServerState(Queue->Uri,this);
}
-
/* If the server has explicitly said this is the last connection
then we pre-emptively shut down the pipeline and tear down
the connection. This will speed up HTTP/1.0 servers a tad
@@ -1167,8 +1164,24 @@ int HttpMethod::Loop()
URIDone(Res);
}
else
- Fail(true);
-
+ {
+ if (Server->ServerFd == -1)
+ {
+ FailCounter++;
+ _error->Discard();
+ Server->Close();
+
+ if (FailCounter >= 2)
+ {
+ Fail(_("Connection failed"),true);
+ FailCounter = 0;
+ }
+
+ QueueBack = Queue;
+ }
+ else
+ Fail(true);
+ }
break;
}
diff --git a/methods/http.h b/methods/http.h
index 541e2952c..6753a9901 100644
--- a/methods/http.h
+++ b/methods/http.h
@@ -3,7 +3,7 @@
// $Id: http.h,v 1.12 2002/04/18 05:09:38 jgg Exp $
/* ######################################################################
- HTTP Aquire Method - This is the HTTP aquire method for APT.
+ HTTP Acquire Method - This is the HTTP acquire method for APT.
##################################################################### */
/*}}}*/
@@ -158,6 +158,4 @@ class HttpMethod : public pkgAcqMethod
};
};
-URI Proxy;
-
#endif
diff --git a/methods/https.cc b/methods/https.cc
new file mode 100644
index 000000000..b2bbbddb1
--- /dev/null
+++ b/methods/https.cc
@@ -0,0 +1,273 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: http.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $
+/* ######################################################################
+
+ HTTPS Acquire Method - This is the HTTPS acquire method for APT.
+
+ It uses libcurl
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/acquire-method.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/hashes.h>
+
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <utime.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <iostream>
+#include <apti18n.h>
+#include <sstream>
+
+#include "config.h"
+#include "https.h"
+
+ /*}}}*/
+using namespace std;
+
+size_t
+HttpsMethod::write_data(void *buffer, size_t size, size_t nmemb, void *userp)
+{
+ HttpsMethod *me = (HttpsMethod *)userp;
+
+ if(me->File->Write(buffer, size*nmemb) != true)
+ return false;
+
+ return size*nmemb;
+}
+
+int
+HttpsMethod::progress_callback(void *clientp, double dltotal, double dlnow,
+ double ultotal, double ulnow)
+{
+ HttpsMethod *me = (HttpsMethod *)clientp;
+ if(dltotal > 0 && me->Res.Size == 0) {
+ me->Res.Size = (unsigned long)dltotal;
+ me->URIStart(me->Res);
+ }
+ return 0;
+}
+
+void HttpsMethod::SetupProxy()
+{
+ URI ServerName = Queue->Uri;
+
+ // Determine the proxy setting
+ if (getenv("http_proxy") == 0)
+ {
+ string DefProxy = _config->Find("Acquire::http::Proxy");
+ string SpecificProxy = _config->Find("Acquire::http::Proxy::" + ServerName.Host);
+ if (SpecificProxy.empty() == false)
+ {
+ if (SpecificProxy == "DIRECT")
+ Proxy = "";
+ else
+ Proxy = SpecificProxy;
+ }
+ else
+ Proxy = DefProxy;
+ }
+
+ // Parse no_proxy, a , separated list of domains
+ if (getenv("no_proxy") != 0)
+ {
+ if (CheckDomainList(ServerName.Host,getenv("no_proxy")) == true)
+ Proxy = "";
+ }
+
+ // Determine what host and port to use based on the proxy settings
+ string Host;
+ if (Proxy.empty() == true || Proxy.Host.empty() == true)
+ {
+ }
+ else
+ {
+ if (Proxy.Port != 0)
+ curl_easy_setopt(curl, CURLOPT_PROXYPORT, Proxy.Port);
+ curl_easy_setopt(curl, CURLOPT_PROXY, Proxy.Host.c_str());
+ }
+}
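+/* A configuration sketch for the lookups above (host and proxy values are
+ * made up):
+ *
+ *   Acquire::http::Proxy "http://proxy.example.org:8080/";
+ *   Acquire::http::Proxy::ftp.example.org "DIRECT";
+ *
+ * These settings are only consulted when the http_proxy environment variable
+ * is unset, and hosts listed in no_proxy bypass the proxy entirely. */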
+
+
+// HttpsMethod::Fetch - Fetch an item /*{{{*/
+// ---------------------------------------------------------------------
+/* This adds an item to the pipeline. We keep the pipeline at a fixed
+ depth. */
+bool HttpsMethod::Fetch(FetchItem *Itm)
+{
+ stringstream ss;
+ struct stat SBuf;
+ struct curl_slist *headers=NULL;
+ char curl_errorstr[CURL_ERROR_SIZE];
+ long curl_responsecode;
+
+ // TODO:
+ // - http::Timeout
+ // - http::Pipeline-Depth
+ // - error checking/reporting
+ // - more debug options? (CURLOPT_DEBUGFUNCTION?)
+
+ curl_easy_reset(curl);
+ SetupProxy();
+
+ // callbacks
+ curl_easy_setopt(curl, CURLOPT_URL, Itm->Uri.c_str());
+ curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
+ curl_easy_setopt(curl, CURLOPT_WRITEDATA, this);
+ curl_easy_setopt(curl, CURLOPT_PROGRESSFUNCTION, progress_callback);
+ curl_easy_setopt(curl, CURLOPT_PROGRESSDATA, this);
+ curl_easy_setopt(curl, CURLOPT_NOPROGRESS, false);
+ curl_easy_setopt(curl, CURLOPT_FAILONERROR, true);
+ curl_easy_setopt(curl, CURLOPT_FILETIME, true);
+
+ // FIXME: https: offer various options of verification
+ bool peer_verify = _config->FindB("Acquire::https::Verify-Peer", false);
+ curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, peer_verify);
+
+ // sslcert file
+ string pem = _config->Find("Acquire::https::SslCert","");
+ if(pem != "")
+ curl_easy_setopt(curl, CURLOPT_SSLCERT, pem.c_str());
+
+ // CA-Dir
+ string certdir = _config->Find("Acquire::https::CaPath","");
+ if(certdir != "")
+ curl_easy_setopt(curl, CURLOPT_CAPATH, certdir.c_str());
+
+ // Server-verify
+ int verify = _config->FindI("Acquire::https::Verify-Host",2);
+ curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, verify);
+
+ // cache-control
+ if(_config->FindB("Acquire::http::No-Cache",false) == false)
+ {
+ // cache enabled
+ if (_config->FindB("Acquire::http::No-Store",false) == true)
+ headers = curl_slist_append(headers,"Cache-Control: no-store");
+ ioprintf(ss, "Cache-Control: max-age=%u", _config->FindI("Acquire::http::Max-Age",0));
+ headers = curl_slist_append(headers, ss.str().c_str());
+ } else {
+ // cache disabled by user
+ headers = curl_slist_append(headers, "Cache-Control: no-cache");
+ headers = curl_slist_append(headers, "Pragma: no-cache");
+ }
+ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
+
+ // speed limit
+ int dlLimit = _config->FindI("Acquire::http::Dl-Limit",0)*1024;
+ if (dlLimit > 0)
+ curl_easy_setopt(curl, CURLOPT_MAX_RECV_SPEED_LARGE, dlLimit);
+
+ // set header
+ curl_easy_setopt(curl, CURLOPT_USERAGENT,"Debian APT-CURL/1.0 ("VERSION")");
+
+ // debug
+ if(_config->FindB("Debug::Acquire::https", false))
+ curl_easy_setopt(curl, CURLOPT_VERBOSE, true);
+
+ // error handling
+ curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_errorstr);
+
+ // if we have the file send an if-range query with a range header
+ if (stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
+ {
+ char Buf[1000];
+ sprintf(Buf,"Range: bytes=%li-\r\nIf-Range: %s\r\n",
+ (long)SBuf.st_size - 1,
+ TimeRFC1123(SBuf.st_mtime).c_str());
+ headers = curl_slist_append(headers, Buf);
+ }
+ else if(Itm->LastModified > 0)
+ {
+ curl_easy_setopt(curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_IFMODSINCE);
+ curl_easy_setopt(curl, CURLOPT_TIMEVALUE, Itm->LastModified);
+ }
+
+ // go for it - if the file exists, append on it
+ File = new FileFd(Itm->DestFile, FileFd::WriteAny);
+ if (File->Size() > 0)
+ File->Seek(File->Size() - 1);
+
+ // keep apt updated
+ Res.Filename = Itm->DestFile;
+
+ // get it!
+ CURLcode success = curl_easy_perform(curl);
+ curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &curl_responsecode);
+
+ long curl_servdate;
+ curl_easy_getinfo(curl, CURLINFO_FILETIME, &curl_servdate);
+
+ // cleanup
+ if(success != 0)
+ {
+ unlink(File->Name().c_str());
+ _error->Error(curl_errorstr);
+ Fail();
+ return true;
+ }
+ File->Close();
+
+ // Timestamp
+ struct utimbuf UBuf;
+ if (curl_servdate != -1) {
+ UBuf.actime = curl_servdate;
+ UBuf.modtime = curl_servdate;
+ utime(File->Name().c_str(),&UBuf);
+ }
+
+ // check the downloaded result
+ struct stat Buf;
+ if (stat(File->Name().c_str(),&Buf) == 0)
+ {
+ Res.Filename = File->Name();
+ Res.LastModified = Buf.st_mtime;
+ Res.IMSHit = false;
+ if (curl_responsecode == 304)
+ {
+ unlink(File->Name().c_str());
+ Res.IMSHit = true;
+ Res.LastModified = Itm->LastModified;
+ Res.Size = 0;
+ URIDone(Res);
+ return true;
+ }
+ Res.Size = Buf.st_size;
+ }
+
+ // take hashes
+ Hashes Hash;
+ FileFd Fd(Res.Filename, FileFd::ReadOnly);
+ Hash.AddFD(Fd.Fd(), Fd.Size());
+ Res.TakeHashes(Hash);
+
+ // keep apt updated
+ URIDone(Res);
+
+ // cleanup
+ Res.Size = 0;
+ delete File;
+ curl_slist_free_all(headers);
+
+ return true;
+};
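+/* The options read in Fetch(), as an apt.conf sketch (the values shown are
+ * examples, not defaults):
+ *
+ *   Acquire::https::Verify-Peer "true";     // verify the server certificate
+ *   Acquire::https::Verify-Host "2";        // verify the certificate hostname
+ *   Acquire::https::CaPath "/etc/ssl/certs";
+ *   Acquire::https::SslCert "/etc/apt/client.pem";
+ *   Acquire::http::Dl-Limit "500";          // bandwidth limit in kB/s
+ *   Debug::Acquire::https "true";
+ */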
+
+int main()
+{
+ setlocale(LC_ALL, "");
+
+ HttpsMethod Mth;
+ curl_global_init(CURL_GLOBAL_SSL) ;
+
+ return Mth.Run();
+}
+
+
diff --git a/methods/https.h b/methods/https.h
new file mode 100644
index 000000000..2c33d95ee
--- /dev/null
+++ b/methods/https.h
@@ -0,0 +1,48 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: http.h,v 1.12 2002/04/18 05:09:38 jgg Exp $
+/* ######################################################################
+
+ HTTPS Acquire Method - This is the HTTPS acquire method for APT.
+
+ ##################################################################### */
+ /*}}}*/
+
+#ifndef APT_HTTP_H
+#define APT_HTTP_H
+
+#define MAXLEN 360
+
+#include <iostream>
+#include <curl/curl.h>
+
+using std::cout;
+using std::endl;
+
+class HttpsMethod;
+
+
+class HttpsMethod : public pkgAcqMethod
+{
+
+ virtual bool Fetch(FetchItem *);
+ static size_t write_data(void *buffer, size_t size, size_t nmemb, void *userp);
+ static int progress_callback(void *clientp, double dltotal, double dlnow,
+ double ultotal, double ulnow);
+ void SetupProxy();
+ CURL *curl;
+ FetchResult Res;
+
+ public:
+ FileFd *File;
+
+ HttpsMethod() : pkgAcqMethod("1.2",Pipeline | SendConfig)
+ {
+ File = 0;
+ curl = curl_easy_init();
+ };
+};
+
+URI Proxy;
+
+#endif
diff --git a/methods/makefile b/methods/makefile
index 3f561a2c3..5794c84e7 100644
--- a/methods/makefile
+++ b/methods/makefile
@@ -7,7 +7,7 @@ include ../buildlib/defaults.mak
BIN := $(BIN)/methods
# FIXME..
-LIB_APT_PKG_MAJOR = 3.12
+LIB_APT_PKG_MAJOR = 4.6
APT_DOMAIN := libapt-pkg$(LIB_APT_PKG_MAJOR)
# The file method
@@ -52,6 +52,13 @@ LIB_MAKES = apt-pkg/makefile
SOURCE = http.cc rfc2553emu.cc connect.cc
include $(PROGRAM_H)
+# The https method
+PROGRAM=https
+SLIBS = -lapt-pkg -lcurl
+LIB_MAKES = apt-pkg/makefile
+SOURCE = https.cc
+include $(PROGRAM_H)
+
# The ftp method
PROGRAM=ftp
SLIBS = -lapt-pkg $(SOCKETLIBS)
@@ -59,6 +66,13 @@ LIB_MAKES = apt-pkg/makefile
SOURCE = ftp.cc rfc2553emu.cc connect.cc
include $(PROGRAM_H)
+# The rred method
+PROGRAM=rred
+SLIBS = -lapt-pkg $(SOCKETLIBS)
+LIB_MAKES = apt-pkg/makefile
+SOURCE = rred.cc
+include $(PROGRAM_H)
+
# The rsh method
PROGRAM=rsh
SLIBS = -lapt-pkg
@@ -66,9 +80,9 @@ LIB_MAKES = apt-pkg/makefile
SOURCE = rsh.cc
include $(PROGRAM_H)
-# SSH and vzip2 method symlink
-binary: $(BIN)/ssh $(BIN)/bzip2
-veryclean: clean-$(BIN)/ssh clean-$(BIN)/bzip2
+# SSH and bzip2 method symlink
+binary: $(BIN)/ssh $(BIN)/bzip2 $(BIN)/lzma
+veryclean: clean-$(BIN)/ssh clean-$(BIN)/bzip2 clean-$(BIN)/lzma
$(BIN)/ssh:
echo "Installing ssh method link"
ln -fs rsh $(BIN)/ssh
@@ -78,5 +92,10 @@ clean-$(BIN)/ssh:
$(BIN)/bzip2:
echo "Installing bzip2 method link"
ln -fs gzip $(BIN)/bzip2
+$(BIN)/lzma:
+ echo "Installing lzma method link"
+ ln -fs gzip $(BIN)/lzma
clean-$(BIN)/bzip2:
-rm $(BIN)/bzip2
+clean-$(BIN)/lzma:
+ -rm $(BIN)/lzma
diff --git a/methods/rred.cc b/methods/rred.cc
new file mode 100644
index 000000000..6fa57f3a6
--- /dev/null
+++ b/methods/rred.cc
@@ -0,0 +1,262 @@
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/acquire-method.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/hashes.h>
+
+#include <sys/stat.h>
+#include <unistd.h>
+#include <utime.h>
+#include <stdio.h>
+#include <errno.h>
+#include <apti18n.h>
+
+/* this method implements a patch functionality similar to "patch --ed" that is
+ * used by the "tiffany" incremental packages download stuff. it differs from
+ * "ed" insofar as it is way more restricted (and therefore secure). at the
+ * moment only the "c", "a" and "d" commands of ed are implemented (diff
+ * doesn't output any other). additionally the records must be reverse sorted
+ * by line number and may not overlap (diff *seems* to produce this kind of
+ * output).
+ * */
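+/* For illustration only (not taken from a real Packages.diff): a script this
+ * parser accepts consists of non-overlapping hunks in descending line order,
+ * e.g.
+ *
+ *   5c
+ *   replacement text for line 5
+ *   .
+ *   2,3d
+ *   1a
+ *   text appended after line 1
+ *   .
+ */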
+
+const char *Prog;
+
+class RredMethod : public pkgAcqMethod
+{
+ bool Debug;
+ // the size of this doesn't really matter (except for performance)
+ const static int BUF_SIZE = 1024;
+ // the ed commands
+ enum Mode {MODE_CHANGED, MODE_DELETED, MODE_ADDED};
+ // return values
+ enum State {ED_OK, ED_ORDERING, ED_PARSER, ED_FAILURE};
+ // this applies a single hunk, it uses a tail recursion to
+ // reverse the hunks in the file
+ int ed_rec(FILE *ed_cmds, FILE *in_file, FILE *out_file, int line,
+ char *buffer, unsigned int bufsize, Hashes *hash);
+ // apply a patch file
+ int ed_file(FILE *ed_cmds, FILE *in_file, FILE *out_file, Hashes *hash);
+ // the methods main method
+ virtual bool Fetch(FetchItem *Itm);
+
+ public:
+
+ RredMethod() : pkgAcqMethod("1.1",SingleInstance | SendConfig) {};
+};
+
+int RredMethod::ed_rec(FILE *ed_cmds, FILE *in_file, FILE *out_file, int line,
+ char *buffer, unsigned int bufsize, Hashes *hash) {
+ int pos;
+ int startline;
+ int stopline;
+ int mode;
+ int written;
+ char *idx;
+
+ /* get the current command and parse it*/
+ if (fgets(buffer, bufsize, ed_cmds) == NULL) {
+ return line;
+ }
+ startline = strtol(buffer, &idx, 10);
+ if (startline < line) {
+ return ED_ORDERING;
+ }
+ if (*idx == ',') {
+ idx++;
+ stopline = strtol(idx, &idx, 10);
+ }
+ else {
+ stopline = startline;
+ }
+ if (*idx == 'c') {
+ mode = MODE_CHANGED;
+ if (Debug == true) {
+ std::clog << "changing from line " << startline
+ << " to " << stopline << std::endl;
+ }
+ }
+ else if (*idx == 'a') {
+ mode = MODE_ADDED;
+ if (Debug == true) {
+ std::clog << "adding after line " << startline << std::endl;
+ }
+ }
+ else if (*idx == 'd') {
+ mode = MODE_DELETED;
+ if (Debug == true) {
+ std::clog << "deleting from line " << startline
+ << " to " << stopline << std::endl;
+ }
+ }
+ else {
+ return ED_PARSER;
+ }
+ /* get the current position */
+ pos = ftell(ed_cmds);
+ /* if this is add or change then go to the next full stop */
+ if ((mode == MODE_CHANGED) || (mode == MODE_ADDED)) {
+ do {
+ fgets(buffer, bufsize, ed_cmds);
+ while ((strlen(buffer) == (bufsize - 1))
+ && (buffer[bufsize - 2] != '\n')) {
+ fgets(buffer, bufsize, ed_cmds);
+ buffer[0] = ' ';
+ }
+ } while (strncmp(buffer, ".", 1) != 0);
+ }
+ /* do the recursive call */
+ line = ed_rec(ed_cmds, in_file, out_file, line, buffer, bufsize,
+ hash);
+ /* pass on errors */
+ if (line < 0) {
+ return line;
+ }
+ /* apply our hunk */
+ fseek(ed_cmds, pos, SEEK_SET);
+ /* first wind to the current position */
+ if (mode != MODE_ADDED) {
+ startline -= 1;
+ }
+ while (line < startline) {
+ fgets(buffer, bufsize, in_file);
+ written = fwrite(buffer, 1, strlen(buffer), out_file);
+ hash->Add((unsigned char*)buffer, written);
+ while ((strlen(buffer) == (bufsize - 1))
+ && (buffer[bufsize - 2] != '\n')) {
+ fgets(buffer, bufsize, in_file);
+ written = fwrite(buffer, 1, strlen(buffer), out_file);
+ hash->Add((unsigned char*)buffer, written);
+ }
+ line++;
+ }
+ /* include from ed script */
+ if ((mode == MODE_ADDED) || (mode == MODE_CHANGED)) {
+ do {
+ fgets(buffer, bufsize, ed_cmds);
+ if (strncmp(buffer, ".", 1) != 0) {
+ written = fwrite(buffer, 1, strlen(buffer), out_file);
+ hash->Add((unsigned char*)buffer, written);
+ while ((strlen(buffer) == (bufsize - 1))
+ && (buffer[bufsize - 2] != '\n')) {
+ fgets(buffer, bufsize, ed_cmds);
+ written = fwrite(buffer, 1, strlen(buffer), out_file);
+ hash->Add((unsigned char*)buffer, written);
+ }
+ }
+ else {
+ break;
+ }
+ } while (1);
+ }
+ /* ignore the corresponding number of lines from input */
+ if ((mode == MODE_DELETED) || (mode == MODE_CHANGED)) {
+ while (line < stopline) {
+ fgets(buffer, bufsize, in_file);
+ while ((strlen(buffer) == (bufsize - 1))
+ && (buffer[bufsize - 2] != '\n')) {
+ fgets(buffer, bufsize, in_file);
+ }
+ line++;
+ }
+ }
+ return line;
+}
+
+int RredMethod::ed_file(FILE *ed_cmds, FILE *in_file, FILE *out_file,
+ Hashes *hash) {
+ char buffer[BUF_SIZE];
+ int result;
+ int written;
+
+ /* we do a tail recursion to read the commands in the right order */
+ result = ed_rec(ed_cmds, in_file, out_file, 0, buffer, BUF_SIZE,
+ hash);
+
+ /* read the rest from infile */
+ if (result > 0) {
+ while (fgets(buffer, BUF_SIZE, in_file) != NULL) {
+ written = fwrite(buffer, 1, strlen(buffer), out_file);
+ hash->Add((unsigned char*)buffer, written);
+ }
+ }
+ else {
+ return ED_FAILURE;
+ }
+ return ED_OK;
+}
+
+
+bool RredMethod::Fetch(FetchItem *Itm)
+{
+ Debug = _config->FindB("Debug::pkgAcquire::RRed",false);
+ URI Get = Itm->Uri;
+ string Path = Get.Host + Get.Path; // To account for relative paths
+ // Path contains the filename to patch
+ FetchResult Res;
+ Res.Filename = Itm->DestFile;
+ URIStart(Res);
+ // Res.Filename the destination filename
+
+ if (Debug == true)
+ std::clog << "Patching " << Path << " with " << Path
+ << ".ed and putting result into " << Itm->DestFile << std::endl;
+ // Open the source and destination files (the d'tor of FileFd will do
+ // the cleanup/closing of the fds)
+ FileFd From(Path,FileFd::ReadOnly);
+ FileFd Patch(Path+".ed",FileFd::ReadOnly);
+ FileFd To(Itm->DestFile,FileFd::WriteEmpty);
+ To.EraseOnFailure();
+ if (_error->PendingError() == true)
+ return false;
+
+ Hashes Hash;
+ FILE* fFrom = fdopen(From.Fd(), "r");
+ FILE* fPatch = fdopen(Patch.Fd(), "r");
+ FILE* fTo = fdopen(To.Fd(), "w");
+ // now do the actual patching
+ if (ed_file(fPatch, fFrom, fTo, &Hash) != ED_OK) {
+ _error->Errno("rred", _("Could not patch file"));
+ return false;
+ }
+
+ // write out the result
+ fflush(fFrom);
+ fflush(fPatch);
+ fflush(fTo);
+ From.Close();
+ Patch.Close();
+ To.Close();
+
+ // Transfer the modification times
+ struct stat Buf;
+ if (stat(Path.c_str(),&Buf) != 0)
+ return _error->Errno("stat",_("Failed to stat"));
+
+ struct utimbuf TimeBuf;
+ TimeBuf.actime = Buf.st_atime;
+ TimeBuf.modtime = Buf.st_mtime;
+ if (utime(Itm->DestFile.c_str(),&TimeBuf) != 0)
+ return _error->Errno("utime",_("Failed to set modification time"));
+
+ if (stat(Itm->DestFile.c_str(),&Buf) != 0)
+ return _error->Errno("stat",_("Failed to stat"));
+
+ // return done
+ Res.LastModified = Buf.st_mtime;
+ Res.Size = Buf.st_size;
+ Res.TakeHashes(Hash);
+ URIDone(Res);
+
+ return true;
+}
+
+int main(int argc, char *argv[])
+{
+ RredMethod Mth;
+
+ Prog = strrchr(argv[0],'/');
+ Prog++;
+
+ return Mth.Run();
+}