From 6789e01e9370b3b7f65d52138c5657eaa712b4d1 Mon Sep 17 00:00:00 2001
From: David Kalnischkies
Date: Fri, 20 Nov 2015 00:54:07 +0100
Subject: do not segfault in cache generation on mmap failure
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Out of memory and similar circumstances could cause MMap::Map to fail,
especially the mmap/malloc calls in it. With some additional checking we
can avoid segfaults and the like in such situations, at least in theory:
if this really is an out-of-memory condition, everything we do to handle
the error could just as well run into a memory problem as well… But at
least in theory (if MMap::Map is made to fail always) we can deal with it
well enough that a user never actually sees a failure instead of a
segfault: the cache it tries to load fails and is discarded, so
DynamicMMap takes over and a new cache is built.

Closes: 803417
---
 apt-pkg/cachefile.cc | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/apt-pkg/cachefile.cc b/apt-pkg/cachefile.cc
index aaa2436c5..39f1e72db 100644
--- a/apt-pkg/cachefile.cc
+++ b/apt-pkg/cachefile.cc
@@ -80,6 +80,8 @@ bool pkgCacheFile::BuildCaches(OpProgress *Progress, bool WithLock)
       if (file.IsOpen() == false || file.Failed())
         return false;
       Map = new MMap(file, MMap::Public|MMap::ReadOnly);
+      if (unlikely(Map->validData() == false))
+         return false;
       Cache = new pkgCache(Map);
       return _error->PendingError() == false;
    }
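
For context, below is a minimal, self-contained sketch of the guard pattern
the patch relies on: the mapping object records whether the underlying
mmap() succeeded, and callers test that flag instead of dereferencing a
null or garbage base pointer. This is not apt's actual MMap implementation;
the FileMapping class, its validData() analogue, and the cache path used in
main() are hypothetical stand-ins assuming a plain read-only mmap().

// sketch.cc - illustrative only, not apt code
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstddef>
#include <cstdio>

class FileMapping              // hypothetical stand-in for apt's MMap
{
   void *Base;
   size_t Size;
 public:
   explicit FileMapping(int fd) : Base(MAP_FAILED), Size(0)
   {
      struct stat st;
      if (fstat(fd, &st) == 0 && st.st_size > 0)
      {
         Size = static_cast<size_t>(st.st_size);
         // may fail under memory pressure; the failure is recorded, not fatal
         Base = mmap(nullptr, Size, PROT_READ, MAP_SHARED, fd, 0);
      }
   }
   ~FileMapping() { if (Base != MAP_FAILED) munmap(Base, Size); }
   // analogue of MMap::validData(): true only if the mapping is usable
   bool validData() const { return Base != MAP_FAILED && Base != nullptr; }
   const void *data() const { return Base; }
   size_t size() const { return Size; }
};

int main(int argc, char **argv)
{
   const char *path = argc > 1 ? argv[1] : "/var/cache/apt/pkgcache.bin";
   int fd = open(path, O_RDONLY);
   if (fd < 0)
      return 1;
   FileMapping map(fd);
   if (!map.validData())
   {
      // mapping failed (out of memory, empty or unreadable file, ...):
      // report it and let the caller rebuild the cache instead of crashing
      std::fprintf(stderr, "could not map %s, would rebuild cache\n", path);
      close(fd);
      return 1;
   }
   std::printf("mapped %zu bytes of %s\n", map.size(), path);
   close(fd);
   return 0;
}

The caller-side check mirrors what the patch adds to BuildCaches(): if the
mapping is unusable, bail out early so the higher layer can fall back to
generating a fresh cache rather than handing a broken map to pkgCache.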