path: root/apt-pkg/memrchr.cc
Diffstat (limited to 'apt-pkg/memrchr.cc')
-rw-r--r--  apt-pkg/memrchr.cc  157
1 file changed, 157 insertions, 0 deletions
diff --git a/apt-pkg/memrchr.cc b/apt-pkg/memrchr.cc
new file mode 100644
index 000000000..edf8f346a
--- /dev/null
+++ b/apt-pkg/memrchr.cc
@@ -0,0 +1,157 @@
+/* memrchr -- find the last occurrence of a byte in a memory block
+
+ Copyright (C) 1991, 1993, 1996-1997, 1999-2000, 2003-2015 Free Software
+ Foundation, Inc.
+
+ Based on strlen implementation by Torbjorn Granlund (tege@sics.se),
+ with help from Dan Sahlin (dan@sics.se) and
+ commentary by Jim Blandy (jimb@ai.mit.edu);
+ adaptation to memchr suggested by Dick Karpinski (dick@cca.ucsf.edu),
+ and implemented by Roland McGrath (roland@ai.mit.edu).
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+#include <config.h>
+
+#ifndef HAVE_MEMRCHR
+#define reg_char char
+
+#include <string.h>
+#include <limits.h>
+
+#undef __memrchr
+#ifdef _LIBC
+# undef memrchr
+#endif
+
+#ifndef weak_alias
+# define __memrchr memrchr
+#endif
+
+/* Search no more than N bytes of S for C; return a pointer to the last
+   occurrence, or NULL if C does not occur. */
+extern "C" void *
+memrchr (const void *s, int c_in, size_t n)
+{
+ /* On 32-bit hardware, choosing longword to be a 32-bit unsigned
+ long instead of a 64-bit uintmax_t tends to give better
+ performance. On 64-bit hardware, unsigned long is generally 64
+ bits already. Change this typedef to experiment with
+ performance. */
+ typedef unsigned long int longword;
+
+ const unsigned char *char_ptr;
+ const longword *longword_ptr;
+ longword repeated_one;
+ longword repeated_c;
+ unsigned reg_char c;
+
+ c = (unsigned char) c_in;
+
+ /* Handle the last few bytes by reading one byte at a time.
+ Do this until CHAR_PTR is aligned on a longword boundary. */
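+  /* For example, with 4-byte longwords, s + n == (char *) 0x1007 and n >= 3,
+     the bytes at 0x1006, 0x1005 and 0x1004 are examined one by one, after
+     which char_ptr == 0x1004 is longword-aligned (the address is purely
+     illustrative).  */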
+ for (char_ptr = (const unsigned char *) s + n;
+ n > 0 && (size_t) char_ptr % sizeof (longword) != 0;
+ --n)
+ if (*--char_ptr == c)
+ return (void *) char_ptr;
+
+ longword_ptr = (const longword *) char_ptr;
+
+ /* All these elucidatory comments refer to 4-byte longwords,
+ but the theory applies equally well to any size longwords. */
+
+ /* Compute auxiliary longword values:
+ repeated_one is a value which has a 1 in every byte.
+ repeated_c has c in every byte. */
+ repeated_one = 0x01010101;
+ repeated_c = c | (c << 8);
+ repeated_c |= repeated_c << 16;
+ if (0xffffffffU < (longword) -1)
+ {
+ repeated_one |= repeated_one << 31 << 1;
+ repeated_c |= repeated_c << 31 << 1;
+ if (8 < sizeof (longword))
+ {
+ size_t i;
+
+ for (i = 64; i < sizeof (longword) * 8; i *= 2)
+ {
+ repeated_one |= repeated_one << i;
+ repeated_c |= repeated_c << i;
+ }
+ }
+ }
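+
+  /* For example, with c == 0x2f and 8-byte longwords, the code above yields
+     repeated_one == 0x0101010101010101 and repeated_c == 0x2f2f2f2f2f2f2f2f
+     (0x2f is just an illustrative byte value).  */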
+
+ /* Instead of the traditional loop which tests each byte, we will test a
+ longword at a time. The tricky part is testing if *any of the four*
+ bytes in the longword in question are equal to c. We first use an xor
+ with repeated_c. This reduces the task to testing whether *any of the
+ four* bytes in longword1 is zero.
+
+ We compute tmp =
+ ((longword1 - repeated_one) & ~longword1) & (repeated_one << 7).
+ That is, we perform the following operations:
+ 1. Subtract repeated_one.
+ 2. & ~longword1.
+ 3. & a mask consisting of 0x80 in every byte.
+ Consider what happens in each byte:
+ - If a byte of longword1 is zero, steps 1 and 2 transform it into 0xff,
+ and step 3 transforms it into 0x80. A carry can also be propagated
+ to more significant bytes.
+ - If a byte of longword1 is nonzero, let its lowest 1 bit be at
+ position k (0 <= k <= 7); so the lowest k bits are 0. After step 1,
+ the byte ends in a single bit of value 0 and k bits of value 1.
+ After step 2, the result is just k bits of value 1: 2^k - 1. After
+ step 3, the result is 0. And no carry is produced.
+ So, if longword1 has only non-zero bytes, tmp is zero.
+ Whereas if longword1 has a zero byte, call j the position of the least
+ significant zero byte. Then the result has a zero at positions 0, ...,
+ j-1 and a 0x80 at position j. We cannot predict the result at the more
+ significant bytes (positions j+1..3), but it does not matter since we
+ already have a non-zero bit at position 8*j+7.
+
+ So, the test whether any byte in longword1 is zero is equivalent to
+ testing whether tmp is nonzero. */
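+
+  /* A worked example with 4-byte longwords:
+       longword1 = 0x12345600 (lowest byte is zero):
+         0x12345600 - 0x01010101  = 0x113354ff
+         0x113354ff & ~0x12345600 = 0x010300ff
+         0x010300ff & 0x80808080  = 0x00000080   (nonzero: a zero byte exists)
+       longword1 = 0x12345678 (no zero byte):
+         0x12345678 - 0x01010101  = 0x11335577
+         0x11335577 & ~0x12345678 = 0x01030107
+         0x01030107 & 0x80808080  = 0x00000000   (zero: no match here)  */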
+
+ while (n >= sizeof (longword))
+ {
+ longword longword1 = *--longword_ptr ^ repeated_c;
+
+ if ((((longword1 - repeated_one) & ~longword1)
+ & (repeated_one << 7)) != 0)
+ {
+ longword_ptr++;
+ break;
+ }
+ n -= sizeof (longword);
+ }
+
+ char_ptr = (const unsigned char *) longword_ptr;
+
+ /* At this point, we know that either n < sizeof (longword), or one of the
+ sizeof (longword) bytes just before char_ptr is == c. On little-endian
+ machines, we could determine the first such byte without any further
+ memory accesses, just by looking at the tmp result from the last loop
+ iteration. But this does not work on big-endian machines. Choose code
+ that works in both cases. */
+
+ while (n-- > 0)
+ {
+ if (*--char_ptr == c)
+ return (void *) char_ptr;
+ }
+
+ return NULL;
+}
+#endif
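
For context, a minimal usage sketch of the interface this fallback provides, assuming only the standard memrchr semantics (return a pointer to the last occurrence of byte C within the first N bytes of S, or NULL if it is absent); the path string below is purely illustrative:

  #define _GNU_SOURCE          /* expose the memrchr declaration on glibc */
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
    const char path[] = "dists/stable/main/binary-amd64/Packages";
    /* Find the last '/' so the final path component can be split off. */
    const char *slash = (const char *) memrchr(path, '/', strlen(path));
    if (slash != NULL)
      printf("last component: %s\n", slash + 1);   /* prints "Packages" */
    return 0;
  }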