Rollup merge of #68914 - nnethercote:speed-up-SipHasher128, r=michaelwoerister
Speed up `SipHasher128`.

The current code in `SipHasher128::short_write` is inefficient. It uses `u8to64_le` (which is complex and slow) to extract just the right number of bytes of the input into a u64 and pad the result with zeroes. It then left-shifts that value in order to bitwise-OR it with `self.tail`.

For example, imagine we have a u32 input `0xIIHH_GGFF` and only need three bytes to fill up `self.tail`. The current code uses `u8to64_le` to construct `0x0000_0000_00HH_GGFF`, which is just `0xIIHH_GGFF` with the `0xII` removed and zero-extended to a u64. The code then left-shifts that value by five bytes -- discarding the `0x00` byte that replaced the `0xII` byte! -- to give `0xHHGG_FF00_0000_0000`. It then ORs that value with `self.tail`.

There's a much simpler way to do it: zero-extend to u64 first, then left-shift. E.g. `0xIIHH_GGFF` is zero-extended to `0x0000_0000_IIHH_GGFF`, and then left-shifted to `0xHHGG_FF00_0000_0000`. We don't have to take time to exclude the unneeded `0xII` byte, because it just gets shifted out anyway! It also avoids multiple occurrences of `unsafe`.

There's a similar story with the setting of `self.tail` at the method's end. The current code uses `u8to64_le` to extract the remaining part of the input, but the same effect can be achieved more quickly with a right shift on the zero-extended input.

This commit changes `SipHasher128` to use the simpler shift-based approach. The code is also smaller, which means that `short_write` is now inlined where previously it wasn't, which makes things faster again.

This gives big speed-ups for all incremental builds, especially "baseline" incremental builds.

r? @michaelwoerister
This commit is contained in: commit f2d829ce6a
1 changed file with 84 additions and 80 deletions
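Before the diff: a standalone sketch reproducing the worked example from the commit message, with `0xIIHH_GGFF` instantiated as `0x3322_1100`. The names `old_way` and `new_way` are invented for illustration; this is not code from the patch.

// Standalone illustration of the optimization described in the commit
// message; `old_way` and `new_way` are invented names.
fn main() {
    let input: u32 = 0x3322_1100; // the 0xIIHH_GGFF example
    let ntail: usize = 5; // five bytes already buffered in `self.tail`
    let needed = 8 - ntail; // three more bytes fill the tail

    // Old approach: extract only the `needed` low bytes into a zero-padded
    // u64 (what `u8to64_le` did), then left-shift into position.
    let bytes = input.to_le_bytes();
    let mut extracted = 0u64;
    for i in 0..needed {
        extracted |= (bytes[i] as u64) << (8 * i);
    }
    let old_way = extracted << (8 * ntail);

    // New approach: zero-extend the whole input to u64, then left-shift.
    // The unneeded 0xII byte (0x33) is shifted out for free.
    let new_way = (input as u64) << (8 * ntail);

    assert_eq!(old_way, new_way);
    assert_eq!(new_way, 0x2211_0000_0000_0000); // 0xHHGG_FF00_0000_0000
}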
@@ -4,7 +4,6 @@ use std::cmp;
 use std::hash::Hasher;
 use std::mem;
 use std::ptr;
-use std::slice;
 
 #[cfg(test)]
 mod tests;
@@ -52,46 +51,17 @@ macro_rules! compress {
     }};
 }
 
-/// Loads an integer of the desired type from a byte stream, in LE order. Uses
-/// `copy_nonoverlapping` to let the compiler generate the most efficient way
-/// to load it from a possibly unaligned address.
-///
-/// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
-macro_rules! load_int_le {
-    ($buf:expr, $i:expr, $int_ty:ident) => {{
-        debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
-        let mut data = 0 as $int_ty;
-        ptr::copy_nonoverlapping(
-            $buf.get_unchecked($i),
-            &mut data as *mut _ as *mut u8,
-            mem::size_of::<$int_ty>(),
-        );
-        data.to_le()
-    }};
-}
-
-/// Loads an u64 using up to 7 bytes of a byte slice.
-///
-/// Unsafe because: unchecked indexing at start..start+len
+/// Loads up to 8 bytes from a byte-slice into a little-endian u64.
 #[inline]
-unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
-    debug_assert!(len < 8);
-    let mut i = 0; // current byte index (from LSB) in the output u64
-    let mut out = 0;
-    if i + 3 < len {
-        out = u64::from(load_int_le!(buf, start + i, u32));
-        i += 4;
-    }
-    if i + 1 < len {
-        out |= u64::from(load_int_le!(buf, start + i, u16)) << (i * 8);
-        i += 2
-    }
-    if i < len {
-        out |= u64::from(*buf.get_unchecked(start + i)) << (i * 8);
-        i += 1;
-    }
-    debug_assert_eq!(i, len);
-    out
-}
+fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
+    assert!(len <= 8 && start + len <= buf.len());
+
+    let mut out = 0u64;
+    unsafe {
+        let out_ptr = &mut out as *mut _ as *mut u8;
+        ptr::copy_nonoverlapping(buf.as_ptr().offset(start as isize), out_ptr, len);
+    }
+    out.to_le()
+}
 
 impl SipHasher128 {
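As a sanity check on the new function's contract — the `len` bytes at `buf[start..]` land in the low-order bytes of the result, in little-endian order — here is a hypothetical spot-check, not a test from the commit's test module:

// Hypothetical check of `u8to64_le`, written against its doc comment.
#[test]
fn u8to64_le_loads_low_bytes() {
    let buf = [0xAA, 0xBB, 0xCC, 0xDD, 0xEE];
    assert_eq!(u8to64_le(&buf, 1, 3), 0x0000_0000_00DD_CCBB);
    assert_eq!(u8to64_le(&buf, 0, 5), 0x0000_00EE_DDCC_BBAA);
    assert_eq!(u8to64_le(&buf, 4, 0), 0); // len == 0 is permitted
}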
@@ -122,42 +92,76 @@ impl SipHasher128 {
         self.state.v1 ^= 0xee;
     }
 
-    // Specialized write function that is only valid for buffers with len <= 8.
-    // It's used to force inlining of write_u8 and write_usize, those would normally be inlined
-    // except for composite types (that includes slices and str hashing because of delimiter).
-    // Without this extra push the compiler is very reluctant to inline delimiter writes,
-    // degrading performance substantially for the most common use cases.
+    // A specialized write function for values with size <= 8.
+    //
+    // The hashing of multi-byte integers depends on endianness. E.g.:
+    // - little-endian: `write_u32(0xDDCCBBAA)` == `write([0xAA, 0xBB, 0xCC, 0xDD])`
+    // - big-endian: `write_u32(0xDDCCBBAA)` == `write([0xDD, 0xCC, 0xBB, 0xAA])`
+    //
+    // This function does the right thing for little-endian hardware. On
+    // big-endian hardware `x` must be byte-swapped first to give the right
+    // behaviour. After any byte-swapping, the input must be zero-extended to
+    // 64-bits. The caller is responsible for the byte-swapping and
+    // zero-extension.
     #[inline]
-    fn short_write(&mut self, msg: &[u8]) {
-        debug_assert!(msg.len() <= 8);
-        let length = msg.len();
-        self.length += length;
+    fn short_write<T>(&mut self, _x: T, x: u64) {
+        let size = mem::size_of::<T>();
+        self.length += size;
+
+        // The original number must be zero-extended, not sign-extended.
+        debug_assert!(if size < 8 { x >> (8 * size) == 0 } else { true });
 
+        // The number of bytes needed to fill `self.tail`.
         let needed = 8 - self.ntail;
-        let fill = cmp::min(length, needed);
-        if fill == 8 {
-            self.tail = unsafe { load_int_le!(msg, 0, u64) };
-        } else {
-            self.tail |= unsafe { u8to64_le(msg, 0, fill) } << (8 * self.ntail);
-            if length < needed {
-                self.ntail += length;
-                return;
-            }
+
+        // SipHash parses the input stream as 8-byte little-endian integers.
+        // Inputs are put into `self.tail` until 8 bytes of data have been
+        // collected, and then that word is processed.
+        //
+        // For example, imagine that `self.tail` is 0x0000_00EE_DDCC_BBAA,
+        // `self.ntail` is 5 (because 5 bytes have been put into `self.tail`),
+        // and `needed` is therefore 3.
+        //
+        // - Scenario 1, `self.write_u8(0xFF)`: we have already zero-extended
+        //   the input to 0x0000_0000_0000_00FF. We now left-shift it five
+        //   bytes, giving 0x0000_FF00_0000_0000. We then bitwise-OR that value
+        //   into `self.tail`, resulting in 0x0000_FFEE_DDCC_BBAA.
+        //   (Zero-extension of the original input is critical in this scenario
+        //   because we don't want the high two bytes of `self.tail` to be
+        //   touched by the bitwise-OR.) `self.tail` is not yet full, so we
+        //   return early, after updating `self.ntail` to 6.
+        //
+        // - Scenario 2, `self.write_u32(0xIIHH_GGFF)`: we have already
+        //   zero-extended the input to 0x0000_0000_IIHH_GGFF. We now
+        //   left-shift it five bytes, giving 0xHHGG_FF00_0000_0000. We then
+        //   bitwise-OR that value into `self.tail`, resulting in
+        //   0xHHGG_FFEE_DDCC_BBAA. `self.tail` is now full, and we can use it
+        //   to update `self.state`. (As mentioned above, this assumes a
+        //   little-endian machine; on a big-endian machine we would have
+        //   byte-swapped 0xIIHH_GGFF in the caller, giving 0xFFGG_HHII, and we
+        //   would then end up bitwise-ORing 0xGGHH_II00_0000_0000 into
+        //   `self.tail`).
+        //
+        self.tail |= x << (8 * self.ntail);
+        if size < needed {
+            self.ntail += size;
+            return;
         }
 
+        // `self.tail` is full, process it.
         self.state.v3 ^= self.tail;
         Sip24Rounds::c_rounds(&mut self.state);
         self.state.v0 ^= self.tail;
 
-        // Buffered tail is now flushed, process new input.
-        self.ntail = length - needed;
-        self.tail = unsafe { u8to64_le(msg, needed, self.ntail) };
-    }
-
-    #[inline(always)]
-    fn short_write_gen<T>(&mut self, x: T) {
-        let bytes =
-            unsafe { slice::from_raw_parts(&x as *const T as *const u8, mem::size_of::<T>()) };
-        self.short_write(bytes);
+        // Continuing scenario 2: we have one byte left over from the input. We
+        // set `self.ntail` to 1 and `self.tail` to `0x0000_0000_IIHH_GGFF >>
+        // 8*3`, which is 0x0000_0000_0000_00II. (Or on a big-endian machine
+        // the prior byte-swapping would leave us with 0x0000_0000_0000_00FF.)
+        //
+        // The `if` is needed to avoid shifting by 64 bits, which Rust
+        // complains about.
+        self.ntail = size - needed;
+        self.tail = if needed < 8 { x >> (8 * needed) } else { 0 };
     }
 
     #[inline]
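The two scenarios in the new comment can be traced with a small standalone simulation of the tail bookkeeping. This is hypothetical code, with `0xIIHH_GGFF` again instantiated as `0x3322_1100`; the SipHash state mixing is elided.

// Hypothetical trace of scenario 2 from the comment above; only the tail
// bookkeeping is simulated, the c_rounds mixing is elided.
fn main() {
    let mut tail: u64 = 0x0000_00EE_DDCC_BBAA;
    let mut ntail: usize = 5;
    let needed = 8 - ntail; // 3

    // `write_u32(0x3322_1100)`, already zero-extended (and, on big-endian
    // hardware, byte-swapped) by the caller.
    let x: u64 = 0x3322_1100;
    let size = 4; // size_of::<u32>()

    tail |= x << (8 * ntail);
    assert_eq!(tail, 0x2211_00EE_DDCC_BBAA); // tail is now full: process it

    // ...state.v3 ^= tail; c_rounds; state.v0 ^= tail...

    ntail = size - needed; // one input byte is left over
    tail = if needed < 8 { x >> (8 * needed) } else { 0 };
    assert_eq!((ntail, tail), (1, 0x33)); // the leftover 0xII byte
}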
@@ -182,52 +186,52 @@ impl SipHasher128 {
 impl Hasher for SipHasher128 {
     #[inline]
     fn write_u8(&mut self, i: u8) {
-        self.short_write_gen(i);
+        self.short_write(i, i as u64);
     }
 
     #[inline]
     fn write_u16(&mut self, i: u16) {
-        self.short_write_gen(i);
+        self.short_write(i, i.to_le() as u64);
     }
 
     #[inline]
     fn write_u32(&mut self, i: u32) {
-        self.short_write_gen(i);
+        self.short_write(i, i.to_le() as u64);
     }
 
     #[inline]
     fn write_u64(&mut self, i: u64) {
-        self.short_write_gen(i);
+        self.short_write(i, i.to_le() as u64);
     }
 
     #[inline]
     fn write_usize(&mut self, i: usize) {
-        self.short_write_gen(i);
+        self.short_write(i, i.to_le() as u64);
     }
 
     #[inline]
     fn write_i8(&mut self, i: i8) {
-        self.short_write_gen(i);
+        self.short_write(i, i as u8 as u64);
     }
 
     #[inline]
     fn write_i16(&mut self, i: i16) {
-        self.short_write_gen(i);
+        self.short_write(i, (i as u16).to_le() as u64);
     }
 
     #[inline]
     fn write_i32(&mut self, i: i32) {
-        self.short_write_gen(i);
+        self.short_write(i, (i as u32).to_le() as u64);
     }
 
     #[inline]
     fn write_i64(&mut self, i: i64) {
-        self.short_write_gen(i);
+        self.short_write(i, (i as u64).to_le() as u64);
     }
 
     #[inline]
     fn write_isize(&mut self, i: isize) {
-        self.short_write_gen(i);
+        self.short_write(i, (i as usize).to_le() as u64);
     }
 
     #[inline]
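The `to_le()` in each multi-byte `write_*` method is what keeps hashes platform-independent: after the swap, hashing an integer is equivalent to hashing its little-endian byte representation on any hardware. A hypothetical usage sketch follows; it assumes the type's `new_with_keys` constructor and `finish128` method, which are not shown in this diff.

// Hypothetical usage sketch; `new_with_keys` and `finish128` are assumed.
let mut a = SipHasher128::new_with_keys(0, 0);
a.write_u32(0xDDCC_BBAA);

let mut b = SipHasher128::new_with_keys(0, 0);
b.write(&[0xAA, 0xBB, 0xCC, 0xDD]);

// Equal on little- and big-endian hardware alike, because `write_u32`
// byte-swaps to little-endian before calling `short_write`.
assert_eq!(a.finish128(), b.finish128());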
@@ -239,7 +243,7 @@ impl Hasher for SipHasher128 {
 
         if self.ntail != 0 {
             needed = 8 - self.ntail;
-            self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << (8 * self.ntail);
+            self.tail |= u8to64_le(msg, 0, cmp::min(length, needed)) << (8 * self.ntail);
             if length < needed {
                 self.ntail += length;
                 return;
@@ -257,7 +261,7 @@ impl Hasher for SipHasher128 {
 
         let mut i = needed;
         while i < len - left {
-            let mi = unsafe { load_int_le!(msg, i, u64) };
+            let mi = u8to64_le(msg, i, 8);
 
             self.state.v3 ^= mi;
             Sip24Rounds::c_rounds(&mut self.state);
@@ -266,7 +270,7 @@ impl Hasher for SipHasher128 {
             i += 8;
         }
 
-        self.tail = unsafe { u8to64_le(msg, i, left) };
+        self.tail = u8to64_le(msg, i, left);
         self.ntail = left;
     }
 
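In the bulk loop of `write` (the last three hunks), the relaxed `len <= 8` bound lets `u8to64_le(msg, i, 8)` also serve the full-word loads that `load_int_le!(msg, i, u64)` used to handle, so the remaining `unsafe` blocks disappear. A hypothetical spot-check, not from the commit:

// Hypothetical check that a len == 8 load reads a full little-endian word
// at any starting offset.
let msg = [1u8, 2, 3, 4, 5, 6, 7, 8, 9];
assert_eq!(u8to64_le(&msg, 0, 8), 0x0807_0605_0403_0201);
assert_eq!(u8to64_le(&msg, 1, 8), 0x0908_0706_0504_0302);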