
SipHasher128: use more named constants, update comments

Tyson Nottingham 2020-10-04 19:39:17 -07:00
parent f6f96e2a87
commit b86161ad9c


@@ -7,10 +7,11 @@ use std::ptr;
 #[cfg(test)]
 mod tests;
 
+const ELEM_SIZE: usize = mem::size_of::<u64>();
 const BUFFER_SIZE_ELEMS: usize = 8;
-const BUFFER_SIZE_BYTES: usize = BUFFER_SIZE_ELEMS * mem::size_of::<u64>();
+const BUFFER_SIZE_BYTES: usize = BUFFER_SIZE_ELEMS * ELEM_SIZE;
 const BUFFER_SIZE_ELEMS_SPILL: usize = BUFFER_SIZE_ELEMS + 1;
-const BUFFER_SIZE_BYTES_SPILL: usize = BUFFER_SIZE_ELEMS_SPILL * mem::size_of::<u64>();
+const BUFFER_SIZE_BYTES_SPILL: usize = BUFFER_SIZE_ELEMS_SPILL * ELEM_SIZE;
 const BUFFER_SPILL_INDEX: usize = BUFFER_SIZE_ELEMS_SPILL - 1;
 
 #[derive(Debug, Clone)]
@@ -54,15 +55,16 @@ macro_rules! compress {
     }};
 }
 
-// Copies up to 8 bytes from source to destination. This may be faster than
-// calling `ptr::copy_nonoverlapping` with an arbitrary count, since all of
-// the copies have fixed sizes and thus avoid calling memcpy.
+// Copies up to 8 bytes from source to destination. This performs better than
+// `ptr::copy_nonoverlapping` on microbenchmarks and may perform better on real
+// workloads since all of the copies have fixed sizes and avoid calling memcpy.
 #[inline]
 unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) {
-    debug_assert!(count <= 8);
+    const COUNT_MAX: usize = 8;
+    debug_assert!(count <= COUNT_MAX);
 
-    if count == 8 {
-        ptr::copy_nonoverlapping(src, dst, 8);
+    if count == COUNT_MAX {
+        ptr::copy_nonoverlapping(src, dst, COUNT_MAX);
         return;
     }
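The rest of the function, which falls outside this hunk, handles counts below eight with a handful of fixed-size copies. A rough standalone sketch of that idea (the name is hypothetical and the real body may differ) looks like this: any count below 8 can be decomposed into at most one 4-byte, one 2-byte, and one 1-byte copy, each of which the compiler lowers to a single load/store rather than a memcpy call.

    // Sketch only: decompose an arbitrary count < 8 into fixed-size copies.
    unsafe fn copy_lt8_sketch(mut src: *const u8, mut dst: *mut u8, count: usize) {
        debug_assert!(count < 8);
        let mut i = 0;
        if i + 3 < count {
            std::ptr::copy_nonoverlapping(src, dst, 4);
            src = src.add(4);
            dst = dst.add(4);
            i += 4;
        }
        if i + 1 < count {
            std::ptr::copy_nonoverlapping(src, dst, 2);
            src = src.add(2);
            dst = dst.add(2);
            i += 2;
        }
        if i < count {
            std::ptr::copy_nonoverlapping(src, dst, 1);
            i += 1;
        }
        debug_assert_eq!(i, count);
    }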
@@ -85,7 +87,7 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
     debug_assert_eq!(i, count);
 }
 
-// Implementation
+// # Implementation
 //
 // This implementation uses buffering to reduce the hashing cost for inputs
 // consisting of many small integers. Buffering simplifies the integration of
@@ -99,10 +101,11 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
 //
 // When a write fills the buffer, a buffer processing function is invoked to
 // hash all of the buffered input. The buffer processing functions are marked
-// #[inline(never)] so that they aren't inlined into the append functions, which
-// ensures the more frequently called append functions remain inlineable and
-// don't include register pushing/popping that would only be made necessary by
-// inclusion of the complex buffer processing path which uses those registers.
+// `#[inline(never)]` so that they aren't inlined into the append functions,
+// which ensures the more frequently called append functions remain inlineable
+// and don't include register pushing/popping that would only be made necessary
+// by inclusion of the complex buffer processing path which uses those
+// registers.
 //
 // The buffer includes a "spill"--an extra element at the end--which simplifies
 // the integer write buffer processing path. The value that fills the buffer can
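The hot-path/cold-path split this comment describes is a general pattern. A minimal sketch of it, with illustrative names that are not from this file and a placeholder in place of the real hashing work, is:

    struct Buffered {
        buf: [u64; 8],
        nbuf: usize, // bytes currently buffered
        acc: u64,    // stand-in for the hash state
    }

    impl Buffered {
        // Hot path: small and inlineable; the common case is a plain store.
        #[inline]
        fn append(&mut self, x: u64) {
            if self.nbuf + 8 == self.buf.len() * 8 {
                // This write fills the buffer: take the non-inlined path.
                self.write_and_process(x);
                return;
            }
            self.buf[self.nbuf / 8] = x;
            self.nbuf += 8;
        }

        // Cold path: kept out of the caller so its register pressure is not
        // pushed into the frequently executed append above.
        #[inline(never)]
        fn write_and_process(&mut self, x: u64) {
            self.buf[self.nbuf / 8] = x;
            // Stand-in for hashing all buffered elements.
            self.acc = self.buf.iter().fold(self.acc, |a, e| a.rotate_left(5) ^ e);
            self.nbuf = 0;
        }
    }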
@@ -118,7 +121,7 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
 // efficiently implemented with an uninitialized buffer. On the other hand, an
 // uninitialized buffer may become more important should a larger one be used.
 //
-// Platform Dependence
+// # Platform Dependence
 //
 // The SipHash algorithm operates on byte sequences. It parses the input stream
 // as 8-byte little-endian integers. Therefore, given the same byte sequence, it
@@ -131,14 +134,14 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
 // native size), or independent (by converting to a common size), supposing the
 // values can be represented in 32 bits.
 //
-// In order to make SipHasher128 consistent with SipHasher in libstd, we choose
-// to do the integer to byte sequence conversion in the platform-dependent way.
-// Clients can achieve (nearly) platform-independent hashing by widening `isize`
-// and `usize` integers to 64 bits on 32-bit systems and byte-swapping integers
-// on big-endian systems before passing them to the writing functions. This
-// causes the input byte sequence to look identical on big- and little- endian
-// systems (supposing `isize` and `usize` values can be represented in 32 bits),
-// which ensures platform-independent results.
+// In order to make `SipHasher128` consistent with `SipHasher` in libstd, we
+// choose to do the integer to byte sequence conversion in the platform-
+// dependent way. Clients can achieve (nearly) platform-independent hashing by
+// widening `isize` and `usize` integers to 64 bits on 32-bit systems and
+// byte-swapping integers on big-endian systems before passing them to the
+// writing functions. This causes the input byte sequence to look identical on
+// big- and little- endian systems (supposing `isize` and `usize` values can be
+// represented in 32 bits), which ensures platform-independent results.
 impl SipHasher128 {
     #[inline]
     pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 {
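For illustration only, the client-side widening and byte-swapping described in that comment could be wrapped in a helper like the following. The helper is hypothetical and not part of this file; `std::hash::Hasher::write_u64` stands in for whichever writing function the client actually uses.

    use std::hash::Hasher;

    // Feed a `usize` so the byte sequence seen by the hasher is the same on
    // 32-/64-bit and big-/little-endian targets (assuming the value fits in
    // 32 bits, per the caveat above).
    fn write_usize_portably<H: Hasher>(hasher: &mut H, value: usize) {
        // Widen so 32- and 64-bit targets contribute the same number of bytes.
        let wide = value as u64;
        // `to_le()` is a no-op on little-endian targets and a byte swap on
        // big-endian ones, so the native-endian write below emits the
        // little-endian byte sequence on every platform.
        hasher.write_u64(wide.to_le());
    }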
@@ -156,7 +159,7 @@ impl SipHasher128 {
         };
 
         unsafe {
-            // Initialize spill because we read from it in short_write_process_buffer.
+            // Initialize spill because we read from it in `short_write_process_buffer`.
             *hasher.buf.get_unchecked_mut(BUFFER_SPILL_INDEX) = MaybeUninit::zeroed();
         }
@@ -190,9 +193,9 @@ impl SipHasher128 {
     // A specialized write function for values with size <= 8 that should only
     // be called when the write would cause the buffer to fill.
     //
-    // SAFETY: the write of x into self.buf starting at byte offset self.nbuf
-    // must cause self.buf to become fully initialized (and not overflow) if it
-    // wasn't already.
+    // SAFETY: the write of `x` into `self.buf` starting at byte offset
+    // `self.nbuf` must cause `self.buf` to become fully initialized (and not
+    // overflow) if it wasn't already.
     #[inline(never)]
     unsafe fn short_write_process_buffer<T>(&mut self, x: T) {
         let size = mem::size_of::<T>();
@@ -223,7 +226,7 @@ impl SipHasher128 {
         ptr::copy_nonoverlapping(src, self.buf.as_mut_ptr() as *mut u8, size - 1);
 
         // This function should only be called when the write fills the buffer.
-        // Therefore, when size == 1, the new self.nbuf must be zero. The size
+        // Therefore, when size == 1, the new `self.nbuf` must be zero. The size
         // is statically known, so the branch is optimized away.
         self.nbuf = if size == 1 { 0 } else { nbuf + size - BUFFER_SIZE_BYTES };
         self.processed += BUFFER_SIZE_BYTES;
@@ -240,7 +243,7 @@ impl SipHasher128 {
             unsafe {
                 let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
 
-                if length < 8 {
+                if length <= 8 {
                     copy_nonoverlapping_small(msg.as_ptr(), dst, length);
                 } else {
                     // This memcpy is *not* optimized away.
@@ -259,9 +262,9 @@ impl SipHasher128 {
     // A write function for byte slices that should only be called when the
     // write would cause the buffer to fill.
     //
-    // SAFETY: self.buf must be initialized up to the byte offset self.nbuf, and
-    // msg must contain enough bytes to initialize the rest of the element
-    // containing the byte offset self.nbuf.
+    // SAFETY: `self.buf` must be initialized up to the byte offset `self.nbuf`,
+    // and `msg` must contain enough bytes to initialize the rest of the element
+    // containing the byte offset `self.nbuf`.
     #[inline(never)]
     unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) {
         let length = msg.len();
@@ -272,8 +275,8 @@ impl SipHasher128 {
         // Always copy first part of input into current element of buffer.
         // This function should only be called when the write fills the buffer,
         // so we know that there is enough input to fill the current element.
-        let valid_in_elem = nbuf & 0x7;
-        let needed_in_elem = 8 - valid_in_elem;
+        let valid_in_elem = nbuf % ELEM_SIZE;
+        let needed_in_elem = ELEM_SIZE - valid_in_elem;
 
         let src = msg.as_ptr();
         let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
@@ -281,10 +284,11 @@ impl SipHasher128 {
         // Process buffer.
 
-        // Using nbuf / 8 + 1 rather than (nbuf + needed_in_elem) / 8 to show
-        // the compiler that this loop's upper bound is > 0. We know that is
-        // true, because last step ensured we have a full element in the buffer.
-        let last = nbuf / 8 + 1;
+        // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
+        // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
+        // We know that is true, because last step ensured we have a full
+        // element in the buffer.
+        let last = nbuf / ELEM_SIZE + 1;
 
         for i in 0..last {
             let elem = self.buf.get_unchecked(i).assume_init().to_le();
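As a standalone check of the claim in that comment (this snippet is illustrative and not part of the commit): the two expressions always agree, but the first form makes the lower bound of 1 syntactically obvious to the compiler.

    fn main() {
        const ELEM_SIZE: usize = 8;
        for nbuf in 0..64 {
            // needed_in_elem is how many bytes finish off the current element.
            let needed_in_elem = ELEM_SIZE - nbuf % ELEM_SIZE;
            assert_eq!(nbuf / ELEM_SIZE + 1, (nbuf + needed_in_elem) / ELEM_SIZE);
        }
        println!("both forms of the loop bound agree");
    }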
@@ -293,26 +297,26 @@ impl SipHasher128 {
             self.state.v0 ^= elem;
         }
 
-        // Process the remaining u64-sized chunks of input.
+        // Process the remaining element-sized chunks of input.
         let mut processed = needed_in_elem;
         let input_left = length - processed;
-        let u64s_left = input_left / 8;
-        let u8s_left = input_left & 0x7;
+        let elems_left = input_left / ELEM_SIZE;
+        let extra_bytes_left = input_left % ELEM_SIZE;
 
-        for _ in 0..u64s_left {
+        for _ in 0..elems_left {
             let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
             self.state.v3 ^= elem;
             Sip24Rounds::c_rounds(&mut self.state);
             self.state.v0 ^= elem;
-            processed += 8;
+            processed += ELEM_SIZE;
         }
 
         // Copy remaining input into start of buffer.
         let src = msg.as_ptr().add(processed);
         let dst = self.buf.as_mut_ptr() as *mut u8;
-        copy_nonoverlapping_small(src, dst, u8s_left);
+        copy_nonoverlapping_small(src, dst, extra_bytes_left);
 
-        self.nbuf = u8s_left;
+        self.nbuf = extra_bytes_left;
         self.processed += nbuf + processed;
     }
@@ -321,7 +325,7 @@ impl SipHasher128 {
         debug_assert!(self.nbuf < BUFFER_SIZE_BYTES);
 
         // Process full elements in buffer.
-        let last = self.nbuf / 8;
+        let last = self.nbuf / ELEM_SIZE;
 
         // Since we're consuming self, avoid updating members for a potential
         // performance gain.
@@ -335,14 +339,14 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
         }
 
         // Get remaining partial element.
-        let elem = if self.nbuf % 8 != 0 {
+        let elem = if self.nbuf % ELEM_SIZE != 0 {
             unsafe {
                 // Ensure element is initialized by writing zero bytes. At most
-                // seven are required given the above check. It's safe to write
-                // this many because we have the spill element and we maintain
-                // self.nbuf such that this write will start before the spill.
+                // `ELEM_SIZE - 1` are required given the above check. It's safe
+                // to write this many because we have the spill and we maintain
+                // `self.nbuf` such that this write will start before the spill.
                 let dst = (self.buf.as_mut_ptr() as *mut u8).add(self.nbuf);
-                ptr::write_bytes(dst, 0, 7);
+                ptr::write_bytes(dst, 0, ELEM_SIZE - 1);
                 self.buf.get_unchecked(last).assume_init().to_le()
             }
         } else {
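To make the zero-padding concrete, here is a small safe-Rust illustration, hypothetical and not taken from this file: if three input bytes remain, zero-filling the rest of the element means the final little-endian read sees only those bytes in its low-order positions.

    fn main() {
        const ELEM_SIZE: usize = 8;
        // Three leftover input bytes; the zeroed tail plays the role of
        // `ptr::write_bytes(dst, 0, ELEM_SIZE - 1)` above.
        let remaining = [0xaau8, 0xbb, 0xcc];
        let mut elem_bytes = [0u8; ELEM_SIZE];
        elem_bytes[..remaining.len()].copy_from_slice(&remaining);
        let elem = u64::from_le_bytes(elem_bytes);
        assert_eq!(elem, 0x0000_0000_00cc_bbaa);
    }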