1
Fork 0

hashmap: port to the new allocator API

This commit is contained in:
Daniel Micay 2014-05-11 04:19:05 -04:00
parent 032510bae2
commit 7da24ea1a9

View file

@@ -30,8 +30,6 @@ use std::result::{Ok, Err};
 use std::slice::ImmutableVector;

 mod table {
-    extern crate libc;
     use std::clone::Clone;
     use std::cmp;
     use std::cmp::Eq;
@@ -42,10 +40,10 @@ mod table {
     use std::prelude::Drop;
     use std::ptr;
     use std::ptr::RawPtr;
-    use std::rt::libc_heap;
-    use std::intrinsics::{size_of, min_align_of, transmute};
-    use std::intrinsics::{move_val_init, set_memory};
+    use std::mem::{min_align_of, size_of};
+    use std::intrinsics::{move_val_init, set_memory, transmute};
     use std::iter::{Iterator, range_step_inclusive};
+    use std::rt::heap::{allocate, deallocate};

     static EMPTY_BUCKET: u64 = 0u64;
@@ -185,10 +183,6 @@ mod table {
         assert_eq!(round_up_to_next(5, 4), 8);
     }

-    fn has_alignment(n: uint, alignment: uint) -> bool {
-        round_up_to_next(n, alignment) == n
-    }
-
     // Returns a tuple of (minimum required malloc alignment, hash_offset,
     // key_offset, val_offset, array_size), from the start of a mallocated array.
     fn calculate_offsets(
@@ -243,12 +237,7 @@ mod table {
                                    keys_size, min_align_of::<K>(),
                                    vals_size, min_align_of::<V>());

-            let buffer = libc_heap::malloc_raw(size) as *mut u8;
-
-            // FIXME #13094: If malloc was not at as aligned as we expected,
-            // our offset calculations are just plain wrong. We could support
-            // any alignment if we switched from `malloc` to `posix_memalign`.
-            assert!(has_alignment(buffer as uint, malloc_alignment));
+            let buffer = allocate(size, malloc_alignment);

             let hashes = buffer.offset(hash_offset as int) as *mut u64;
             let keys = buffer.offset(keys_offset as int) as *mut K;
@@ -418,7 +407,7 @@ mod table {
     // modified to no longer assume this.
     #[test]
     fn can_alias_safehash_as_u64() {
-        unsafe { assert_eq!(size_of::<SafeHash>(), size_of::<u64>()) };
+        assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
     }

     pub struct Entries<'a, K, V> {
@@ -560,8 +549,15 @@ mod table {
             assert_eq!(self.size, 0);

+            let hashes_size = self.capacity * size_of::<u64>();
+            let keys_size = self.capacity * size_of::<K>();
+            let vals_size = self.capacity * size_of::<V>();
+            let (align, _, _, _, size) = calculate_offsets(hashes_size, min_align_of::<u64>(),
+                                                           keys_size, min_align_of::<K>(),
+                                                           vals_size, min_align_of::<V>());
             unsafe {
-                libc::free(self.hashes as *mut libc::c_void);
+                deallocate(self.hashes as *mut u8, size, align);
                 // Remember how everything was allocated out of one buffer
                 // during initialization? We only need one call to free here.
             }