auto merge of #14006 : thestinger/rust/jemalloc, r=alexcrichton
Closes #11807
Commit adb8b0b230
30 changed files with 570 additions and 217 deletions
.gitattributes (1 change; vendored)

@@ -8,4 +8,3 @@ src/etc/pkg/rust-logo.ico binary
 src/etc/pkg/rust-logo.png binary
 src/rt/msvc/* -whitespace
 src/rt/vg/* -whitespace
-src/rt/jemalloc/**/* -whitespace
.gitmodules (3 changes; vendored)

@@ -15,3 +15,6 @@
 [submodule "src/rt/hoedown"]
 	path = src/rt/hoedown
 	url = https://github.com/rust-lang/hoedown.git
+[submodule "src/jemalloc"]
+	path = src/jemalloc
+	url = https://github.com/rust-lang/jemalloc.git
configure (1 change; vendored)

@@ -782,6 +782,7 @@ do
 for s in 0 1 2 3
 do
   make_dir $t/rt/stage$s
+make_dir $t/rt/jemalloc
 make_dir $t/rt/libuv
 make_dir $t/rt/libuv/src/ares
 make_dir $t/rt/libuv/src/eio
@@ -57,7 +57,7 @@ CRATES := $(TARGET_CRATES) $(HOST_CRATES)
 TOOLS := compiletest rustdoc rustc
 
 DEPS_core :=
-DEPS_std := core libc native:rustrt native:compiler-rt native:backtrace
+DEPS_std := core libc native:rustrt native:compiler-rt native:backtrace native:jemalloc
 DEPS_green := std rand native:context_switch
 DEPS_rustuv := std native:uv native:uv_support
 DEPS_native := std
@@ -145,6 +145,7 @@ CFG_LIB_NAME_x86_64-unknown-linux-gnu=lib$(1).so
 CFG_STATIC_LIB_NAME_x86_64-unknown-linux-gnu=lib$(1).a
 CFG_LIB_GLOB_x86_64-unknown-linux-gnu=lib$(1)-*.so
 CFG_LIB_DSYM_GLOB_x86_64-unknown-linux-gnu=lib$(1)-*.dylib.dSYM
+CFG_CFLAGS_x86_64-unknown-linux-gnu := -m64
 CFG_GCCISH_CFLAGS_x86_64-unknown-linux-gnu := -Wall -Werror -g -fPIC -m64
 CFG_GCCISH_CXXFLAGS_x86_64-unknown-linux-gnu := -fno-rtti
 CFG_GCCISH_LINK_FLAGS_x86_64-unknown-linux-gnu := -shared -fPIC -ldl -pthread -lrt -g -m64
@@ -172,6 +173,7 @@ CFG_LIB_NAME_i686-unknown-linux-gnu=lib$(1).so
 CFG_STATIC_LIB_NAME_i686-unknown-linux-gnu=lib$(1).a
 CFG_LIB_GLOB_i686-unknown-linux-gnu=lib$(1)-*.so
 CFG_LIB_DSYM_GLOB_i686-unknown-linux-gnu=lib$(1)-*.dylib.dSYM
+CFG_CFLAGS_i686-unknown-linux-gnu := -m32
 CFG_GCCISH_CFLAGS_i686-unknown-linux-gnu := -Wall -Werror -g -fPIC -m32
 CFG_GCCISH_CXXFLAGS_i686-unknown-linux-gnu := -fno-rtti
 CFG_GCCISH_LINK_FLAGS_i686-unknown-linux-gnu := -shared -fPIC -ldl -pthread -lrt -g -m32
@@ -201,6 +203,7 @@ AR_arm-apple-darwin = $(shell xcrun -find -sdk iphoneos ar)
 CFG_LIB_NAME_arm-apple-darwin = lib$(1).dylib
 CFG_LIB_GLOB_arm-apple-darwin = lib$(1)-*.dylib
 CFG_LIB_DSYM_GLOB_arm-apple-darwin = lib$(1)-*.dylib.dSYM
+CFG_CFLAGS_arm-apple-darwin := $(CFG_IOS_FLAGS)
 CFG_GCCISH_CFLAGS_arm-apple-darwin := -Wall -Werror -g -fPIC $(CFG_IOS_FLAGS)
 CFG_GCCISH_CXXFLAGS_arm-apple-darwin := -fno-rtti $(CFG_IOS_FLAGS)
 CFG_GCCISH_LINK_FLAGS_arm-apple-darwin := -dynamiclib -lpthread -framework CoreServices -Wl,-no_compact_unwind
@@ -229,6 +232,7 @@ CFG_LIB_NAME_x86_64-apple-darwin=lib$(1).dylib
 CFG_STATIC_LIB_NAME_x86_64-apple-darwin=lib$(1).a
 CFG_LIB_GLOB_x86_64-apple-darwin=lib$(1)-*.dylib
 CFG_LIB_DSYM_GLOB_x86_64-apple-darwin=lib$(1)-*.dylib.dSYM
+CFG_CFLAGS_x86_64-apple-darwin := -m64 -arch x86_64
 CFG_GCCISH_CFLAGS_x86_64-apple-darwin := -Wall -Werror -g -fPIC -m64 -arch x86_64
 CFG_GCCISH_CXXFLAGS_x86_64-apple-darwin := -fno-rtti
 CFG_GCCISH_LINK_FLAGS_x86_64-apple-darwin := -dynamiclib -pthread -framework CoreServices -m64
@@ -256,6 +260,7 @@ CFG_LIB_NAME_i686-apple-darwin=lib$(1).dylib
 CFG_STATIC_LIB_NAME_i686-apple-darwin=lib$(1).a
 CFG_LIB_GLOB_i686-apple-darwin=lib$(1)-*.dylib
 CFG_LIB_DSYM_GLOB_i686-apple-darwin=lib$(1)-*.dylib.dSYM
+CFG_CFLAGS_i686-apple-darwin := -m32 -arch i386
 CFG_GCCISH_CFLAGS_i686-apple-darwin := -Wall -Werror -g -fPIC -m32 -arch i386
 CFG_GCCISH_CXXFLAGS_i686-apple-darwin := -fno-rtti
 CFG_GCCISH_LINK_FLAGS_i686-apple-darwin := -dynamiclib -pthread -framework CoreServices -m32
@@ -283,6 +288,7 @@ CFG_LIB_NAME_arm-linux-androideabi=lib$(1).so
 CFG_STATIC_LIB_NAME_arm-linux-androideabi=lib$(1).a
 CFG_LIB_GLOB_arm-linux-androideabi=lib$(1)-*.so
 CFG_LIB_DSYM_GLOB_arm-linux-androideabi=lib$(1)-*.dylib.dSYM
+CFG_CFLAGS_arm-linux-androideabi := -D__arm__ -DANDROID -D__ANDROID__
 CFG_GCCISH_CFLAGS_arm-linux-androideabi := -Wall -g -fPIC -D__arm__ -DANDROID -D__ANDROID__
 CFG_GCCISH_CXXFLAGS_arm-linux-androideabi := -fno-rtti
 CFG_GCCISH_LINK_FLAGS_arm-linux-androideabi := -shared -fPIC -ldl -g -lm -lsupc++
@@ -313,6 +319,7 @@ CFG_LIB_NAME_arm-unknown-linux-gnueabihf=lib$(1).so
 CFG_STATIC_LIB_NAME_arm-unknown-linux-gnueabihf=lib$(1).a
 CFG_LIB_GLOB_arm-unknown-linux-gnueabihf=lib$(1)-*.so
 CFG_LIB_DSYM_GLOB_arm-unknown-linux-gnueabihf=lib$(1)-*.dylib.dSYM
+CFG_CFLAGS_arm-unknown-linux-gnueabihf := -D__arm__
 CFG_GCCISH_CFLAGS_arm-unknown-linux-gnueabihf := -Wall -g -fPIC -D__arm__
 CFG_GCCISH_CXXFLAGS_arm-unknown-linux-gnueabihf := -fno-rtti
 CFG_GCCISH_LINK_FLAGS_arm-unknown-linux-gnueabihf := -shared -fPIC -g
@@ -343,6 +350,7 @@ CFG_LIB_NAME_arm-unknown-linux-gnueabi=lib$(1).so
 CFG_STATIC_LIB_NAME_arm-unknown-linux-gnueabi=lib$(1).a
 CFG_LIB_GLOB_arm-unknown-linux-gnueabi=lib$(1)-*.so
 CFG_LIB_DSYM_GLOB_arm-unknown-linux-gnueabi=lib$(1)-*.dylib.dSYM
+CFG_CFLAGS_arm-unknown-linux-gnueabi := -D__arm__ -mfpu=vfp
 CFG_GCCISH_CFLAGS_arm-unknown-linux-gnueabi := -Wall -g -fPIC -D__arm__ -mfpu=vfp
 CFG_GCCISH_CXXFLAGS_arm-unknown-linux-gnueabi := -fno-rtti
 CFG_GCCISH_LINK_FLAGS_arm-unknown-linux-gnueabi := -shared -fPIC -g
@@ -372,6 +380,7 @@ CFG_LIB_NAME_mips-unknown-linux-gnu=lib$(1).so
 CFG_STATIC_LIB_NAME_mips-unknown-linux-gnu=lib$(1).a
 CFG_LIB_GLOB_mips-unknown-linux-gnu=lib$(1)-*.so
 CFG_LIB_DSYM_GLOB_mips-unknown-linux-gnu=lib$(1)-*.dylib.dSYM
+CFG_CFLAGS_mips-unknown-linux-gnu := -mips32r2 -msoft-float -mabi=32 -mno-compact-eh
 CFG_GCCISH_CFLAGS_mips-unknown-linux-gnu := -Wall -g -fPIC -mips32r2 -msoft-float -mabi=32 -mno-compact-eh
 CFG_GCCISH_CXXFLAGS_mips-unknown-linux-gnu := -fno-rtti
 CFG_GCCISH_LINK_FLAGS_mips-unknown-linux-gnu := -shared -fPIC -g -mips32r2 -msoft-float -mabi=32
@@ -400,6 +409,7 @@ CFG_LIB_NAME_i686-pc-mingw32=$(1).dll
 CFG_STATIC_LIB_NAME_i686-pc-mingw32=$(1).lib
 CFG_LIB_GLOB_i686-pc-mingw32=$(1)-*.dll
 CFG_LIB_DSYM_GLOB_i686-pc-mingw32=$(1)-*.dylib.dSYM
+CFG_CFLAGS_mips-i686-pc-mingw32 := -m32 -march=i686 -D_WIN32_WINNT=0x0600
 CFG_GCCISH_CFLAGS_i686-pc-mingw32 := -Wall -Werror -g -m32 -march=i686 -D_WIN32_WINNT=0x0600 -I$(CFG_SRC_DIR)src/etc/mingw-fix-include
 CFG_GCCISH_CXXFLAGS_i686-pc-mingw32 := -fno-rtti
 CFG_GCCISH_LINK_FLAGS_i686-pc-mingw32 := -shared -fPIC -g -m32
@@ -428,6 +438,7 @@ CFG_LIB_NAME_i586-mingw32msvc=$(1).dll
 CFG_STATIC_LIB_NAME_i586-mingw32msvc=$(1).lib
 CFG_LIB_GLOB_i586-mingw32msvc=$(1)-*.dll
 CFG_LIB_DSYM_GLOB_i586-mingw32msvc=$(1)-*.dylib.dSYM
+CFG_CFLAGS_i586-mingw32msvc := -march=i586 -m32
 CFG_GCCISH_CFLAGS_i586-mingw32msvc := -Wall -Werror -g -march=i586 -m32
 CFG_GCCISH_CXXFLAGS_i586-mingw32msvc := -fno-rtti
 CFG_GCCISH_LINK_FLAGS_i586-mingw32msvc := -shared -g -m32
@@ -458,6 +469,7 @@ CFG_LIB_NAME_i686-w64-mingw32=$(1).dll
 CFG_STATIC_LIB_NAME_i686-w64-mingw32=$(1).lib
 CFG_LIB_GLOB_i686-w64-mingw32=$(1)-*.dll
 CFG_LIB_DSYM_GLOB_i686-w64-mingw32=$(1)-*.dylib.dSYM
+CFG_CFLAGS_i586-w64-mingw32 := -march=i586 -m32 -D_WIN32_WINNT=0x0600
 CFG_GCCISH_CFLAGS_i686-w64-mingw32 := -Wall -Werror -g -m32 -D_WIN32_WINNT=0x0600
 CFG_GCCISH_CXXFLAGS_i686-w64-mingw32 := -fno-rtti
 CFG_GCCISH_LINK_FLAGS_i686-w64-mingw32 := -shared -g -m32
@@ -487,6 +499,7 @@ CFG_LIB_NAME_x86_64-w64-mingw32=$(1).dll
 CFG_STATIC_LIB_NAME_x86_64-w64-mingw32=$(1).lib
 CFG_LIB_GLOB_x86_64-w64-mingw32=$(1)-*.dll
 CFG_LIB_DSYM_GLOB_x86_64-w64-mingw32=$(1)-*.dylib.dSYM
+CFG_CFLAGS_x86_64-w64-mingw32 := -m64 -D_WIN32_WINNT=0x0600
 CFG_GCCISH_CFLAGS_x86_64-w64-mingw32 := -Wall -Werror -g -m64 -D_WIN32_WINNT=0x0600
 CFG_GCCISH_CXXFLAGS_x86_64-w64-mingw32 := -fno-rtti
 CFG_GCCISH_LINK_FLAGS_x86_64-w64-mingw32 := -shared -g -m64
@@ -515,6 +528,7 @@ CFG_LIB_NAME_x86_64-unknown-freebsd=lib$(1).so
 CFG_STATIC_LIB_NAME_x86_64-unknown-freebsd=lib$(1).a
 CFG_LIB_GLOB_x86_64-unknown-freebsd=lib$(1)-*.so
 CFG_LIB_DSYM_GLOB_x86_64-unknown-freebsd=$(1)-*.dylib.dSYM
+CFG_CFLAGS_x86_64-unknown-freebsd := -I/usr/local/include
 CFG_GCCISH_CFLAGS_x86_64-unknown-freebsd := -Wall -Werror -g -fPIC -I/usr/local/include
 CFG_GCCISH_LINK_FLAGS_x86_64-unknown-freebsd := -shared -fPIC -g -pthread -lrt
 CFG_GCCISH_DEF_FLAG_x86_64-unknown-freebsd := -Wl,--export-dynamic,--dynamic-list=
mk/rt.mk (52 changes)

@@ -122,10 +122,13 @@ $(foreach lib,$(NATIVE_LIBS), \
 ################################################################################
 # Building third-party targets with external build systems
 #
-# The only current member of this section is libuv, but long ago this used to
-# also be occupied by jemalloc. This location is meant for dependencies which
-# have external build systems. It is still assumed that the output of each of
-# these steps is a static library in the correct location.
+# This location is meant for dependencies which have external build systems. It
+# is still assumed that the output of each of these steps is a static library
+# in the correct location.
 ################################################################################
 
+################################################################################
+# libuv
+################################################################################
+
 define DEF_LIBUV_ARCH_VAR
@@ -154,6 +157,11 @@ define DEF_THIRD_PARTY_TARGETS
 
 ifeq ($$(CFG_WINDOWSY_$(1)), 1)
   LIBUV_OSTYPE_$(1) := win
+  # This isn't necessarily a desired option, but it's harmless and works around
+  # what appears to be a mingw-w64 bug.
+  #
+  # https://sourceforge.net/p/mingw-w64/bugs/395/
+  JEMALLOC_ARGS_$(1) := --enable-lazy-lock
 else ifeq ($(OSTYPE_$(1)), apple-darwin)
   LIBUV_OSTYPE_$(1) := mac
 else ifeq ($(OSTYPE_$(1)), unknown-freebsd)
@@ -161,6 +169,7 @@ else ifeq ($(OSTYPE_$(1)), unknown-freebsd)
 else ifeq ($(OSTYPE_$(1)), linux-androideabi)
   LIBUV_OSTYPE_$(1) := android
   LIBUV_ARGS_$(1) := PLATFORM=android host=android OS=linux
+  JEMALLOC_ARGS_$(1) := --disable-tls
 else
   LIBUV_OSTYPE_$(1) := linux
 endif
@@ -220,6 +229,41 @@ $$(LIBUV_DIR_$(1))/Release/libuv.a: $$(LIBUV_DEPS) $$(LIBUV_MAKEFILE_$(1)) \
 
 endif
 
+################################################################################
+# jemalloc
+################################################################################
+
+ifdef CFG_ENABLE_FAST_MAKE
+JEMALLOC_DEPS := $(S)/.gitmodules
+else
+JEMALLOC_DEPS := $(wildcard \
+              $(S)src/jemalloc/* \
+              $(S)src/jemalloc/*/* \
+              $(S)src/jemalloc/*/*/* \
+              $(S)src/jemalloc/*/*/*/*)
+endif
+
+JEMALLOC_NAME_$(1) := $$(call CFG_STATIC_LIB_NAME_$(1),jemalloc)
+ifeq ($$(CFG_WINDOWSY_$(1)),1)
+  JEMALLOC_REAL_NAME_$(1) := $$(call CFG_STATIC_LIB_NAME_$(1),jemalloc_s)
+else
+  JEMALLOC_REAL_NAME_$(1) := $$(call CFG_STATIC_LIB_NAME_$(1),jemalloc_pic)
+endif
+JEMALLOC_LIB_$(1) := $$(RT_OUTPUT_DIR_$(1))/$$(JEMALLOC_NAME_$(1))
+JEMALLOC_BUILD_DIR_$(1) := $$(RT_OUTPUT_DIR_$(1))/jemalloc
+
+$$(JEMALLOC_LIB_$(1)): $$(JEMALLOC_DEPS) $$(MKFILE_DEPS)
+	@$$(call E, make: jemalloc)
+	cd "$$(JEMALLOC_BUILD_DIR_$(1))"; "$(S)src/jemalloc/configure" \
+		$$(JEMALLOC_ARGS_$(1)) --enable-cc-silence --with-jemalloc-prefix=je_ \
+		--disable-experimental --build=$(CFG_BUILD) --host=$(1) \
+		CC="$$(CC_$(1))" \
+		AR="$$(AR_$(1))" \
+		RANLIB="$$(AR_$(1)) s" \
+		EXTRA_CFLAGS="$$(CFG_CFLAGS_$(1))"
+	$$(Q)$$(MAKE) -C "$$(JEMALLOC_BUILD_DIR_$(1))" build_lib_static
+	$$(Q)cp $$(JEMALLOC_BUILD_DIR_$(1))/lib/$$(JEMALLOC_REAL_NAME_$(1)) $$(JEMALLOC_LIB_$(1))
+
 ################################################################################
 # compiler-rt
 ################################################################################
@@ -240,6 +240,7 @@ ALL_HS := $(filter-out $(S)src/rt/vg/valgrind.h \
 tidy:
 	@$(call E, check: formatting)
 	$(Q)find $(S)src -name '*.r[sc]' \
+	| grep '^$(S)src/jemalloc' -v \
 	| grep '^$(S)src/libuv' -v \
 	| grep '^$(S)src/llvm' -v \
 	| grep '^$(S)src/gyp' -v \
@@ -264,8 +265,9 @@ tidy:
 	$(Q)find $(S)src -type f -perm +111 \
 	    -not -name '*.rs' -and -not -name '*.py' \
 	    -and -not -name '*.sh' \
-	| grep '^$(S)src/llvm' -v \
+	| grep '^$(S)src/jemalloc' -v \
 	| grep '^$(S)src/libuv' -v \
+	| grep '^$(S)src/llvm' -v \
 	| grep '^$(S)src/rt/hoedown' -v \
 	| grep '^$(S)src/gyp' -v \
 	| grep '^$(S)src/etc' -v \
src/jemalloc (new submodule)

@@ -0,0 +1 @@
+Subproject commit 6a96910f2eaea6d2c705bb12379b23576b30d7d5
@@ -33,10 +33,11 @@ use std::cmp;
 use std::intrinsics::{TyDesc, get_tydesc};
 use std::intrinsics;
 use std::mem;
+use std::mem::min_align_of;
 use std::num;
 use std::ptr::read;
 use std::rc::Rc;
-use std::rt::global_heap;
+use std::rt::heap::exchange_malloc;
 
 // The way arena uses arrays is really deeply awful. The arrays are
 // allocated, and have capacities reserved, but the fill for the array
@@ -204,7 +205,7 @@ impl Arena {
     #[inline]
     fn alloc_copy<'a, T>(&'a mut self, op: || -> T) -> &'a T {
         unsafe {
-            let ptr = self.alloc_copy_inner(mem::size_of::<T>(), mem::min_align_of::<T>());
+            let ptr = self.alloc_copy_inner(mem::size_of::<T>(), min_align_of::<T>());
             let ptr: *mut T = transmute(ptr);
             mem::move_val_init(&mut (*ptr), op());
             return transmute(ptr);
@@ -261,7 +262,7 @@ impl Arena {
         unsafe {
             let tydesc = get_tydesc::<T>();
             let (ty_ptr, ptr) =
-                self.alloc_noncopy_inner(mem::size_of::<T>(), mem::min_align_of::<T>());
+                self.alloc_noncopy_inner(mem::size_of::<T>(), min_align_of::<T>());
             let ty_ptr: *mut uint = transmute(ty_ptr);
             let ptr: *mut T = transmute(ptr);
             // Write in our tydesc along with a bit indicating that it
@@ -353,7 +354,29 @@ struct TypedArenaChunk<T> {
 }
 
 impl<T> TypedArenaChunk<T> {
+    #[cfg(stage0)]
     #[inline]
+    fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
+           -> Box<TypedArenaChunk<T>> {
+        let mut size = mem::size_of::<TypedArenaChunk<T>>();
+        size = round_up(size, min_align_of::<T>());
+        let elem_size = mem::size_of::<T>();
+        let elems_size = elem_size.checked_mul(&capacity).unwrap();
+        size = size.checked_add(&elems_size).unwrap();
+
+        let mut chunk = unsafe {
+            let chunk = exchange_malloc(size);
+            let mut chunk: Box<TypedArenaChunk<T>> = cast::transmute(chunk);
+            mem::move_val_init(&mut chunk.next, next);
+            chunk
+        };
+
+        chunk.capacity = capacity;
+        chunk
+    }
+
+    #[inline]
+    #[cfg(not(stage0))]
     fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
            -> Box<TypedArenaChunk<T>> {
         let mut size = mem::size_of::<TypedArenaChunk<T>>();
@@ -363,7 +386,7 @@ impl<T> TypedArenaChunk<T> {
         size = size.checked_add(&elems_size).unwrap();
 
         let mut chunk = unsafe {
-            let chunk = global_heap::exchange_malloc(size);
+            let chunk = exchange_malloc(size, min_align_of::<TypedArenaChunk<T>>());
             let mut chunk: Box<TypedArenaChunk<T>> = cast::transmute(chunk);
             mem::move_val_init(&mut chunk.next, next);
             chunk
@@ -402,7 +425,7 @@ impl<T> TypedArenaChunk<T> {
     fn start(&self) -> *u8 {
         let this: *TypedArenaChunk<T> = self;
         unsafe {
-            cast::transmute(round_up(this.offset(1) as uint, mem::min_align_of::<T>()))
+            cast::transmute(round_up(this.offset(1) as uint, min_align_of::<T>()))
         }
     }
 
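For illustration only, the chunk-size arithmetic that `TypedArenaChunk::new` performs above (pad the chunk header to the element type's alignment, then add `capacity * size_of::<T>()` with overflow checks) can be sketched in current Rust. The `Header` type and the concrete numbers below are stand-ins, not part of the patch:

    use std::mem::{align_of, size_of};

    // Stand-in for the TypedArenaChunk<T> header fields.
    struct Header {
        next: *const Header,
        capacity: usize,
    }

    // Round `size` up to the next multiple of a power-of-two `align`.
    fn round_up(size: usize, align: usize) -> usize {
        assert!(align.is_power_of_two());
        (size + align - 1) & !(align - 1)
    }

    // Header padded to T's alignment, then `capacity` elements of T, with overflow checks.
    fn chunk_size<T>(capacity: usize) -> usize {
        let header = round_up(size_of::<Header>(), align_of::<T>());
        let elems = size_of::<T>().checked_mul(capacity).expect("capacity overflow");
        header.checked_add(elems).expect("size overflow")
    }

    fn main() {
        // On a 64-bit target the stand-in header is 16 bytes, so 100 u64
        // elements need 16 + 800 = 816 bytes.
        println!("{}", chunk_size::<u64>(100));
    }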
@@ -42,7 +42,7 @@ mod table {
     use std::prelude::Drop;
     use std::ptr;
     use std::ptr::RawPtr;
-    use std::rt::global_heap;
+    use std::rt::libc_heap;
     use std::intrinsics::{size_of, min_align_of, transmute};
     use std::intrinsics::{move_val_init, set_memory};
     use std::iter::{Iterator, range_step_inclusive};
@@ -243,7 +243,7 @@ mod table {
                 keys_size, min_align_of::< K >(),
                 vals_size, min_align_of::< V >());
 
-            let buffer = global_heap::malloc_raw(size) as *mut u8;
+            let buffer = libc_heap::malloc_raw(size) as *mut u8;
 
             // FIXME #13094: If malloc was not at as aligned as we expected,
             // our offset calculations are just plain wrong. We could support
@@ -29,13 +29,30 @@ use str::StrSlice;
 
 #[allow(ctypes)]
 extern {
-    fn malloc(size: uint) -> *u8;
-    fn free(ptr: *u8);
+    #[cfg(stage0)]
+    fn rust_malloc(size: uint) -> *u8;
+    #[cfg(not(stage0))]
+    fn rust_malloc(size: uint, align: uint) -> *u8;
+    fn rust_free(ptr: *u8, size: uint, align: uint);
 }
 
+#[cfg(stage0)]
 unsafe fn alloc(cap: uint) -> *mut Vec<()> {
     let cap = cap.checked_add(&mem::size_of::<Vec<()>>()).unwrap();
-    let ret = malloc(cap) as *mut Vec<()>;
+    let ret = rust_malloc(cap) as *mut Vec<()>;
+    if ret.is_null() {
+        intrinsics::abort();
+    }
+    (*ret).fill = 0;
+    (*ret).alloc = cap;
+    ret
+}
+
+#[cfg(not(stage0))]
+unsafe fn alloc(cap: uint) -> *mut Vec<()> {
+    let cap = cap.checked_add(&mem::size_of::<Vec<()>>()).unwrap();
+    // this should use the real alignment, but the new representation will take care of that
+    let ret = rust_malloc(cap, 8) as *mut Vec<()>;
     if ret.is_null() {
         intrinsics::abort();
     }
@@ -102,7 +119,8 @@ impl FromIterator<char> for ~str {
             ptr::copy_nonoverlapping_memory(&mut (*ptr2).data,
                                             &(*ptr).data,
                                             len);
-            free(ptr as *u8);
+            // FIXME: #13994: port to the sized deallocation API when available
+            rust_free(ptr as *u8, 0, 8);
             cast::forget(ret);
             ret = cast::transmute(ptr2);
             ptr = ptr2;
@@ -172,7 +190,7 @@ impl<A: Clone> Clone for ~[A] {
                 for j in range(0, *i as int) {
                     ptr::read(&*p.offset(j));
                 }
-                free(ret as *u8);
+                rust_free(ret as *u8, 0, 8);
             });
             cast::transmute(ret)
         }
@@ -339,7 +339,7 @@ pub fn mkdir(p: &CString, _mode: io::FilePermission) -> IoResult<()> {
 }
 
 pub fn readdir(p: &CString) -> IoResult<Vec<Path>> {
-    use std::rt::global_heap::malloc_raw;
+    use std::rt::libc_heap::malloc_raw;
 
     fn prune(root: &CString, dirs: Vec<Path>) -> Vec<Path> {
         let root = unsafe { CString::new(root.with_ref(|p| p), false) };
@@ -345,7 +345,8 @@ fn require_alloc_fn(bcx: &Block, info_ty: ty::t, it: LangItem) -> ast::DefId {
 
 pub fn malloc_raw_dyn<'a>(bcx: &'a Block<'a>,
                           ptr_ty: ty::t,
-                          size: ValueRef)
+                          size: ValueRef,
+                          align: ValueRef)
                           -> Result<'a> {
     let _icx = push_ctxt("malloc_raw_exchange");
     let ccx = bcx.ccx();
@@ -353,7 +354,7 @@ pub fn malloc_raw_dyn<'a>(bcx: &'a Block<'a>,
     // Allocate space:
     let r = callee::trans_lang_call(bcx,
                                     require_alloc_fn(bcx, ptr_ty, ExchangeMallocFnLangItem),
-                                    [size],
+                                    [size, align],
                                     None);
 
     let llty_ptr = type_of::type_of(ccx, ptr_ty);
@@ -67,7 +67,7 @@ use middle::typeck::MethodCall;
 use util::common::indenter;
 use util::ppaux::Repr;
 use util::nodemap::NodeMap;
-use middle::trans::machine::{llsize_of, llsize_of_alloc};
+use middle::trans::machine::{llalign_of_min, llsize_of, llsize_of_alloc};
 use middle::trans::type_::Type;
 
 use syntax::ast;
@@ -1170,10 +1170,11 @@ fn trans_uniq_expr<'a>(bcx: &'a Block<'a>,
     let fcx = bcx.fcx;
     let llty = type_of::type_of(bcx.ccx(), contents_ty);
     let size = llsize_of(bcx.ccx(), llty);
+    let align = C_uint(bcx.ccx(), llalign_of_min(bcx.ccx(), llty) as uint);
     // We need to a make a pointer type because box_ty is ty_bot
     // if content_ty is, e.g. box fail!().
     let real_box_ty = ty::mk_uniq(bcx.tcx(), contents_ty);
-    let Result { bcx, val } = malloc_raw_dyn(bcx, real_box_ty, size);
+    let Result { bcx, val } = malloc_raw_dyn(bcx, real_box_ty, size, align);
     // Unique boxes do not allocate for zero-size types. The standard library
     // may assume that `free` is never called on the pointer returned for
     // `Box<ZeroSizeType>`.
@@ -278,7 +278,9 @@ pub fn trans_uniq_vstore<'a>(bcx: &'a Block<'a>,
 
     let vecsize = Add(bcx, alloc, llsize_of(ccx, ccx.opaque_vec_type));
 
-    let Result { bcx: bcx, val: val } = malloc_raw_dyn(bcx, vec_ty, vecsize);
+    // ~[T] is not going to be changed to support alignment, since it's obsolete.
+    let align = C_uint(ccx, 8);
+    let Result { bcx: bcx, val: val } = malloc_raw_dyn(bcx, vec_ty, vecsize, align);
     Store(bcx, fill, GEPi(bcx, val, [0u, abi::vec_elt_fill]));
     Store(bcx, alloc, GEPi(bcx, val, [0u, abi::vec_elt_alloc]));
@@ -32,7 +32,7 @@
 use libc::{size_t, c_int, c_uint, c_void, c_char, c_double};
 use libc::{ssize_t, sockaddr, free, addrinfo};
 use libc;
-use std::rt::global_heap::malloc_raw;
+use std::rt::libc_heap::malloc_raw;
 
 #[cfg(test)]
 use libc::uintptr_t;
@@ -81,7 +81,7 @@ use str::StrSlice;
 use str;
 use slice::{ImmutableVector, MutableVector};
 use slice;
-use rt::global_heap::malloc_raw;
+use rt::libc_heap::malloc_raw;
 use raw::Slice;
 
 /// The representation of a C String.
@@ -160,7 +160,7 @@ mod tests {
     use super::CVec;
     use libc;
    use ptr;
-    use rt::global_heap::malloc_raw;
+    use rt::libc_heap::malloc_raw;
 
     fn malloc(n: uint) -> CVec<u8> {
         unsafe {
@@ -110,6 +110,7 @@
 // Don't link to std. We are std.
 #![no_std]
 
+#![allow(deprecated)]
 #![deny(missing_doc)]
 
 // When testing libstd, bring in libuv as the I/O backend so tests can print
@@ -32,7 +32,8 @@ use ops::{Deref, Drop};
 use option::{Option, Some, None};
 use ptr;
 use ptr::RawPtr;
-use rt::global_heap::exchange_free;
+use mem::{min_align_of, size_of};
+use rt::heap::exchange_free;
 
 struct RcBox<T> {
     value: T,
@@ -104,7 +105,8 @@ impl<T> Drop for Rc<T> {
                 self.dec_weak();
 
                 if self.weak() == 0 {
-                    exchange_free(self.ptr as *u8)
+                    exchange_free(self.ptr as *mut u8, size_of::<RcBox<T>>(),
+                                  min_align_of::<RcBox<T>>())
                 }
             }
         }
@@ -177,7 +179,8 @@ impl<T> Drop for Weak<T> {
                 // the weak count starts at 1, and will only go to
                 // zero if all the strong pointers have disappeared.
                 if self.weak() == 0 {
-                    exchange_free(self.ptr as *u8)
+                    exchange_free(self.ptr as *mut u8, size_of::<RcBox<T>>(),
+                                  min_align_of::<RcBox<T>>())
                 }
             }
         }
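The `Rc`/`Weak` destructors above now hand the allocator the size and alignment of the original `RcBox<T>` allocation ("sized deallocation"), so jemalloc does not have to look the size up when freeing. A minimal sketch of the same contract using today's `std::alloc` API rather than the 2014 `rt::heap` API shown in the diff (the `RcBox` fields here are placeholders):

    use std::alloc::{alloc, dealloc, Layout};

    #[allow(dead_code)]
    struct RcBox<T> {
        strong: usize,
        weak: usize,
        value: T,
    }

    fn main() {
        unsafe {
            // Allocate with a concrete layout...
            let layout = Layout::new::<RcBox<u32>>();
            let ptr = alloc(layout);
            assert!(!ptr.is_null());
            // ...and free with the very same size/alignment pair.
            dealloc(ptr, layout);
        }
    }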
@@ -1,139 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The global (exchange) heap.
-
-use libc::{c_void, size_t, free, malloc, realloc};
-use ptr::{RawPtr, mut_null};
-use intrinsics::abort;
-use raw;
-use mem::size_of;
-
-#[inline]
-pub fn get_box_size(body_size: uint, body_align: uint) -> uint {
-    let header_size = size_of::<raw::Box<()>>();
-    let total_size = align_to(header_size, body_align) + body_size;
-    total_size
-}
-
-// Rounds |size| to the nearest |alignment|. Invariant: |alignment| is a power
-// of two.
-#[inline]
-fn align_to(size: uint, align: uint) -> uint {
-    assert!(align != 0);
-    (size + align - 1) & !(align - 1)
-}
-
-/// A wrapper around libc::malloc, aborting on out-of-memory
-#[inline]
-pub unsafe fn malloc_raw(size: uint) -> *mut u8 {
-    // `malloc(0)` may allocate, but it may also return a null pointer
-    // http://pubs.opengroup.org/onlinepubs/9699919799/functions/malloc.html
-    if size == 0 {
-        mut_null()
-    } else {
-        let p = malloc(size as size_t);
-        if p.is_null() {
-            // we need a non-allocating way to print an error here
-            abort();
-        }
-        p as *mut u8
-    }
-}
-
-/// A wrapper around libc::realloc, aborting on out-of-memory
-#[inline]
-pub unsafe fn realloc_raw(ptr: *mut u8, size: uint) -> *mut u8 {
-    // `realloc(ptr, 0)` may allocate, but it may also return a null pointer
-    // http://pubs.opengroup.org/onlinepubs/9699919799/functions/realloc.html
-    if size == 0 {
-        free(ptr as *mut c_void);
-        mut_null()
-    } else {
-        let p = realloc(ptr as *mut c_void, size as size_t);
-        if p.is_null() {
-            // we need a non-allocating way to print an error here
-            abort();
-        }
-        p as *mut u8
-    }
-}
-
-/// The allocator for unique pointers without contained managed pointers.
-#[cfg(not(test))]
-#[lang="exchange_malloc"]
-#[inline]
-pub unsafe fn exchange_malloc(size: uint) -> *mut u8 {
-    // The compiler never calls `exchange_free` on Box<ZeroSizeType>, so
-    // zero-size allocations can point to this `static`. It would be incorrect
-    // to use a null pointer, due to enums assuming types like unique pointers
-    // are never null.
-    static EMPTY: () = ();
-
-    if size == 0 {
-        &EMPTY as *() as *mut u8
-    } else {
-        malloc_raw(size)
-    }
-}
-
-// FIXME: #7496
-#[cfg(not(test))]
-#[lang="closure_exchange_malloc"]
-#[inline]
-pub unsafe fn closure_exchange_malloc_(drop_glue: fn(*mut u8), size: uint, align: uint) -> *u8 {
-    closure_exchange_malloc(drop_glue, size, align)
-}
-
-#[inline]
-pub unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint, align: uint) -> *u8 {
-    let total_size = get_box_size(size, align);
-    let p = malloc_raw(total_size);
-
-    let alloc = p as *mut raw::Box<()>;
-    (*alloc).drop_glue = drop_glue;
-
-    alloc as *u8
-}
-
-// NB: Calls to free CANNOT be allowed to fail, as throwing an exception from
-// inside a landing pad may corrupt the state of the exception handler.
-#[cfg(not(test))]
-#[lang="exchange_free"]
-#[inline]
-pub unsafe fn exchange_free_(ptr: *u8) {
-    exchange_free(ptr)
-}
-
-#[inline]
-pub unsafe fn exchange_free(ptr: *u8) {
-    free(ptr as *mut c_void);
-}
-
-#[cfg(test)]
-mod bench {
-    extern crate test;
-    use self::test::Bencher;
-
-    #[bench]
-    fn alloc_owned_small(b: &mut Bencher) {
-        b.iter(|| {
-            box 10
-        })
-    }
-
-    #[bench]
-    fn alloc_owned_big(b: &mut Bencher) {
-        b.iter(|| {
-            box [10, ..1000]
-        })
-    }
-}
src/libstd/rt/heap.rs (new file, 222 lines)

@@ -0,0 +1,222 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// FIXME: #13994: port to the sized deallocation API when available
// FIXME: #13996: need a way to mark the `allocate` and `reallocate` return values as `noalias`

use intrinsics::{abort, cttz32};
use libc::{c_int, c_void, size_t};
use ptr::RawPtr;

#[link(name = "jemalloc", kind = "static")]
extern {
    fn je_mallocx(size: size_t, flags: c_int) -> *mut c_void;
    fn je_rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;
    fn je_xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t;
    fn je_dallocx(ptr: *mut c_void, flags: c_int);
    fn je_nallocx(size: size_t, flags: c_int) -> size_t;
}

// -lpthread needs to occur after -ljemalloc, the earlier argument isn't enough
#[cfg(not(windows), not(target_os = "android"))]
#[link(name = "pthread")]
extern {}

// MALLOCX_ALIGN(a) macro
#[inline(always)]
fn mallocx_align(a: uint) -> c_int { unsafe { cttz32(a as u32) as c_int } }

/// Return a pointer to `size` bytes of memory.
///
/// Behavior is undefined if the requested size is 0 or the alignment is not a power of 2. The
/// alignment must be no larger than the largest supported page size on the platform.
#[inline]
pub unsafe fn allocate(size: uint, align: uint) -> *mut u8 {
    let ptr = je_mallocx(size as size_t, mallocx_align(align)) as *mut u8;
    if ptr.is_null() {
        abort()
    }
    ptr
}

/// Extend or shrink the allocation referenced by `ptr` to `size` bytes of memory.
///
/// Behavior is undefined if the requested size is 0 or the alignment is not a power of 2. The
/// alignment must be no larger than the largest supported page size on the platform.
///
/// The `old_size` and `align` parameters are the parameters that were used to create the
/// allocation referenced by `ptr`. The `old_size` parameter may also be the value returned by
/// `usable_size` for the requested size.
#[inline]
#[allow(unused_variable)] // for the parameter names in the documentation
pub unsafe fn reallocate(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> *mut u8 {
    let ptr = je_rallocx(ptr as *mut c_void, size as size_t, mallocx_align(align)) as *mut u8;
    if ptr.is_null() {
        abort()
    }
    ptr
}

/// Extend or shrink the allocation referenced by `ptr` to `size` bytes of memory in-place.
///
/// Return true if successful, otherwise false if the allocation was not altered.
///
/// Behavior is undefined if the requested size is 0 or the alignment is not a power of 2. The
/// alignment must be no larger than the largest supported page size on the platform.
///
/// The `old_size` and `align` parameters are the parameters that were used to
/// create the allocation referenced by `ptr`. The `old_size` parameter may be
/// any value in range_inclusive(requested_size, usable_size).
#[inline]
#[allow(unused_variable)] // for the parameter names in the documentation
pub unsafe fn reallocate_inplace(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> bool {
    je_xallocx(ptr as *mut c_void, size as size_t, 0, mallocx_align(align)) == size as size_t
}

/// Deallocate the memory referenced by `ptr`.
///
/// The `ptr` parameter must not be null.
///
/// The `size` and `align` parameters are the parameters that were used to create the
/// allocation referenced by `ptr`. The `size` parameter may also be the value returned by
/// `usable_size` for the requested size.
#[inline]
#[allow(unused_variable)] // for the parameter names in the documentation
pub unsafe fn deallocate(ptr: *mut u8, size: uint, align: uint) {
    je_dallocx(ptr as *mut c_void, mallocx_align(align))
}

/// Return the usable size of an allocation created with the specified the `size` and `align`.
#[inline]
pub fn usable_size(size: uint, align: uint) -> uint {
    unsafe { je_nallocx(size as size_t, mallocx_align(align)) as uint }
}

/// The allocator for unique pointers.
#[cfg(stage0)]
#[lang="exchange_malloc"]
#[inline(always)]
pub unsafe fn exchange_malloc_(size: uint) -> *mut u8 {
    exchange_malloc(size)
}

/// The allocator for unique pointers.
#[cfg(not(test), not(stage0))]
#[lang="exchange_malloc"]
#[inline(always)]
pub unsafe fn exchange_malloc_(size: uint, align: uint) -> *mut u8 {
    exchange_malloc(size, align)
}

/// The allocator for unique pointers.
#[cfg(stage0)]
#[inline]
pub unsafe fn exchange_malloc(size: uint) -> *mut u8 {
    // The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size
    // allocations can point to this `static`. It would be incorrect to use a null
    // pointer, due to enums assuming types like unique pointers are never null.
    static EMPTY: () = ();

    if size == 0 {
        &EMPTY as *() as *mut u8
    } else {
        allocate(size, 8)
    }
}

/// The allocator for unique pointers.
#[cfg(not(stage0))]
#[inline]
pub unsafe fn exchange_malloc(size: uint, align: uint) -> *mut u8 {
    // The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size
    // allocations can point to this `static`. It would be incorrect to use a null
    // pointer, due to enums assuming types like unique pointers are never null.
    static EMPTY: () = ();

    if size == 0 {
        &EMPTY as *() as *mut u8
    } else {
        allocate(size, align)
    }
}

#[cfg(not(test))]
#[lang="exchange_free"]
#[inline]
// FIXME: #13994 (rustc should pass align and size here)
pub unsafe fn exchange_free_(ptr: *mut u8) {
    exchange_free(ptr, 0, 8)
}

#[inline]
pub unsafe fn exchange_free(ptr: *mut u8, size: uint, align: uint) {
    deallocate(ptr, size, align);
}

// FIXME: #7496
#[cfg(not(test))]
#[lang="closure_exchange_malloc"]
#[inline]
unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint, align: uint) -> *mut u8 {
    let total_size = ::rt::util::get_box_size(size, align);
    let p = allocate(total_size, 8);

    let alloc = p as *mut ::raw::Box<()>;
    (*alloc).drop_glue = drop_glue;

    alloc as *mut u8
}

// hack for libcore
#[no_mangle]
#[doc(hidden)]
#[deprecated]
#[cfg(stage0, not(test))]
pub extern "C" fn rust_malloc(size: uint) -> *mut u8 {
    unsafe { exchange_malloc(size) }
}

// hack for libcore
#[no_mangle]
#[doc(hidden)]
#[deprecated]
#[cfg(not(stage0), not(test))]
pub extern "C" fn rust_malloc(size: uint, align: uint) -> *mut u8 {
    unsafe { exchange_malloc(size, align) }
}

// hack for libcore
#[no_mangle]
#[doc(hidden)]
#[deprecated]
#[cfg(not(test))]
pub extern "C" fn rust_free(ptr: *mut u8, size: uint, align: uint) {
    unsafe { exchange_free(ptr, size, align) }
}

#[cfg(test)]
mod bench {
    extern crate test;
    use self::test::Bencher;

    #[bench]
    fn alloc_owned_small(b: &mut Bencher) {
        b.iter(|| {
            box 10
        })
    }

    #[bench]
    fn alloc_owned_big(b: &mut Bencher) {
        b.iter(|| {
            box [10, ..1000]
        })
    }
}
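The `flags` argument of the `je_*allocx` functions encodes the requested alignment as its base-2 logarithm (jemalloc's MALLOCX_ALIGN / MALLOCX_LG_ALIGN flag), which is why `mallocx_align` above uses `cttz32`: for a power-of-two alignment the number of trailing zero bits is exactly log2. Callers of `allocate`, `reallocate`, and `deallocate` are expected to pass the same size/alignment pair used to create the allocation, as the doc comments state. A small self-contained check of the log2 equivalence (illustrative, not part of the patch):

    // jemalloc's MALLOCX_ALIGN(a) is MALLOCX_LG_ALIGN(log2(a)); for a power of two,
    // log2 equals the count of trailing zero bits, which is what cttz32 returns.
    fn mallocx_align(align: u32) -> u32 {
        debug_assert!(align.is_power_of_two());
        align.trailing_zeros()
    }

    fn main() {
        assert_eq!(mallocx_align(1), 0);
        assert_eq!(mallocx_align(8), 3);
        assert_eq!(mallocx_align(4096), 12);
    }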
src/libstd/rt/libc_heap.rs (new file, 51 lines)

@@ -0,0 +1,51 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! The global (exchange) heap.

use libc::{c_void, size_t, free, malloc, realloc};
use ptr::{RawPtr, mut_null};
use intrinsics::abort;

/// A wrapper around libc::malloc, aborting on out-of-memory
#[inline]
pub unsafe fn malloc_raw(size: uint) -> *mut u8 {
    // `malloc(0)` may allocate, but it may also return a null pointer
    // http://pubs.opengroup.org/onlinepubs/9699919799/functions/malloc.html
    if size == 0 {
        mut_null()
    } else {
        let p = malloc(size as size_t);
        if p.is_null() {
            // we need a non-allocating way to print an error here
            abort();
        }
        p as *mut u8
    }
}

/// A wrapper around libc::realloc, aborting on out-of-memory
#[inline]
pub unsafe fn realloc_raw(ptr: *mut u8, size: uint) -> *mut u8 {
    // `realloc(ptr, 0)` may allocate, but it may also return a null pointer
    // http://pubs.opengroup.org/onlinepubs/9699919799/functions/realloc.html
    if size == 0 {
        free(ptr as *mut c_void);
        mut_null()
    } else {
        let p = realloc(ptr as *mut c_void, size as size_t);
        if p.is_null() {
            // we need a non-allocating way to print an error here
            abort();
        }
        p as *mut u8
    }
}
@@ -12,12 +12,13 @@
 
 use cast;
 use iter::Iterator;
+use libc::{c_void, free};
 use mem;
 use ops::Drop;
 use option::{Option, None, Some};
 use ptr;
 use ptr::RawPtr;
-use rt::global_heap;
+use rt::libc_heap;
 use rt::local::Local;
 use rt::task::Task;
 use raw;
@@ -58,7 +59,7 @@ impl LocalHeap {
 
     #[inline]
     pub fn alloc(&mut self, drop_glue: fn(*mut u8), size: uint, align: uint) -> *mut Box {
-        let total_size = global_heap::get_box_size(size, align);
+        let total_size = ::rt::util::get_box_size(size, align);
         let alloc = self.memory_region.malloc(total_size);
         {
             // Make sure that we can't use `mybox` outside of this scope
@@ -187,7 +188,7 @@ impl MemoryRegion {
     fn malloc(&mut self, size: uint) -> *mut Box {
         let total_size = size + AllocHeader::size();
         let alloc: *AllocHeader = unsafe {
-            global_heap::malloc_raw(total_size) as *AllocHeader
+            libc_heap::malloc_raw(total_size) as *AllocHeader
         };
 
         let alloc: &mut AllocHeader = unsafe { cast::transmute(alloc) };
@@ -206,8 +207,7 @@ impl MemoryRegion {
 
         let total_size = size + AllocHeader::size();
         let alloc: *AllocHeader = unsafe {
-            global_heap::realloc_raw(orig_alloc as *mut u8,
-                                     total_size) as *AllocHeader
+            libc_heap::realloc_raw(orig_alloc as *mut u8, total_size) as *AllocHeader
         };
 
         let alloc: &mut AllocHeader = unsafe { cast::transmute(alloc) };
@@ -226,7 +226,7 @@ impl MemoryRegion {
             self.release(cast::transmute(alloc));
             rtassert!(self.live_allocations > 0);
             self.live_allocations -= 1;
-            global_heap::exchange_free(alloc as *u8)
+            free(alloc as *mut c_void)
         }
     }
 
@@ -26,7 +26,7 @@ language and an implementation must be provided regardless of the
 execution environment.
 
 Of foremost importance is the global exchange heap, in the module
-`global_heap`. Very little practical Rust code can be written without
+`heap`. Very little practical Rust code can be written without
 access to the global heap. Unlike most of `rt` the global heap is
 truly a global resource and generally operates independently of the
 rest of the runtime.
@@ -86,10 +86,13 @@ pub mod shouldnt_be_public {
 // Internal macros used by the runtime.
 mod macros;
 
-// The global (exchange) heap.
-pub mod global_heap;
+/// Wrappers around malloc / realloc aborting on out-of-memory.
+pub mod libc_heap;
 
-// Implementations of language-critical runtime features like @.
+/// The low-level memory allocation API.
+pub mod heap;
+
+/// Implementations of language-critical runtime features like @.
 pub mod task;
 
 // The EventLoop and internal synchronous I/O interface.
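The runtime now exposes two allocation modules: `libc_heap` with the aborting malloc/realloc wrappers, and `heap` with the jemalloc-backed low-level API that carries an explicit size and alignment on every call. A sketch of the `rt::heap` surface as it is exercised by the call sites later in this diff; the signatures are inferred from usage here, so treat them as assumptions rather than the authoritative API:

    // Inferred from call sites in this patch:
    //   allocate(size: uint, align: uint) -> *mut u8
    //   reallocate(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> *mut u8
    //   deallocate(ptr: *mut u8, size: uint, align: uint)
    //   exchange_malloc(size: uint, align: uint) -> *mut u8
    //   exchange_free(ptr: *mut u8, size: uint, align: uint)
    unsafe {
        let p = allocate(1024, 8);   // 1 KiB block, 8-byte aligned
        // ... use the block ...
        deallocate(p, 1024, 8);      // size and alignment must match the allocation
    }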
@@ -26,6 +26,23 @@ use slice::ImmutableVector;
 // FIXME: Once the runtime matures remove the `true` below to turn off rtassert, etc.
 pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert);
 
+#[deprecated]
+#[doc(hidden)]
+#[inline]
+pub fn get_box_size(body_size: uint, body_align: uint) -> uint {
+    let header_size = ::mem::size_of::<::raw::Box<()>>();
+    let total_size = align_to(header_size, body_align) + body_size;
+    total_size
+}
+
+// Rounds |size| to the nearest |alignment|. Invariant: |alignment| is a power
+// of two.
+#[inline]
+fn align_to(size: uint, align: uint) -> uint {
+    assert!(align != 0);
+    (size + align - 1) & !(align - 1)
+}
+
 /// Get the number of cores available
 pub fn num_cpus() -> uint {
     unsafe {
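The helper rounds a size up to the next multiple of a power-of-two alignment. A quick worked check of the arithmetic, with arbitrarily chosen values:

    fn align_to(size: uint, align: uint) -> uint {
        (size + align - 1) & !(align - 1)
    }

    fn main() {
        assert_eq!(align_to(13, 8), 16);   // 13 rounded up to the next multiple of 8
        assert_eq!(align_to(16, 8), 16);   // already aligned, unchanged
        assert_eq!(align_to(1, 16), 16);
        // get_box_size(body_size, body_align) is then
        // align_to(size_of::<raw::Box<()>>(), body_align) + body_size
    }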
@@ -110,7 +110,7 @@ use ops::Drop;
 use option::{None, Option, Some};
 use ptr::RawPtr;
 use ptr;
-use rt::global_heap::{exchange_free};
+use rt::heap::{exchange_malloc, exchange_free};
 use unstable::finally::try_finally;
 use vec::Vec;
 
@@ -292,9 +292,9 @@ pub trait CloneableVector<T> {
 impl<'a, T: Clone> CloneableVector<T> for &'a [T] {
     /// Returns a copy of `v`.
     #[inline]
+    #[cfg(stage0)]
     fn to_owned(&self) -> ~[T] {
         use RawVec = core::raw::Vec;
-        use rt::global_heap::{malloc_raw, exchange_free};
         use num::{CheckedAdd, CheckedMul};
         use option::Expect;
 
@@ -305,7 +305,8 @@ impl<'a, T: Clone> CloneableVector<T> for &'a [T] {
         let size = size.expect("overflow in to_owned()");
 
         unsafe {
-            let ret = malloc_raw(size) as *mut RawVec<()>;
+            // this should pass the real required alignment
+            let ret = exchange_malloc(size) as *mut RawVec<()>;
 
             (*ret).fill = len * mem::nonzero_size_of::<T>();
             (*ret).alloc = len * mem::nonzero_size_of::<T>();
@@ -329,7 +330,55 @@ impl<'a, T: Clone> CloneableVector<T> for &'a [T] {
                 for j in range(0, *i as int) {
                     ptr::read(&*p.offset(j));
                 }
-                exchange_free(ret as *u8);
+                // FIXME: #13994 (should pass align and size here)
+                exchange_free(ret as *mut u8, 0, 8);
+            });
+            cast::transmute(ret)
+        }
+    }
+
+    /// Returns a copy of `v`.
+    #[inline]
+    #[cfg(not(stage0))]
+    fn to_owned(&self) -> ~[T] {
+        use RawVec = core::raw::Vec;
+        use num::{CheckedAdd, CheckedMul};
+        use option::Expect;
+
+        let len = self.len();
+        let data_size = len.checked_mul(&mem::size_of::<T>());
+        let data_size = data_size.expect("overflow in to_owned()");
+        let size = mem::size_of::<RawVec<()>>().checked_add(&data_size);
+        let size = size.expect("overflow in to_owned()");
+
+        unsafe {
+            // this should pass the real required alignment
+            let ret = exchange_malloc(size, 8) as *mut RawVec<()>;
+
+            (*ret).fill = len * mem::nonzero_size_of::<T>();
+            (*ret).alloc = len * mem::nonzero_size_of::<T>();
+
+            // Be careful with the following loop. We want it to be optimized
+            // to a memcpy (or something similarly fast) when T is Copy. LLVM
+            // is easily confused, so any extra operations during the loop can
+            // prevent this optimization.
+            let mut i = 0;
+            let p = &mut (*ret).data as *mut _ as *mut T;
+            try_finally(
+                &mut i, (),
+                |i, ()| while *i < len {
+                    mem::move_val_init(
+                        &mut(*p.offset(*i as int)),
+                        self.unsafe_ref(*i).clone());
+                    *i += 1;
+                },
+                |i| if *i < len {
+                    // we must be failing, clean up after ourselves
+                    for j in range(0, *i as int) {
+                        ptr::read(&*p.offset(j));
+                    }
+                    // FIXME: #13994 (should pass align and size here)
+                    exchange_free(ret as *mut u8, 0, 8);
             });
             cast::transmute(ret)
         }
     }
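`to_owned` is temporarily duplicated: the `#[cfg(stage0)]` copy keeps calling the one-argument `exchange_malloc(size)` the snapshot compiler still expects, while the `#[cfg(not(stage0))]` copy passes an alignment as well. A hypothetical minimal illustration of the same staging idiom; the helper name is made up, and the `*mut u8` return type is assumed from the casts above:

    #[cfg(stage0)]
    unsafe fn alloc_vec_header(size: uint) -> *mut u8 {
        exchange_malloc(size)         // old intrinsic signature, snapshot compiler
    }

    #[cfg(not(stage0))]
    unsafe fn alloc_vec_header(size: uint) -> *mut u8 {
        exchange_malloc(size, 8)      // new signature carries the alignment
    }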
@@ -768,7 +817,8 @@ impl<T> Drop for MoveItems<T> {
         // destroy the remaining elements
         for _x in *self {}
         unsafe {
-            exchange_free(self.allocation as *u8)
+            // FIXME: #13994 (should pass align and size here)
+            exchange_free(self.allocation, 0, 8)
         }
     }
 }
@@ -434,7 +434,7 @@ mod imp {
 
 #[cfg(windows)]
 mod imp {
-    use rt::global_heap::malloc_raw;
+    use rt::libc_heap::malloc_raw;
     use libc::{HANDLE, BOOL, LPSECURITY_ATTRIBUTES, c_void, DWORD, LPCSTR};
     use libc;
     use ptr;
@@ -12,13 +12,12 @@
 
 use cast::{forget, transmute};
 use clone::Clone;
-use cmp::{Ord, Eq, Ordering, TotalEq, TotalOrd};
+use cmp::{Ord, Eq, Ordering, TotalEq, TotalOrd, max};
 use container::{Container, Mutable};
 use default::Default;
 use fmt;
 use iter::{DoubleEndedIterator, FromIterator, Extendable, Iterator, range};
-use libc::{free, c_void};
-use mem::{size_of, move_val_init};
+use mem::{min_align_of, move_val_init, size_of};
 use mem;
 use num;
 use num::{CheckedMul, CheckedAdd};
@@ -26,9 +25,9 @@ use ops::{Add, Drop};
 use option::{None, Option, Some, Expect};
 use ptr::RawPtr;
 use ptr;
-use rt::global_heap::{malloc_raw, realloc_raw};
 use raw::Slice;
 use RawVec = raw::Vec;
+use rt::heap::{allocate, reallocate, deallocate};
 use slice::{ImmutableEqVector, ImmutableVector, Items, MutItems, MutableVector};
 use slice::{MutableTotalOrdVector, OwnedVector, Vector};
 use slice::{MutableVectorAllocating};
@@ -92,11 +91,12 @@ impl<T> Vec<T> {
     /// let vec: Vec<int> = Vec::with_capacity(10);
     /// ```
     pub fn with_capacity(capacity: uint) -> Vec<T> {
+        if size_of::<T>() == 0 { return Vec { len: 0, cap: ::uint::MAX, ptr: 0 as *mut T } }
         if capacity == 0 {
             Vec::new()
         } else {
             let size = capacity.checked_mul(&size_of::<T>()).expect("capacity overflow");
-            let ptr = unsafe { malloc_raw(size) };
+            let ptr = unsafe { allocate(size, min_align_of::<T>()) };
             Vec { len: 0, cap: capacity, ptr: ptr as *mut T }
         }
     }
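With the new fast path, a vector of a zero-sized type never touches the allocator: its capacity is reported as `uint::MAX` and only the length is tracked. A quick user-side illustration, assuming the usual `capacity`/`len` accessors:

    let v: Vec<()> = Vec::with_capacity(10);
    assert_eq!(v.capacity(), std::uint::MAX);  // no allocation for the zero-sized ()
    assert_eq!(v.len(), 0);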
@@ -401,6 +401,23 @@ impl<T> Container for Vec<T> {
     }
 }
 
+// FIXME: #13996: need a way to mark the return value as `noalias`
+#[inline(never)]
+unsafe fn alloc_or_realloc<T>(ptr: *mut T, size: uint, old_size: uint) -> *mut T {
+    if old_size == 0 {
+        allocate(size, min_align_of::<T>()) as *mut T
+    } else {
+        reallocate(ptr as *mut u8, size, min_align_of::<T>(), old_size) as *mut T
+    }
+}
+
+#[inline]
+unsafe fn dealloc<T>(ptr: *mut T, len: uint) {
+    if size_of::<T>() != 0 {
+        deallocate(ptr as *mut u8, len * size_of::<T>(), min_align_of::<T>())
+    }
+}
+
 impl<T> Vec<T> {
     /// Returns the number of elements the vector can hold without
     /// reallocating.
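These two helpers funnel every `Vec` allocation through the new API: `alloc_or_realloc` exists because the very first growth has no existing block to reallocate, and `dealloc` skips zero-sized element types entirely. A small sketch of how the growth paths below drive them; the sizes here are arbitrary:

    unsafe {
        // first growth: no existing block, so old_size == 0 routes to allocate()
        let p = alloc_or_realloc(0 as *mut u8, 16, 0);
        // later growth reuses the pointer and passes the previous size in bytes
        let p = alloc_or_realloc(p, 32, 16);
        // dealloc takes the element count; for u8 that equals the byte size
        dealloc(p, 32);
    }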
@@ -477,33 +494,38 @@ impl<T> Vec<T> {
     /// assert_eq!(vec.capacity(), 11);
     /// ```
     pub fn reserve_exact(&mut self, capacity: uint) {
+        if size_of::<T>() == 0 { return }
         if capacity > self.cap {
             let size = capacity.checked_mul(&size_of::<T>()).expect("capacity overflow");
-            self.cap = capacity;
             unsafe {
-                self.ptr = realloc_raw(self.ptr as *mut u8, size) as *mut T;
+                self.ptr = alloc_or_realloc(self.ptr, size, self.cap * size_of::<T>());
             }
+            self.cap = capacity;
         }
     }
 
-    /// Shrink the capacity of the vector to match the length
+    /// Shrink the capacity of the vector as much as possible
     ///
     /// # Example
     ///
     /// ```rust
     /// let mut vec = vec!(1, 2, 3);
     /// vec.shrink_to_fit();
-    /// assert_eq!(vec.capacity(), vec.len());
     /// ```
     pub fn shrink_to_fit(&mut self) {
+        if size_of::<T>() == 0 { return }
         if self.len == 0 {
-            unsafe { free(self.ptr as *mut c_void) };
-            self.cap = 0;
-            self.ptr = 0 as *mut T;
+            if self.cap != 0 {
+                unsafe {
+                    dealloc(self.ptr, self.cap)
+                }
+                self.cap = 0;
+            }
         } else {
             unsafe {
                 // Overflow check is unnecessary as the vector is already at least this large.
-                self.ptr = realloc_raw(self.ptr as *mut u8, self.len * size_of::<T>()) as *mut T;
+                self.ptr = reallocate(self.ptr as *mut u8, self.len * size_of::<T>(),
+                                      min_align_of::<T>(), self.cap * size_of::<T>()) as *mut T;
             }
             self.cap = self.len;
         }
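The `shrink_to_fit` documentation is relaxed here, presumably because trimming through the external allocator is best-effort: excess capacity is released, but the doc example no longer asserts that capacity equals length, and an empty vector now gives up its block entirely. A rough illustration under those assumptions:

    let mut v = vec!(1i, 2, 3);
    v.reserve_exact(10);
    v.shrink_to_fit();
    assert!(v.capacity() >= v.len());  // exact equality is no longer promised

    let mut w: Vec<int> = Vec::with_capacity(8);
    w.shrink_to_fit();                 // len == 0: the whole block is deallocated
    assert_eq!(w.capacity(), 0);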
@@ -546,15 +568,20 @@ impl<T> Vec<T> {
     /// ```
     #[inline]
     pub fn push(&mut self, value: T) {
+        if size_of::<T>() == 0 {
+            // zero-size types consume no memory, so we can't rely on the address space running out
+            self.len = self.len.checked_add(&1).expect("length overflow");
+            unsafe { forget(value); }
+            return
+        }
         if self.len == self.cap {
-            if self.cap == 0 { self.cap += 2 }
             let old_size = self.cap * size_of::<T>();
-            self.cap = self.cap * 2;
-            let size = old_size * 2;
+            let size = max(old_size, 2 * size_of::<T>()) * 2;
             if old_size > size { fail!("capacity overflow") }
             unsafe {
-                self.ptr = realloc_raw(self.ptr as *mut u8, size) as *mut T;
+                self.ptr = alloc_or_realloc(self.ptr, size, self.cap * size_of::<T>());
             }
+            self.cap = max(self.cap, 2) * 2;
         }
 
         unsafe {
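A quick check of the new growth arithmetic in `push`: with `cap == 0` the first allocation jumps straight to room for four elements (`max(cap, 2) * 2`), and each later growth doubles the byte size. Working through it with a small vector of ints:

    let mut v = Vec::new();
    assert_eq!(v.capacity(), 0);
    v.push(1i);
    assert_eq!(v.capacity(), 4);       // max(0, 2) * 2 elements on first growth
    for n in range(2i, 6) { v.push(n); }
    assert_eq!(v.capacity(), 8);       // doubled once the length passed 4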
@@ -638,9 +665,10 @@ impl<T> Vec<T> {
     pub fn move_iter(self) -> MoveItems<T> {
         unsafe {
             let iter = transmute(self.as_slice().iter());
-            let ptr = self.ptr as *mut c_void;
+            let ptr = self.ptr;
+            let cap = self.cap;
             forget(self);
-            MoveItems { allocation: ptr, iter: iter }
+            MoveItems { allocation: ptr, cap: cap, iter: iter }
         }
     }
 
@@ -1386,11 +1414,13 @@ impl<T> Drop for Vec<T> {
     fn drop(&mut self) {
         // This is (and should always remain) a no-op if the fields are
         // zeroed (when moving out, because of #[unsafe_no_drop_flag]).
-        unsafe {
-            for x in self.as_mut_slice().iter() {
-                ptr::read(x);
+        if self.cap != 0 {
+            unsafe {
+                for x in self.as_mut_slice().iter() {
+                    ptr::read(x);
+                }
+                dealloc(self.ptr, self.cap)
             }
-            free(self.ptr as *mut c_void)
         }
     }
 }
@@ -1409,7 +1439,8 @@ impl<T:fmt::Show> fmt::Show for Vec<T> {
 
 /// An iterator that moves out of a vector.
 pub struct MoveItems<T> {
-    allocation: *mut c_void, // the block of memory allocated for the vector
+    allocation: *mut T, // the block of memory allocated for the vector
+    cap: uint, // the capacity of the vector
     iter: Items<'static, T>
 }
 
@@ -1440,9 +1471,11 @@ impl<T> DoubleEndedIterator<T> for MoveItems<T> {
 impl<T> Drop for MoveItems<T> {
     fn drop(&mut self) {
         // destroy the remaining elements
-        for _x in *self {}
-        unsafe {
-            free(self.allocation)
+        if self.cap != 0 {
+            for _x in *self {}
+            unsafe {
+                dealloc(self.allocation, self.cap);
+            }
         }
     }
 }
@@ -1493,7 +1526,7 @@ impl<T> FromVec<T> for ~[T] {
         let vp = v.as_mut_ptr();
 
         unsafe {
-            let ret = malloc_raw(size) as *mut RawVec<()>;
+            let ret = allocate(size, 8) as *mut RawVec<()>;
 
             (*ret).fill = len * mem::nonzero_size_of::<T>();
             (*ret).alloc = len * mem::nonzero_size_of::<T>();
@@ -15,8 +15,9 @@
 
 use std::cast;
 use std::ptr;
-use std::rt::global_heap;
+use std::rt::heap::exchange_free;
 use std::sync::atomics;
+use std::mem::{min_align_of, size_of};
 
 /// An atomically reference counted wrapper for shared state.
 ///
@@ -190,7 +191,8 @@ impl<T: Share + Send> Drop for Arc<T> {
 
         if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
             atomics::fence(atomics::Acquire);
-            unsafe { global_heap::exchange_free(self.x as *u8) }
+            unsafe { exchange_free(self.x as *mut u8, size_of::<ArcInner<T>>(),
+                                   min_align_of::<ArcInner<T>>()) }
         }
     }
 }
@@ -240,7 +242,8 @@ impl<T: Share + Send> Drop for Weak<T> {
         // the memory orderings
         if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
             atomics::fence(atomics::Acquire);
-            unsafe { global_heap::exchange_free(self.x as *u8) }
+            unsafe { exchange_free(self.x as *mut u8, size_of::<ArcInner<T>>(),
+                                   min_align_of::<ArcInner<T>>()) }
         }
     }
 }
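Both the `Arc` and `Weak` destructors now hand the block's size and alignment back to the exchange heap, since `exchange_free` can no longer recover them from the pointer alone. A hypothetical helper, not part of the patch, showing the computation the two destructors share (it assumes the module-private `ArcInner<T>` type and the `size_of`/`min_align_of` imports added above):

    fn arc_inner_layout<T>() -> (uint, uint) {
        // size and minimal alignment of the shared ArcInner<T> allocation
        (size_of::<ArcInner<T>>(), min_align_of::<ArcInner<T>>())
    }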