Support 128-bit integers on platforms without native support (#103)
* Use sized integer types * Add support for integer types not supported on some platforms * Add feature to test non-native integers in CI
This commit is contained in:
parent
b7bfb21242
commit
41f20fa3a5
17 changed files with 1215 additions and 350 deletions
|
@ -10,10 +10,17 @@ jobs:
|
|||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
libgccjit_version: ["libgccjit.so", "libgccjit_without_int128.so"]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: llvm/llvm-project
|
||||
path: llvm
|
||||
|
||||
- name: Install packages
|
||||
run: sudo apt-get install ninja-build ripgrep
|
||||
|
||||
|
@ -21,19 +28,25 @@ jobs:
|
|||
uses: dawidd6/action-download-artifact@v2
|
||||
with:
|
||||
workflow: main.yml
|
||||
name: libgccjit.so
|
||||
name: ${{ matrix.libgccjit_version }}
|
||||
path: gcc-build
|
||||
repo: antoyo/gcc
|
||||
search_artifacts: true # Because, instead, the action only check the last job ran and that won't work since we want multiple artifacts.
|
||||
|
||||
- name: Setup path to libgccjit
|
||||
run: |
|
||||
echo $(readlink -f gcc-build) > gcc_path
|
||||
# NOTE: the filename is still libgccjit.so even when the artifact name is different.
|
||||
ln gcc-build/libgccjit.so gcc-build/libgccjit.so.0
|
||||
|
||||
- name: Set LIBRARY_PATH
|
||||
- name: Set env
|
||||
run: |
|
||||
echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
|
||||
echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
|
||||
echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV
|
||||
|
||||
- name: Set RUST_COMPILER_RT_ROOT
|
||||
run: echo "RUST_COMPILER_RT_ROOT="${{ env.workspace }}/llvm/compiler-rt >> $GITHUB_ENV
|
||||
|
||||
# https://github.com/actions/cache/issues/133
|
||||
- name: Fixup owner of ~/.cargo/
|
1
.gitignore
vendored
1
.gitignore
vendored
|
@ -18,3 +18,4 @@ gimple*
|
|||
res
|
||||
test-backend
|
||||
gcc_path
|
||||
benchmarks
|
||||
|
|
4
Cargo.lock
generated
4
Cargo.lock
generated
|
@ -41,7 +41,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "gccjit"
|
||||
version = "1.0.0"
|
||||
source = "git+https://github.com/antoyo/gccjit.rs#e68fce53af18dce4d40e6b7090f881ff86a2e892"
|
||||
source = "git+https://github.com/antoyo/gccjit.rs#cbb07c6601ba4246fc2967c4d770403c57192ca2"
|
||||
dependencies = [
|
||||
"gccjit_sys",
|
||||
]
|
||||
|
@ -49,7 +49,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "gccjit_sys"
|
||||
version = "0.0.1"
|
||||
source = "git+https://github.com/antoyo/gccjit.rs#e68fce53af18dce4d40e6b7090f881ff86a2e892"
|
||||
source = "git+https://github.com/antoyo/gccjit.rs#cbb07c6601ba4246fc2967c4d770403c57192ca2"
|
||||
dependencies = [
|
||||
"libc 0.1.12",
|
||||
]
|
||||
|
|
|
@ -109,6 +109,13 @@ Or add a breakpoint to `add_error` in gdb and print the line number using:
|
|||
|
||||
```
|
||||
p loc->m_line
|
||||
p loc->m_filename->m_buffer
|
||||
```
|
||||
|
||||
To print a debug representation of a tree:
|
||||
|
||||
```c
|
||||
debug_tree(expr);
|
||||
```
|
||||
|
||||
To get the `rustc` command to run in `gdb`, add the `--verbose` flag to `cargo build`.
|
||||
|
@ -134,4 +141,5 @@ To get the `rustc` command to run in `gdb`, add the `--verbose` flag to `cargo b
|
|||
* Set `linker='-Clinker=m68k-linux-gcc'`.
|
||||
* Set the path to the cross-compiling libgccjit in `gcc_path`.
|
||||
* Disable the 128-bit integer types if the target doesn't support them by using `let i128_type = context.new_type::<i64>();` in `context.rs` (same for u128_type).
|
||||
* Comment the line: `context.add_command_line_option("-masm=intel");` in src/base.rs.
|
||||
* (might not be necessary) Disable the compilation of libstd.so (and possibly libcore.so?).
|
||||
|
|
12
build.sh
12
build.sh
|
@ -13,13 +13,21 @@ fi
|
|||
export LD_LIBRARY_PATH="$GCC_PATH"
|
||||
export LIBRARY_PATH="$GCC_PATH"
|
||||
|
||||
features=
|
||||
|
||||
if [[ "$1" == "--features" ]]; then
|
||||
shift
|
||||
features="--features $1"
|
||||
shift
|
||||
fi
|
||||
|
||||
if [[ "$1" == "--release" ]]; then
|
||||
export CHANNEL='release'
|
||||
CARGO_INCREMENTAL=1 cargo rustc --release
|
||||
CARGO_INCREMENTAL=1 cargo rustc --release $features
|
||||
else
|
||||
echo $LD_LIBRARY_PATH
|
||||
export CHANNEL='debug'
|
||||
cargo rustc
|
||||
cargo rustc $features
|
||||
fi
|
||||
|
||||
source config.sh
|
||||
|
|
|
@ -22,7 +22,7 @@ if [[ "$1" == "--release" ]]; then
|
|||
RUSTFLAGS="$RUSTFLAGS -Zmir-opt-level=3" cargo build --target $TARGET_TRIPLE --release
|
||||
else
|
||||
sysroot_channel='debug'
|
||||
cargo build --target $TARGET_TRIPLE
|
||||
cargo build --target $TARGET_TRIPLE --features compiler_builtins/c
|
||||
fi
|
||||
|
||||
# Copy files to sysroot
|
||||
|
|
|
@ -45,7 +45,7 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
|
|||
if env::var("CG_GCCJIT_DUMP_MODULE_NAMES").as_deref() == Ok("1") {
|
||||
println!("Module {}", module.name);
|
||||
}
|
||||
if env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name) {
|
||||
if env::var("CG_GCCJIT_DUMP_ALL_MODULES").as_deref() == Ok("1") || env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name) {
|
||||
println!("Dumping reproducer {}", module.name);
|
||||
let _ = fs::create_dir("/tmp/reproducers");
|
||||
// FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
|
||||
|
@ -54,6 +54,11 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
|
|||
context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
|
||||
println!("Dumped reproducer {}", module.name);
|
||||
}
|
||||
if env::var("CG_GCCJIT_DUMP_TO_FILE").as_deref() == Ok("1") {
|
||||
let _ = fs::create_dir("/tmp/gccjit_dumps");
|
||||
let path = &format!("/tmp/gccjit_dumps/{}.c", module.name);
|
||||
context.dump_to_file(path, true);
|
||||
}
|
||||
context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
|
||||
}
|
||||
|
||||
|
|
|
@ -52,7 +52,7 @@ pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<GccContext>, u64) {
|
||||
pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol, supports_128bit_integers: bool) -> (ModuleCodegen<GccContext>, u64) {
|
||||
let prof_timer = tcx.prof.generic_activity("codegen_module");
|
||||
let start_time = Instant::now();
|
||||
|
||||
|
@ -60,7 +60,7 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul
|
|||
let (module, _) = tcx.dep_graph.with_task(
|
||||
dep_node,
|
||||
tcx,
|
||||
cgu_name,
|
||||
(cgu_name, supports_128bit_integers),
|
||||
module_codegen,
|
||||
Some(dep_graph::hash_result),
|
||||
);
|
||||
|
@ -71,7 +71,7 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul
|
|||
// the time we needed for codegenning it.
|
||||
let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
|
||||
|
||||
fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<GccContext> {
|
||||
fn module_codegen(tcx: TyCtxt<'_>, (cgu_name, supports_128bit_integers): (Symbol, bool)) -> ModuleCodegen<GccContext> {
|
||||
let cgu = tcx.codegen_unit(cgu_name);
|
||||
// Instantiate monomorphizations without filling out definitions yet...
|
||||
//let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
|
||||
|
@ -106,7 +106,7 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul
|
|||
}
|
||||
|
||||
{
|
||||
let cx = CodegenCx::new(&context, cgu, tcx);
|
||||
let cx = CodegenCx::new(&context, cgu, tcx, supports_128bit_integers);
|
||||
|
||||
let mono_items = cgu.items_in_deterministic_order(tcx);
|
||||
for &(mono_item, (linkage, visibility)) in &mono_items {
|
||||
|
|
228
src/builder.rs
228
src/builder.rs
|
@ -94,7 +94,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
|
||||
let size = self.cx.int_width(src.get_type()) / 8;
|
||||
let size = src.get_type().get_size();
|
||||
|
||||
let func = self.current_func();
|
||||
|
||||
|
@ -141,8 +141,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
|
||||
let size = self.cx.int_width(src.get_type());
|
||||
let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
|
||||
let size = src.get_type().get_size();
|
||||
let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size));
|
||||
let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
|
||||
let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
|
||||
let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
|
||||
|
@ -290,7 +290,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
|
||||
if return_type != void_type {
|
||||
unsafe { RETURN_VALUE_COUNT += 1 };
|
||||
let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
|
||||
let result = current_func.new_local(None, return_type, &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
|
||||
current_block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
|
||||
result.to_rvalue()
|
||||
}
|
||||
|
@ -309,7 +309,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
|
||||
pub fn overflow_call(&self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
|
||||
// gccjit requires to use the result of functions, even when it's not used.
|
||||
// That's why we assign the result to a local.
|
||||
let return_type = self.context.new_type::<bool>();
|
||||
|
@ -317,7 +317,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
let current_func = current_block.get_function();
|
||||
// TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
|
||||
unsafe { RETURN_VALUE_COUNT += 1 };
|
||||
let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
|
||||
let result = current_func.new_local(None, return_type, &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
|
||||
current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
|
||||
result.to_rvalue()
|
||||
}
|
||||
|
@ -468,23 +468,16 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
}
|
||||
|
||||
fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
// FIXME(antoyo): this should not be required.
|
||||
if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) {
|
||||
b = self.context.new_cast(None, b, a.get_type());
|
||||
}
|
||||
a + b
|
||||
fn add(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
self.gcc_add(a, b)
|
||||
}
|
||||
|
||||
fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
a + b
|
||||
}
|
||||
|
||||
fn sub(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
if a.get_type() != b.get_type() {
|
||||
b = self.context.new_cast(None, b, a.get_type());
|
||||
}
|
||||
a - b
|
||||
fn sub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
self.gcc_sub(a, b)
|
||||
}
|
||||
|
||||
fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
|
@ -492,7 +485,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
a * b
|
||||
self.gcc_mul(a, b)
|
||||
}
|
||||
|
||||
fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
|
@ -500,8 +493,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
// TODO(antoyo): convert the arguments to unsigned?
|
||||
a / b
|
||||
self.gcc_udiv(a, b)
|
||||
}
|
||||
|
||||
fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
|
@ -511,8 +503,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
// TODO(antoyo): convert the arguments to signed?
|
||||
a / b
|
||||
self.gcc_sdiv(a, b)
|
||||
}
|
||||
|
||||
fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
|
@ -529,11 +520,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
a % b
|
||||
self.gcc_urem(a, b)
|
||||
}
|
||||
|
||||
fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
a % b
|
||||
self.gcc_srem(a, b)
|
||||
}
|
||||
|
||||
fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
|
@ -549,81 +540,33 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
// FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
|
||||
let a_type = a.get_type();
|
||||
let b_type = b.get_type();
|
||||
if a_type.is_unsigned(self) && b_type.is_signed(self) {
|
||||
let a = self.context.new_cast(None, a, b_type);
|
||||
let result = a << b;
|
||||
self.context.new_cast(None, result, a_type)
|
||||
}
|
||||
else if a_type.is_signed(self) && b_type.is_unsigned(self) {
|
||||
let b = self.context.new_cast(None, b, a_type);
|
||||
a << b
|
||||
}
|
||||
else {
|
||||
a << b
|
||||
}
|
||||
self.gcc_shl(a, b)
|
||||
}
|
||||
|
||||
fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
// FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
|
||||
// TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
|
||||
let a_type = a.get_type();
|
||||
let b_type = b.get_type();
|
||||
if a_type.is_unsigned(self) && b_type.is_signed(self) {
|
||||
let a = self.context.new_cast(None, a, b_type);
|
||||
let result = a >> b;
|
||||
self.context.new_cast(None, result, a_type)
|
||||
}
|
||||
else if a_type.is_signed(self) && b_type.is_unsigned(self) {
|
||||
let b = self.context.new_cast(None, b, a_type);
|
||||
a >> b
|
||||
}
|
||||
else {
|
||||
a >> b
|
||||
}
|
||||
self.gcc_lshr(a, b)
|
||||
}
|
||||
|
||||
fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
// TODO(antoyo): check whether behavior is an arithmetic shift for >> .
|
||||
// FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
|
||||
let a_type = a.get_type();
|
||||
let b_type = b.get_type();
|
||||
if a_type.is_unsigned(self) && b_type.is_signed(self) {
|
||||
let a = self.context.new_cast(None, a, b_type);
|
||||
let result = a >> b;
|
||||
self.context.new_cast(None, result, a_type)
|
||||
}
|
||||
else if a_type.is_signed(self) && b_type.is_unsigned(self) {
|
||||
let b = self.context.new_cast(None, b, a_type);
|
||||
a >> b
|
||||
}
|
||||
else {
|
||||
a >> b
|
||||
}
|
||||
// It seems to be if the value is signed.
|
||||
self.gcc_lshr(a, b)
|
||||
}
|
||||
|
||||
fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
if a.get_type() != b.get_type() {
|
||||
b = self.context.new_cast(None, b, a.get_type());
|
||||
}
|
||||
a & b
|
||||
fn and(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
self.gcc_and(a, b)
|
||||
}
|
||||
|
||||
fn or(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
if a.get_type() != b.get_type() {
|
||||
b = self.context.new_cast(None, b, a.get_type());
|
||||
}
|
||||
a | b
|
||||
fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
self.cx.gcc_or(a, b)
|
||||
}
|
||||
|
||||
fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
a ^ b
|
||||
self.gcc_xor(a, b)
|
||||
}
|
||||
|
||||
fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
|
||||
self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
|
||||
self.gcc_neg(a)
|
||||
}
|
||||
|
||||
fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
|
||||
|
@ -631,14 +574,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
|
||||
let operation =
|
||||
if a.get_type().is_bool() {
|
||||
UnaryOp::LogicalNegate
|
||||
}
|
||||
else {
|
||||
UnaryOp::BitwiseNegate
|
||||
};
|
||||
self.cx.context.new_unary_op(None, operation, a.get_type(), a)
|
||||
self.gcc_not(a)
|
||||
}
|
||||
|
||||
fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
|
@ -646,7 +582,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
a + b
|
||||
self.gcc_add(a, b)
|
||||
}
|
||||
|
||||
fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
|
@ -655,7 +591,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
|
||||
fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
// TODO(antoyo): should generate poison value?
|
||||
a - b
|
||||
self.gcc_sub(a, b)
|
||||
}
|
||||
|
||||
fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
|
@ -687,76 +623,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
|
||||
use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
|
||||
|
||||
let new_kind =
|
||||
match typ.kind() {
|
||||
Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
|
||||
Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
|
||||
t @ (Uint(_) | Int(_)) => t.clone(),
|
||||
_ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
|
||||
};
|
||||
|
||||
// TODO(antoyo): remove duplication with intrinsic?
|
||||
let name =
|
||||
match oop {
|
||||
OverflowOp::Add =>
|
||||
match new_kind {
|
||||
Int(I8) => "__builtin_add_overflow",
|
||||
Int(I16) => "__builtin_add_overflow",
|
||||
Int(I32) => "__builtin_sadd_overflow",
|
||||
Int(I64) => "__builtin_saddll_overflow",
|
||||
Int(I128) => "__builtin_add_overflow",
|
||||
|
||||
Uint(U8) => "__builtin_add_overflow",
|
||||
Uint(U16) => "__builtin_add_overflow",
|
||||
Uint(U32) => "__builtin_uadd_overflow",
|
||||
Uint(U64) => "__builtin_uaddll_overflow",
|
||||
Uint(U128) => "__builtin_add_overflow",
|
||||
|
||||
_ => unreachable!(),
|
||||
},
|
||||
OverflowOp::Sub =>
|
||||
match new_kind {
|
||||
Int(I8) => "__builtin_sub_overflow",
|
||||
Int(I16) => "__builtin_sub_overflow",
|
||||
Int(I32) => "__builtin_ssub_overflow",
|
||||
Int(I64) => "__builtin_ssubll_overflow",
|
||||
Int(I128) => "__builtin_sub_overflow",
|
||||
|
||||
Uint(U8) => "__builtin_sub_overflow",
|
||||
Uint(U16) => "__builtin_sub_overflow",
|
||||
Uint(U32) => "__builtin_usub_overflow",
|
||||
Uint(U64) => "__builtin_usubll_overflow",
|
||||
Uint(U128) => "__builtin_sub_overflow",
|
||||
|
||||
_ => unreachable!(),
|
||||
},
|
||||
OverflowOp::Mul =>
|
||||
match new_kind {
|
||||
Int(I8) => "__builtin_mul_overflow",
|
||||
Int(I16) => "__builtin_mul_overflow",
|
||||
Int(I32) => "__builtin_smul_overflow",
|
||||
Int(I64) => "__builtin_smulll_overflow",
|
||||
Int(I128) => "__builtin_mul_overflow",
|
||||
|
||||
Uint(U8) => "__builtin_mul_overflow",
|
||||
Uint(U16) => "__builtin_mul_overflow",
|
||||
Uint(U32) => "__builtin_umul_overflow",
|
||||
Uint(U64) => "__builtin_umulll_overflow",
|
||||
Uint(U128) => "__builtin_mul_overflow",
|
||||
|
||||
_ => unreachable!(),
|
||||
},
|
||||
};
|
||||
|
||||
let intrinsic = self.context.get_builtin_function(&name);
|
||||
let res = self.current_func()
|
||||
// TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
|
||||
.new_local(None, rhs.get_type(), "binopResult")
|
||||
.get_address(None);
|
||||
let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
|
||||
(res.dereference(None).to_rvalue(), overflow)
|
||||
self.gcc_checked_binop(oop, typ, lhs, rhs)
|
||||
}
|
||||
|
||||
fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
|
||||
|
@ -1003,7 +870,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
/* Casts */
|
||||
fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
|
||||
// TODO(antoyo): check that it indeed truncate the value.
|
||||
self.context.new_cast(None, value, dest_ty)
|
||||
self.gcc_int_cast(value, dest_ty)
|
||||
}
|
||||
|
||||
fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
|
||||
|
@ -1016,19 +883,19 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
|
||||
self.context.new_cast(None, value, dest_ty)
|
||||
self.gcc_float_to_uint_cast(value, dest_ty)
|
||||
}
|
||||
|
||||
fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
|
||||
self.context.new_cast(None, value, dest_ty)
|
||||
self.gcc_float_to_int_cast(value, dest_ty)
|
||||
}
|
||||
|
||||
fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
|
||||
self.context.new_cast(None, value, dest_ty)
|
||||
self.gcc_uint_to_float_cast(value, dest_ty)
|
||||
}
|
||||
|
||||
fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
|
||||
self.context.new_cast(None, value, dest_ty)
|
||||
self.gcc_int_to_float_cast(value, dest_ty)
|
||||
}
|
||||
|
||||
fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
|
||||
|
@ -1054,7 +921,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
|
||||
fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
|
||||
// NOTE: is_signed is for value, not dest_typ.
|
||||
self.cx.context.new_cast(None, value, dest_typ)
|
||||
self.gcc_int_cast(value, dest_typ)
|
||||
}
|
||||
|
||||
fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
|
||||
|
@ -1075,21 +942,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
/* Comparisons */
|
||||
fn icmp(&mut self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
|
||||
let left_type = lhs.get_type();
|
||||
let right_type = rhs.get_type();
|
||||
if left_type != right_type {
|
||||
// NOTE: because libgccjit cannot compare function pointers.
|
||||
if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() {
|
||||
lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
|
||||
rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
|
||||
}
|
||||
// NOTE: hack because we try to cast a vector type to the same vector type.
|
||||
else if format!("{:?}", left_type) != format!("{:?}", right_type) {
|
||||
rhs = self.context.new_cast(None, rhs, left_type);
|
||||
}
|
||||
}
|
||||
self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
|
||||
fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
|
||||
self.gcc_icmp(op, lhs, rhs)
|
||||
}
|
||||
|
||||
fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
|
||||
|
@ -1156,7 +1010,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
then_block.add_assignment(None, variable, then_val);
|
||||
then_block.end_with_jump(None, after_block);
|
||||
|
||||
if then_val.get_type() != else_val.get_type() {
|
||||
if !then_val.get_type().is_compatible_with(else_val.get_type()) {
|
||||
else_val = self.context.new_cast(None, else_val, then_val.get_type());
|
||||
}
|
||||
else_block.add_assignment(None, variable, else_val);
|
||||
|
@ -1322,7 +1176,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
|
||||
let size = self.cx.int_width(src.get_type()) / 8;
|
||||
let size = src.get_type().get_size();
|
||||
let name =
|
||||
match op {
|
||||
AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
|
||||
|
@ -1396,7 +1250,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
// Fix the code in codegen_ssa::base::from_immediate.
|
||||
return value;
|
||||
}
|
||||
self.context.new_cast(None, value, dest_typ)
|
||||
self.gcc_int_cast(value, dest_typ)
|
||||
}
|
||||
|
||||
fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
|
||||
|
@ -1470,7 +1324,7 @@ impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
|
|||
}
|
||||
}
|
||||
|
||||
trait ToGccComp {
|
||||
pub trait ToGccComp {
|
||||
fn to_gcc_comparison(&self) -> ComparisonOp;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
use std::convert::TryFrom;
|
||||
|
||||
use gccjit::LValue;
|
||||
use gccjit::{Block, CType, RValue, Type, ToRValue};
|
||||
use gccjit::{Block, RValue, Type, ToRValue};
|
||||
use rustc_codegen_ssa::mir::place::PlaceRef;
|
||||
use rustc_codegen_ssa::traits::{
|
||||
BaseTypeMethods,
|
||||
|
@ -111,29 +109,15 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
|
||||
self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
|
||||
self.gcc_int(typ, int)
|
||||
}
|
||||
|
||||
fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
|
||||
self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
|
||||
self.gcc_uint(typ, int)
|
||||
}
|
||||
|
||||
fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
|
||||
if num >> 64 != 0 {
|
||||
// FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
|
||||
let low = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
|
||||
let high = self.context.new_rvalue_from_long(typ, (num >> 64) as u64 as i64);
|
||||
|
||||
let sixty_four = self.context.new_rvalue_from_long(typ, 64);
|
||||
(high << sixty_four) | self.context.new_cast(None, low, typ)
|
||||
}
|
||||
else if typ.is_i128(self) {
|
||||
let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
|
||||
self.context.new_cast(None, num, typ)
|
||||
}
|
||||
else {
|
||||
self.context.new_rvalue_from_long(typ, num as u64 as i64)
|
||||
}
|
||||
self.gcc_uint_big(typ, num)
|
||||
}
|
||||
|
||||
fn const_bool(&self, val: bool) -> RValue<'gcc> {
|
||||
|
@ -425,11 +409,11 @@ impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> {
|
|||
}
|
||||
|
||||
fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
|
||||
self.unqualified() == cx.context.new_c_type(CType::Int128t)
|
||||
self.unqualified() == cx.i128_type.unqualified()
|
||||
}
|
||||
|
||||
fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
|
||||
self.unqualified() == cx.context.new_c_type(CType::UInt128t)
|
||||
self.unqualified() == cx.u128_type.unqualified()
|
||||
}
|
||||
|
||||
fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
|
||||
|
|
|
@ -62,6 +62,8 @@ pub struct CodegenCx<'gcc, 'tcx> {
|
|||
pub ulonglong_type: Type<'gcc>,
|
||||
pub sizet_type: Type<'gcc>,
|
||||
|
||||
pub supports_128bit_integers: bool,
|
||||
|
||||
pub float_type: Type<'gcc>,
|
||||
pub double_type: Type<'gcc>,
|
||||
|
||||
|
@ -110,22 +112,29 @@ pub struct CodegenCx<'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
|
||||
pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
|
||||
pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>, supports_128bit_integers: bool) -> Self {
|
||||
let check_overflow = tcx.sess.overflow_checks();
|
||||
// TODO(antoyo): fix this mess. libgccjit seems to return random type when using new_int_type().
|
||||
let isize_type = context.new_c_type(CType::LongLong);
|
||||
let usize_type = context.new_c_type(CType::ULongLong);
|
||||
let bool_type = context.new_type::<bool>();
|
||||
let i8_type = context.new_type::<i8>();
|
||||
let i16_type = context.new_type::<i16>();
|
||||
let i32_type = context.new_type::<i32>();
|
||||
let i64_type = context.new_c_type(CType::LongLong);
|
||||
let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
|
||||
let u8_type = context.new_type::<u8>();
|
||||
let u16_type = context.new_type::<u16>();
|
||||
let u32_type = context.new_type::<u32>();
|
||||
let u64_type = context.new_c_type(CType::ULongLong);
|
||||
let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
|
||||
|
||||
let i8_type = context.new_c_type(CType::Int8t);
|
||||
let i16_type = context.new_c_type(CType::Int16t);
|
||||
let i32_type = context.new_c_type(CType::Int32t);
|
||||
let i64_type = context.new_c_type(CType::Int64t);
|
||||
let u8_type = context.new_c_type(CType::UInt8t);
|
||||
let u16_type = context.new_c_type(CType::UInt16t);
|
||||
let u32_type = context.new_c_type(CType::UInt32t);
|
||||
let u64_type = context.new_c_type(CType::UInt64t);
|
||||
|
||||
let (i128_type, u128_type) =
|
||||
if supports_128bit_integers {
|
||||
let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?;
|
||||
let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?;
|
||||
(i128_type, u128_type)
|
||||
}
|
||||
else {
|
||||
let i128_type = context.new_array_type(None, i64_type, 2);
|
||||
let u128_type = context.new_array_type(None, u64_type, 2);
|
||||
(i128_type, u128_type)
|
||||
};
|
||||
|
||||
let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
|
||||
|
||||
|
@ -139,8 +148,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
|
|||
let ulonglong_type = context.new_c_type(CType::ULongLong);
|
||||
let sizet_type = context.new_c_type(CType::SizeT);
|
||||
|
||||
assert_eq!(isize_type, i64_type);
|
||||
assert_eq!(usize_type, u64_type);
|
||||
let isize_type = context.new_c_type(CType::LongLong);
|
||||
let usize_type = context.new_c_type(CType::ULongLong);
|
||||
let bool_type = context.new_type::<bool>();
|
||||
|
||||
// TODO(antoyo): only have those assertions on x86_64.
|
||||
assert_eq!(isize_type.get_size(), i64_type.get_size());
|
||||
assert_eq!(usize_type.get_size(), u64_type.get_size());
|
||||
|
||||
let mut functions = FxHashMap::default();
|
||||
let builtins = [
|
||||
|
@ -190,6 +204,8 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
|
|||
ulonglong_type,
|
||||
sizet_type,
|
||||
|
||||
supports_128bit_integers,
|
||||
|
||||
float_type,
|
||||
double_type,
|
||||
|
||||
|
@ -221,6 +237,41 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
|
|||
function
|
||||
}
|
||||
|
||||
/// Returns `true` if `typ` is an integer type the target libgccjit supports
/// natively: the sized 8/16/32/64-bit types, plus the 128-bit types when
/// `supports_128bit_integers` is set.
pub fn is_native_int_type(&self, typ: Type<'gcc>) -> bool {
    // Idiom: iterator `any` instead of a manual loop with early return.
    let native_types = [
        self.u8_type,
        self.u16_type,
        self.u32_type,
        self.u64_type,
        self.i8_type,
        self.i16_type,
        self.i32_type,
        self.i64_type,
    ];
    if native_types.iter().any(|native_type| native_type.is_compatible_with(typ)) {
        return true;
    }

    // 128-bit types are only native when the platform supports them.
    self.supports_128bit_integers &&
        (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
}
|
||||
|
||||
/// Whether `typ` is a 128-bit integer type that must be emulated because the
/// target libgccjit lacks native 128-bit integer support.
pub fn is_non_native_int_type(&self, typ: Type<'gcc>) -> bool {
    if self.supports_128bit_integers {
        return false;
    }
    self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ)
}
|
||||
|
||||
/// Like [`is_native_int_type`], but also accepts the gcc `bool` type.
pub fn is_native_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
    if typ == self.bool_type {
        return true;
    }
    self.is_native_int_type(typ)
}
|
||||
|
||||
/// Whether `typ` is any integer type (native or emulated 128-bit) or bool.
pub fn is_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
    typ == self.bool_type
        || self.is_native_int_type(typ)
        || self.is_non_native_int_type(typ)
}
|
||||
|
||||
/// Shortcut accessor for the compiler session stored in `tcx`.
pub fn sess(&self) -> &Session {
    &self.tcx.sess
}
|
||||
|
|
737
src/int.rs
Normal file
737
src/int.rs
Normal file
|
@ -0,0 +1,737 @@
|
|||
//! Module to handle integer operations.
|
||||
//! This module exists because some integer types are not supported on some gcc platforms, e.g.
|
||||
//! 128-bit integers on 32-bit platforms and thus require to be handled manually.
|
||||
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use gccjit::{ComparisonOp, FunctionType, RValue, ToRValue, Type, UnaryOp, BinaryOp};
|
||||
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
|
||||
use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp};
|
||||
use rustc_middle::ty::Ty;
|
||||
|
||||
use crate::builder::ToGccComp;
|
||||
use crate::{builder::Builder, common::{SignType, TypeReflection}, context::CodegenCx};
|
||||
|
||||
impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
||||
pub fn gcc_urem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
// 128-bit unsigned %: __umodti3
|
||||
self.multiplicative_operation(BinaryOp::Modulo, "mod", false, a, b)
|
||||
}
|
||||
|
||||
pub fn gcc_srem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
// 128-bit signed %: __modti3
|
||||
self.multiplicative_operation(BinaryOp::Modulo, "mod", true, a, b)
|
||||
}
|
||||
|
||||
pub fn gcc_not(&self, a: RValue<'gcc>) -> RValue<'gcc> {
|
||||
let typ = a.get_type();
|
||||
if self.is_native_int_type_or_bool(typ) {
|
||||
let operation =
|
||||
if typ.is_bool() {
|
||||
UnaryOp::LogicalNegate
|
||||
}
|
||||
else {
|
||||
UnaryOp::BitwiseNegate
|
||||
};
|
||||
self.cx.context.new_unary_op(None, operation, typ, a)
|
||||
}
|
||||
else {
|
||||
// TODO(antoyo): use __negdi2 and __negti2 instead?
|
||||
let element_type = typ.dyncast_array().expect("element type");
|
||||
let values = [
|
||||
self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.low(a)),
|
||||
self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.high(a)),
|
||||
];
|
||||
self.cx.context.new_array_constructor(None, typ, &values)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn gcc_neg(&self, a: RValue<'gcc>) -> RValue<'gcc> {
|
||||
let a_type = a.get_type();
|
||||
if self.is_native_int_type(a_type) {
|
||||
self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
|
||||
}
|
||||
else {
|
||||
let param_a = self.context.new_parameter(None, a_type, "a");
|
||||
let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a], "__negti2", false);
|
||||
self.context.new_call(None, func, &[a])
|
||||
}
|
||||
}
|
||||
|
||||
pub fn gcc_and(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b)
|
||||
}
|
||||
|
||||
/// Right shift. For native operands this emits a plain `>>` (with casts to
/// work around libgccjit signedness restrictions). For emulated 128-bit
/// operands it builds a small CFG that dispatches on the shift amount.
pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
    let a_type = a.get_type();
    let b_type = b.get_type();
    let a_native = self.is_native_int_type(a_type);
    let b_native = self.is_native_int_type(b_type);
    if a_native && b_native {
        // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
        // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
        if a_type.is_unsigned(self) && b_type.is_signed(self) {
            // Shift in the signed type, then cast the result back.
            let a = self.context.new_cast(None, a, b_type);
            let result = a >> b;
            self.context.new_cast(None, result, a_type)
        }
        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
            // Bring the shift amount into the value's type.
            let b = self.context.new_cast(None, b, a_type);
            a >> b
        }
        else {
            a >> b
        }
    }
    else if a_native && !b_native {
        // Native value, emulated shift amount: narrow the amount and retry.
        self.gcc_lshr(a, self.gcc_int_cast(b, a_type))
    }
    else {
        // NOTE: we cannot use the lshr builtin because it's calling hi() (to get the most
        // significant half of the number) which uses lshr.

        let native_int_type = a_type.dyncast_array().expect("get element type");

        // Three-way dispatch on the shift amount `b`:
        //   then_block:        b >= 64  -> result comes entirely from the high half.
        //   b0_block:          b == 0   -> result is `a` unchanged.
        //   actual_else_block: 0 < b < 64 -> combine both halves.
        let func = self.current_func();
        let then_block = func.new_block("then");
        let else_block = func.new_block("else");
        let after_block = func.new_block("after");
        let b0_block = func.new_block("b0");
        let actual_else_block = func.new_block("actual_else");

        let result = func.new_local(None, a_type, "shiftResult");

        let sixty_four = self.gcc_int(native_int_type, 64);
        let sixty_three = self.gcc_int(native_int_type, 63);
        let zero = self.gcc_zero(native_int_type);
        let b = self.gcc_int_cast(b, native_int_type);
        // `b & 64 != 0` detects shift amounts of 64..=127.
        let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
        self.llbb().end_with_conditional(None, condition, then_block, else_block);

        // TODO(antoyo): take endianness into account.
        let shift_value = self.gcc_sub(b, sixty_four);
        let high = self.high(a);
        // For signed types, the new high half is the sign extension
        // (high >> 63 is all ones or all zeros); otherwise zero.
        let sign =
            if a_type.is_signed(self) {
                high >> sixty_three
            }
            else {
                zero
            };
        let values = [
            high >> shift_value,
            sign,
        ];
        let array_value = self.context.new_array_constructor(None, a_type, &values);
        then_block.add_assignment(None, result, array_value);
        then_block.end_with_jump(None, after_block);

        // Shift by zero: pass the value through untouched.
        let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
        else_block.end_with_conditional(None, condition, b0_block, actual_else_block);

        b0_block.add_assignment(None, result, a);
        b0_block.end_with_jump(None, after_block);

        // 0 < b < 64: low half receives bits shifted down from the high half.
        let shift_value = self.gcc_sub(sixty_four, b);
        // NOTE: cast low to its unsigned type in order to perform a logical right shift.
        let unsigned_type = native_int_type.to_unsigned(&self.cx);
        let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
        let shifted_low = casted_low >> self.context.new_cast(None, b, unsigned_type);
        let shifted_low = self.context.new_cast(None, shifted_low, native_int_type);
        let values = [
            (high << shift_value) | shifted_low,
            high >> b,
        ];
        let array_value = self.context.new_array_constructor(None, a_type, &values);
        actual_else_block.add_assignment(None, result, array_value);
        actual_else_block.end_with_jump(None, after_block);

        // NOTE: since jumps were added in a place rustc does not expect, the current block in the
        // state need to be updated.
        self.block = Some(after_block);
        *self.cx.current_block.borrow_mut() = Some(after_block);

        result.to_rvalue()
    }
}
|
||||
|
||||
fn additive_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
let a_type = a.get_type();
|
||||
let b_type = b.get_type();
|
||||
if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
|
||||
if a.get_type() != b.get_type() {
|
||||
b = self.context.new_cast(None, b, a.get_type());
|
||||
}
|
||||
self.context.new_binary_op(None, operation, a_type, a, b)
|
||||
}
|
||||
else {
|
||||
let signed = a_type.is_compatible_with(self.i128_type);
|
||||
let func_name =
|
||||
match (operation, signed) {
|
||||
(BinaryOp::Plus, true) => "__rust_i128_add",
|
||||
(BinaryOp::Plus, false) => "__rust_u128_add",
|
||||
(BinaryOp::Minus, true) => "__rust_i128_sub",
|
||||
(BinaryOp::Minus, false) => "__rust_u128_sub",
|
||||
_ => unreachable!("unexpected additive operation {:?}", operation),
|
||||
};
|
||||
let param_a = self.context.new_parameter(None, a_type, "a");
|
||||
let param_b = self.context.new_parameter(None, b_type, "b");
|
||||
let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
|
||||
self.context.new_call(None, func, &[a, b])
|
||||
}
|
||||
}
|
||||
|
||||
pub fn gcc_add(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
self.additive_operation(BinaryOp::Plus, a, b)
|
||||
}
|
||||
|
||||
pub fn gcc_mul(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
self.multiplicative_operation(BinaryOp::Mult, "mul", true, a, b)
|
||||
}
|
||||
|
||||
pub fn gcc_sub(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
self.additive_operation(BinaryOp::Minus, a, b)
|
||||
}
|
||||
|
||||
fn multiplicative_operation(&self, operation: BinaryOp, operation_name: &str, signed: bool, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
let a_type = a.get_type();
|
||||
let b_type = b.get_type();
|
||||
if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
|
||||
self.context.new_binary_op(None, operation, a_type, a, b)
|
||||
}
|
||||
else {
|
||||
let sign =
|
||||
if signed {
|
||||
""
|
||||
}
|
||||
else {
|
||||
"u"
|
||||
};
|
||||
let func_name = format!("__{}{}ti3", sign, operation_name);
|
||||
let param_a = self.context.new_parameter(None, a_type, "a");
|
||||
let param_b = self.context.new_parameter(None, b_type, "b");
|
||||
let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
|
||||
self.context.new_call(None, func, &[a, b])
|
||||
}
|
||||
}
|
||||
|
||||
pub fn gcc_sdiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
// TODO(antoyo): check if the types are signed?
|
||||
// 128-bit, signed: __divti3
|
||||
// TODO(antoyo): convert the arguments to signed?
|
||||
self.multiplicative_operation(BinaryOp::Divide, "div", true, a, b)
|
||||
}
|
||||
|
||||
pub fn gcc_udiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
// 128-bit, unsigned: __udivti3
|
||||
self.multiplicative_operation(BinaryOp::Divide, "div", false, a, b)
|
||||
}
|
||||
|
||||
/// Overflow-checked add/sub/mul. Returns `(result, overflowed)` where the
/// second value is a boolean flag. Native widths use gcc's
/// `__builtin_*_overflow` family; emulated 128-bit values call the
/// `__rust_{i,u}128_*o` helpers which return a `{ result, overflow }` struct.
pub fn gcc_checked_binop(&self, oop: OverflowOp, typ: Ty<'_>, lhs: <Self as BackendTypes>::Value, rhs: <Self as BackendTypes>::Value) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) {
    use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};

    // Normalize isize/usize to the fixed-width kind for the target pointer width.
    let new_kind =
        match typ.kind() {
            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
            t @ (Uint(_) | Int(_)) => t.clone(),
            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
        };

    // TODO(antoyo): remove duplication with intrinsic?
    // Pick the gcc builtin matching the operation and the operand kind.
    let name =
        if self.is_native_int_type(lhs.get_type()) {
            match oop {
                OverflowOp::Add =>
                    match new_kind {
                        Int(I8) => "__builtin_add_overflow",
                        Int(I16) => "__builtin_add_overflow",
                        Int(I32) => "__builtin_sadd_overflow",
                        Int(I64) => "__builtin_saddll_overflow",
                        Int(I128) => "__builtin_add_overflow",

                        Uint(U8) => "__builtin_add_overflow",
                        Uint(U16) => "__builtin_add_overflow",
                        Uint(U32) => "__builtin_uadd_overflow",
                        Uint(U64) => "__builtin_uaddll_overflow",
                        Uint(U128) => "__builtin_add_overflow",

                        _ => unreachable!(),
                    },
                OverflowOp::Sub =>
                    match new_kind {
                        Int(I8) => "__builtin_sub_overflow",
                        Int(I16) => "__builtin_sub_overflow",
                        Int(I32) => "__builtin_ssub_overflow",
                        Int(I64) => "__builtin_ssubll_overflow",
                        Int(I128) => "__builtin_sub_overflow",

                        Uint(U8) => "__builtin_sub_overflow",
                        Uint(U16) => "__builtin_sub_overflow",
                        Uint(U32) => "__builtin_usub_overflow",
                        Uint(U64) => "__builtin_usubll_overflow",
                        Uint(U128) => "__builtin_sub_overflow",

                        _ => unreachable!(),
                    },
                OverflowOp::Mul =>
                    match new_kind {
                        Int(I8) => "__builtin_mul_overflow",
                        Int(I16) => "__builtin_mul_overflow",
                        Int(I32) => "__builtin_smul_overflow",
                        Int(I64) => "__builtin_smulll_overflow",
                        Int(I128) => "__builtin_mul_overflow",

                        Uint(U8) => "__builtin_mul_overflow",
                        Uint(U16) => "__builtin_mul_overflow",
                        Uint(U32) => "__builtin_umul_overflow",
                        Uint(U64) => "__builtin_umulll_overflow",
                        Uint(U128) => "__builtin_mul_overflow",

                        _ => unreachable!(),
                    },
            }
        }
        else {
            match new_kind {
                // Emulated 128-bit: call the helper and return early with the
                // unpacked { result, overflow } struct fields.
                Int(I128) | Uint(U128) => {
                    let func_name =
                        match oop {
                            OverflowOp::Add =>
                                match new_kind {
                                    Int(I128) => "__rust_i128_addo",
                                    Uint(U128) => "__rust_u128_addo",
                                    _ => unreachable!(),
                                },
                            OverflowOp::Sub =>
                                match new_kind {
                                    Int(I128) => "__rust_i128_subo",
                                    Uint(U128) => "__rust_u128_subo",
                                    _ => unreachable!(),
                                },
                            OverflowOp::Mul =>
                                match new_kind {
                                    Int(I128) => "__rust_i128_mulo", // TODO(antoyo): use __muloti4d instead?
                                    Uint(U128) => "__rust_u128_mulo",
                                    _ => unreachable!(),
                                },
                        };
                    let a_type = lhs.get_type();
                    let b_type = rhs.get_type();
                    let param_a = self.context.new_parameter(None, a_type, "a");
                    let param_b = self.context.new_parameter(None, b_type, "b");
                    let result_field = self.context.new_field(None, a_type, "result");
                    let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
                    let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
                    let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
                    let result = self.context.new_call(None, func, &[lhs, rhs]);
                    let overflow = result.access_field(None, overflow_field);
                    let int_result = result.access_field(None, result_field);
                    return (int_result, overflow);
                },
                _ => {
                    match oop {
                        OverflowOp::Mul =>
                            match new_kind {
                                Int(I32) => "__mulosi4",
                                Int(I64) => "__mulodi4",
                                _ => unreachable!(),
                            },
                        _ => unimplemented!("overflow operation for {:?}", new_kind),
                    }
                }
            }
        };

    let intrinsic = self.context.get_builtin_function(&name);
    let res = self.current_func()
        // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
        .new_local(None, rhs.get_type(), "binopResult")
        .get_address(None);
    let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
    (res.dereference(None).to_rvalue(), overflow)
}
|
||||
|
||||
/// Integer comparison. Emulated 128-bit operands go through `__cmpti2` /
/// `__ucmpti2`; native operands use a gcc comparison directly.
pub fn gcc_icmp(&self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
    let a_type = lhs.get_type();
    let b_type = rhs.get_type();
    if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) {
        let signed = a_type.is_compatible_with(self.i128_type);
        let sign =
            if signed {
                ""
            }
            else {
                "u"
            };
        let func_name = format!("__{}cmpti2", sign);
        let param_a = self.context.new_parameter(None, a_type, "a");
        let param_b = self.context.new_parameter(None, b_type, "b");
        let func = self.context.new_function(None, FunctionType::Extern, self.int_type, &[param_a, param_b], func_name, false);
        let cmp = self.context.new_call(None, func, &[lhs, rhs]);
        // `cmp` follows the libgcc __cmpti2 convention: 0 if a < b, 1 if
        // a == b, 2 if a > b. Map the predicate to a comparison against
        // that 3-valued result (equality predicates return early).
        let (op, limit) =
            match op {
                IntPredicate::IntEQ => {
                    return self.context.new_comparison(None, ComparisonOp::Equals, cmp, self.context.new_rvalue_one(self.int_type));
                },
                IntPredicate::IntNE => {
                    return self.context.new_comparison(None, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type));
                },
                IntPredicate::IntUGT => (ComparisonOp::Equals, 2),
                IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1),
                IntPredicate::IntULT => (ComparisonOp::Equals, 0),
                IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1),
                IntPredicate::IntSGT => (ComparisonOp::Equals, 2),
                IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1),
                IntPredicate::IntSLT => (ComparisonOp::Equals, 0),
                IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1),
            };
        self.context.new_comparison(None, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit))
    }
    else {
        let left_type = lhs.get_type();
        let right_type = rhs.get_type();
        if left_type != right_type {
            // NOTE: because libgccjit cannot compare function pointers.
            if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() {
                lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
                rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
            }
            // NOTE: hack because we try to cast a vector type to the same vector type.
            else if format!("{:?}", left_type) != format!("{:?}", right_type) {
                rhs = self.context.new_cast(None, rhs, left_type);
            }
        }
        self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
    }
}
|
||||
|
||||
/// Bitwise XOR. Emulated 128-bit operands are xored per 64-bit half.
pub fn gcc_xor(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
    let a_type = a.get_type();
    let b_type = b.get_type();
    if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
        return a ^ b;
    }
    let low = self.low(a) ^ self.low(b);
    let high = self.high(a) ^ self.high(b);
    self.context.new_array_constructor(None, a_type, &[low, high])
}
|
||||
|
||||
/// Left shift. Native operands use `<<` (with signedness casts); emulated
/// 128-bit operands build a small CFG dispatching on the shift amount,
/// mirroring [`gcc_lshr`].
pub fn gcc_shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
    let a_type = a.get_type();
    let b_type = b.get_type();
    let a_native = self.is_native_int_type(a_type);
    let b_native = self.is_native_int_type(b_type);
    if a_native && b_native {
        // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
        if a_type.is_unsigned(self) && b_type.is_signed(self) {
            // Shift in the signed type, then cast the result back.
            let a = self.context.new_cast(None, a, b_type);
            let result = a << b;
            self.context.new_cast(None, result, a_type)
        }
        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
            // Bring the shift amount into the value's type.
            let b = self.context.new_cast(None, b, a_type);
            a << b
        }
        else {
            a << b
        }
    }
    else if a_native && !b_native {
        // Native value, emulated shift amount: narrow the amount and retry.
        self.gcc_shl(a, self.gcc_int_cast(b, a_type))
    }
    else {
        // NOTE: we cannot use the ashl builtin because it's calling widen_hi() which uses ashl.
        let native_int_type = a_type.dyncast_array().expect("get element type");

        // Three-way dispatch on the shift amount `b`:
        //   then_block:        b >= 64  -> low half is zero, high half from low.
        //   b0_block:          b == 0   -> result is `a` unchanged.
        //   actual_else_block: 0 < b < 64 -> combine both halves.
        let func = self.current_func();
        let then_block = func.new_block("then");
        let else_block = func.new_block("else");
        let after_block = func.new_block("after");
        let b0_block = func.new_block("b0");
        let actual_else_block = func.new_block("actual_else");

        let result = func.new_local(None, a_type, "shiftResult");

        let b = self.gcc_int_cast(b, native_int_type);
        let sixty_four = self.gcc_int(native_int_type, 64);
        let zero = self.gcc_zero(native_int_type);
        // `b & 64 != 0` detects shift amounts of 64..=127.
        let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
        self.llbb().end_with_conditional(None, condition, then_block, else_block);

        // TODO(antoyo): take endianness into account.
        let values = [
            zero,
            self.low(a) << (b - sixty_four),
        ];
        let array_value = self.context.new_array_constructor(None, a_type, &values);
        then_block.add_assignment(None, result, array_value);
        then_block.end_with_jump(None, after_block);

        // Shift by zero: pass the value through untouched.
        let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
        else_block.end_with_conditional(None, condition, b0_block, actual_else_block);

        b0_block.add_assignment(None, result, a);
        b0_block.end_with_jump(None, after_block);

        // NOTE: cast low to its unsigned type in order to perform a logical right shift.
        // 0 < b < 64: high half receives bits shifted up from the low half.
        let unsigned_type = native_int_type.to_unsigned(&self.cx);
        let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
        let shift_value = self.context.new_cast(None, sixty_four - b, unsigned_type);
        let high_low = self.context.new_cast(None, casted_low >> shift_value, native_int_type);
        let values = [
            self.low(a) << b,
            (self.high(a) << b) | high_low,
        ];

        let array_value = self.context.new_array_constructor(None, a_type, &values);
        actual_else_block.add_assignment(None, result, array_value);
        actual_else_block.end_with_jump(None, after_block);

        // NOTE: since jumps were added in a place rustc does not expect, the current block in the
        // state need to be updated.
        self.block = Some(after_block);
        *self.cx.current_block.borrow_mut() = Some(after_block);

        result.to_rvalue()
    }
}
|
||||
|
||||
/// Byte-swap a value of the given bit `width`. Emulated 128-bit values are
/// handled by byte-swapping each 64-bit half and swapping the halves.
pub fn gcc_bswap(&mut self, mut arg: RValue<'gcc>, width: u64) -> RValue<'gcc> {
    let arg_type = arg.get_type();
    if !self.is_native_int_type(arg_type) {
        let native_int_type = arg_type.dyncast_array().expect("get element type");
        // Swap bytes within each half recursively (width / 2 == 64 here).
        let lsb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 0)).to_rvalue();
        let swapped_lsb = self.gcc_bswap(lsb, width / 2);
        let swapped_lsb = self.context.new_cast(None, swapped_lsb, native_int_type);
        let msb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 1)).to_rvalue();
        let swapped_msb = self.gcc_bswap(msb, width / 2);
        let swapped_msb = self.context.new_cast(None, swapped_msb, native_int_type);

        // NOTE: we also need to swap the two elements here, in addition to swapping inside
        // the elements themselves like done above.
        return self.context.new_array_constructor(None, arg_type, &[swapped_msb, swapped_lsb]);
    }

    // TODO(antoyo): check if it's faster to use string literals and a
    // match instead of format!.
    let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
    // FIXME(antoyo): this cast should not be necessary. Remove
    // when having proper sized integer types.
    let param_type = bswap.get_param(0).to_rvalue().get_type();
    if param_type != arg_type {
        arg = self.bitcast(arg, param_type);
    }
    self.cx.context.new_call(None, bswap, &[arg])
}
|
||||
}
|
||||
|
||||
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
|
||||
/// Builds a signed integer constant of type `typ` from `int`.
/// Emulated 128-bit types are built from their two 64-bit halves, with the
/// high half sign-extended.
pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
    if self.is_native_int_type_or_bool(typ) {
        // `int` is already an i64, so the old `i64::try_from(int)` was an
        // identity conversion that could never fail — pass it directly.
        self.context.new_rvalue_from_long(typ, int)
    }
    else {
        // NOTE: set the sign in high: all-ones for negative values, zero otherwise.
        self.from_low_high(typ, int, -(int.is_negative() as i64))
    }
}
|
||||
|
||||
/// Builds an unsigned integer constant of type `typ` from `int`.
/// Emulated 128-bit types get a zero high half.
pub fn gcc_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
    if self.is_native_int_type_or_bool(typ) {
        // `int` is already a u64, so the old `u64::try_from(int)` was an
        // identity conversion that could never fail — cast directly. The
        // `as i64` reinterprets the bit pattern for the libgccjit API.
        self.context.new_rvalue_from_long(typ, int as i64)
    }
    else {
        self.from_low_high(typ, int as i64, 0)
    }
}
|
||||
|
||||
/// Builds a (possibly 128-bit) unsigned integer constant of type `typ` from
/// `num`, splitting it into 64-bit halves when it does not fit in 64 bits.
pub fn gcc_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
    let low = num as u64;
    let high = (num >> 64) as u64;
    // Reuse `high` instead of recomputing `num >> 64` for the test: the top
    // 64 bits of a u128 always fit in a u64, so the two are equivalent.
    if high != 0 {
        // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
        if self.is_native_int_type(typ) {
            // Native 128-bit: build (high << 64) | low.
            let low = self.context.new_rvalue_from_long(self.u64_type, low as i64);
            let high = self.context.new_rvalue_from_long(typ, high as i64);

            let sixty_four = self.context.new_rvalue_from_long(typ, 64);
            let shift = high << sixty_four;
            shift | self.context.new_cast(None, low, typ)
        }
        else {
            self.from_low_high(typ, low as i64, high as i64)
        }
    }
    else if typ.is_i128(self) {
        // Fits in 64 bits, but the target type is i128: widen via a cast.
        let num = self.context.new_rvalue_from_long(self.u64_type, low as i64);
        self.gcc_int_cast(num, typ)
    }
    else {
        self.gcc_uint(typ, low)
    }
}
|
||||
|
||||
/// Zero constant of the given integer (or bool) type; emulated 128-bit
/// types get both halves zeroed.
pub fn gcc_zero(&self, typ: Type<'gcc>) -> RValue<'gcc> {
    if !self.is_native_int_type_or_bool(typ) {
        return self.from_low_high(typ, 0, 0);
    }
    self.context.new_rvalue_zero(typ)
}
|
||||
|
||||
/// Bit width of an integer (or bool) type.
pub fn gcc_int_width(&self, typ: Type<'gcc>) -> u64 {
    if !self.is_native_int_type_or_bool(typ) {
        // NOTE: the only unsupported types are u128 and i128.
        return 128;
    }
    typ.get_size() as u64 * 8
}
|
||||
|
||||
/// Shared AND/OR helper. Native operands use a gcc binary op; emulated
/// 128-bit operands apply the operation to each 64-bit half independently.
fn bitwise_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
    let a_type = a.get_type();
    let b_type = b.get_type();
    let a_native = self.is_native_int_type_or_bool(a_type);
    let b_native = self.is_native_int_type_or_bool(b_type);
    if a_native && b_native {
        if a_type != b_type {
            b = self.context.new_cast(None, b, a_type);
        }
        return self.context.new_binary_op(None, operation, a_type, a, b);
    }

    assert!(!a_native && !b_native, "both types should either be native or non-native for or operation");
    let native_int_type = a_type.dyncast_array().expect("get element type");
    let low = self.context.new_binary_op(None, operation, native_int_type, self.low(a), self.low(b));
    let high = self.context.new_binary_op(None, operation, native_int_type, self.high(a), self.high(b));
    self.context.new_array_constructor(None, a_type, &[low, high])
}
|
||||
|
||||
pub fn gcc_or(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
|
||||
self.bitwise_operation(BinaryOp::BitwiseOr, a, b)
|
||||
}
|
||||
|
||||
// TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead?
|
||||
// TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead?
/// Integer-to-integer cast that also handles emulated 128-bit operands on
/// either side (truncation, sign/zero extension, or bitcast).
pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
    let value_type = value.get_type();
    if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type) {
        // Native -> native: a plain gcc cast suffices.
        self.context.new_cast(None, value, dest_typ)
    }
    else if self.is_native_int_type_or_bool(dest_typ) {
        // Emulated 128-bit -> native: truncate by keeping the low half.
        self.context.new_cast(None, self.low(value), dest_typ)
    }
    else if self.is_native_int_type_or_bool(value_type) {
        // Native -> emulated 128-bit: widen, filling the high half with the sign.
        let dest_element_type = dest_typ.dyncast_array().expect("get element type");

        // NOTE: set the sign of the value.
        // `is_negative` is 0 or 1, so `-is_negative` yields 0 or all-ones
        // for the high half (sign extension).
        let zero = self.context.new_rvalue_zero(value_type);
        let is_negative = self.context.new_comparison(None, ComparisonOp::LessThan, value, zero);
        let is_negative = self.gcc_int_cast(is_negative, dest_element_type);
        let values = [
            self.context.new_cast(None, value, dest_element_type),
            self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative),
        ];
        self.context.new_array_constructor(None, dest_typ, &values)
    }
    else {
        // Since u128 and i128 are the only types that can be unsupported, we know the type of
        // value and the destination type have the same size, so a bitcast is fine.
        self.context.new_bitcast(None, value, dest_typ)
    }
}
|
||||
|
||||
fn int_to_float_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
|
||||
let value_type = value.get_type();
|
||||
if self.is_native_int_type_or_bool(value_type) {
|
||||
return self.context.new_cast(None, value, dest_typ);
|
||||
}
|
||||
|
||||
let name_suffix =
|
||||
match self.type_kind(dest_typ) {
|
||||
TypeKind::Float => "tisf",
|
||||
TypeKind::Double => "tidf",
|
||||
kind => panic!("cannot cast a non-native integer to type {:?}", kind),
|
||||
};
|
||||
let sign =
|
||||
if signed {
|
||||
""
|
||||
}
|
||||
else {
|
||||
"un"
|
||||
};
|
||||
let func_name = format!("__float{}{}", sign, name_suffix);
|
||||
let param = self.context.new_parameter(None, value_type, "n");
|
||||
let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
|
||||
self.context.new_call(None, func, &[value])
|
||||
}
|
||||
|
||||
/// Signed integer-to-float conversion (see [`int_to_float_cast`]).
pub fn gcc_int_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
    let signed = true;
    self.int_to_float_cast(signed, value, dest_typ)
}
|
||||
|
||||
/// Unsigned integer-to-float conversion (see [`int_to_float_cast`]).
pub fn gcc_uint_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
    let signed = false;
    self.int_to_float_cast(signed, value, dest_typ)
}
|
||||
|
||||
fn float_to_int_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
|
||||
let value_type = value.get_type();
|
||||
if self.is_native_int_type_or_bool(dest_typ) {
|
||||
return self.context.new_cast(None, value, dest_typ);
|
||||
}
|
||||
|
||||
let name_suffix =
|
||||
match self.type_kind(value_type) {
|
||||
TypeKind::Float => "sfti",
|
||||
TypeKind::Double => "dfti",
|
||||
kind => panic!("cannot cast a {:?} to non-native integer", kind),
|
||||
};
|
||||
let sign =
|
||||
if signed {
|
||||
""
|
||||
}
|
||||
else {
|
||||
"uns"
|
||||
};
|
||||
let func_name = format!("__fix{}{}", sign, name_suffix);
|
||||
let param = self.context.new_parameter(None, value_type, "n");
|
||||
let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
|
||||
self.context.new_call(None, func, &[value])
|
||||
}
|
||||
|
||||
/// Float-to-signed-integer conversion (see [`float_to_int_cast`]).
pub fn gcc_float_to_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
    let signed = true;
    self.float_to_int_cast(signed, value, dest_typ)
}
|
||||
|
||||
/// Float-to-unsigned-integer conversion (see [`float_to_int_cast`]).
pub fn gcc_float_to_uint_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
    let signed = false;
    self.float_to_int_cast(signed, value, dest_typ)
}
|
||||
|
||||
/// Accesses element 1 of an emulated 128-bit value — its most significant
/// 64-bit half. (TODO note elsewhere: endianness is not yet considered.)
fn high(&self, value: RValue<'gcc>) -> RValue<'gcc> {
    let index = self.context.new_rvalue_from_int(self.int_type, 1);
    self.context.new_array_access(None, value, index).to_rvalue()
}
|
||||
|
||||
/// Accesses element 0 of an emulated 128-bit value — its least significant
/// 64-bit half.
fn low(&self, value: RValue<'gcc>) -> RValue<'gcc> {
    let index = self.context.new_rvalue_from_int(self.int_type, 0);
    self.context.new_array_access(None, value, index).to_rvalue()
}
|
||||
|
||||
/// Builds an emulated 128-bit value of type `typ` from its two 64-bit
/// halves, stored as a 2-element array `[low, high]`.
fn from_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
    let native_int_type = typ.dyncast_array().expect("get element type");
    let low_value = self.context.new_rvalue_from_long(native_int_type, low);
    let high_value = self.context.new_rvalue_from_long(native_int_type, high);
    self.context.new_array_constructor(None, typ, &[low_value, high_value])
}
|
||||
}
|
|
@ -1,7 +1,7 @@
|
|||
pub mod llvm;
|
||||
mod simd;
|
||||
|
||||
use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
|
||||
use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp, FunctionType};
|
||||
use rustc_codegen_ssa::MemFlags;
|
||||
use rustc_codegen_ssa::base::wants_msvc_seh;
|
||||
use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
|
||||
|
@ -175,11 +175,11 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
|
||||
let arg = args[0].immediate();
|
||||
let result = func.new_local(None, arg.get_type(), "zeros");
|
||||
let zero = self.cx.context.new_rvalue_zero(arg.get_type());
|
||||
let cond = self.cx.context.new_comparison(None, ComparisonOp::Equals, arg, zero);
|
||||
let zero = self.cx.gcc_zero(arg.get_type());
|
||||
let cond = self.gcc_icmp(IntPredicate::IntEQ, arg, zero);
|
||||
self.llbb().end_with_conditional(None, cond, then_block, else_block);
|
||||
|
||||
let zero_result = self.cx.context.new_rvalue_from_long(arg.get_type(), width as i64);
|
||||
let zero_result = self.cx.gcc_uint(arg.get_type(), width);
|
||||
then_block.add_assignment(None, result, zero_result);
|
||||
then_block.end_with_jump(None, after_block);
|
||||
|
||||
|
@ -195,8 +195,8 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
sym::cttz => self.count_trailing_zeroes(width, arg),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
else_block.add_assignment(None, result, zeros);
|
||||
else_block.end_with_jump(None, after_block);
|
||||
self.llbb().add_assignment(None, result, zeros);
|
||||
self.llbb().end_with_jump(None, after_block);
|
||||
|
||||
// NOTE: since jumps were added in a place rustc does not
|
||||
// expect, the current blocks in the state need to be updated.
|
||||
|
@ -217,17 +217,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
|
|||
args[0].immediate() // byte swap a u8/i8 is just a no-op
|
||||
}
|
||||
else {
|
||||
// TODO(antoyo): check if it's faster to use string literals and a
|
||||
// match instead of format!.
|
||||
let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
|
||||
let mut arg = args[0].immediate();
|
||||
// FIXME(antoyo): this cast should not be necessary. Remove
|
||||
// when having proper sized integer types.
|
||||
let param_type = bswap.get_param(0).to_rvalue().get_type();
|
||||
if param_type != arg.get_type() {
|
||||
arg = self.bitcast(arg, param_type);
|
||||
}
|
||||
self.cx.context.new_call(None, bswap, &[arg])
|
||||
self.gcc_bswap(args[0].immediate(), width)
|
||||
}
|
||||
},
|
||||
sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
|
||||
|
@ -526,7 +516,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
|
||||
let value =
|
||||
if result_type.is_signed(self.cx) {
|
||||
self.context.new_cast(None, value, typ)
|
||||
self.gcc_int_cast(value, typ)
|
||||
}
|
||||
else {
|
||||
value
|
||||
|
@ -673,30 +663,33 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
},
|
||||
128 => {
|
||||
// TODO(antoyo): find a more efficient implementation?
|
||||
let sixty_four = self.context.new_rvalue_from_long(typ, 64);
|
||||
let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
|
||||
let low = self.context.new_cast(None, value, self.u64_type);
|
||||
let sixty_four = self.gcc_int(typ, 64);
|
||||
let right_shift = self.gcc_lshr(value, sixty_four);
|
||||
let high = self.gcc_int_cast(right_shift, self.u64_type);
|
||||
let low = self.gcc_int_cast(value, self.u64_type);
|
||||
|
||||
let reversed_high = self.bit_reverse(64, high);
|
||||
let reversed_low = self.bit_reverse(64, low);
|
||||
|
||||
let new_low = self.context.new_cast(None, reversed_high, typ);
|
||||
let new_high = self.context.new_cast(None, reversed_low, typ) << sixty_four;
|
||||
let new_low = self.gcc_int_cast(reversed_high, typ);
|
||||
let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four);
|
||||
|
||||
new_low | new_high
|
||||
self.gcc_or(new_low, new_high)
|
||||
},
|
||||
_ => {
|
||||
panic!("cannot bit reverse with width = {}", width);
|
||||
},
|
||||
};
|
||||
|
||||
self.context.new_cast(None, result, result_type)
|
||||
self.gcc_int_cast(result, result_type)
|
||||
}
|
||||
|
||||
fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
|
||||
fn count_leading_zeroes(&mut self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
|
||||
// TODO(antoyo): use width?
|
||||
let arg_type = arg.get_type();
|
||||
let count_leading_zeroes =
|
||||
// TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
|
||||
// instead of using is_uint().
|
||||
if arg_type.is_uint(&self.cx) {
|
||||
"__builtin_clz"
|
||||
}
|
||||
|
@ -712,9 +705,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
let result = self.current_func()
|
||||
.new_local(None, array_type, "count_loading_zeroes_results");
|
||||
|
||||
let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
|
||||
let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
|
||||
let low = self.context.new_cast(None, arg, self.u64_type);
|
||||
let sixty_four = self.const_uint(arg_type, 64);
|
||||
let shift = self.lshr(arg, sixty_four);
|
||||
let high = self.gcc_int_cast(shift, self.u64_type);
|
||||
let low = self.gcc_int_cast(arg, self.u64_type);
|
||||
|
||||
let zero = self.context.new_rvalue_zero(self.usize_type);
|
||||
let one = self.context.new_rvalue_one(self.usize_type);
|
||||
|
@ -723,17 +717,18 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
let clzll = self.context.get_builtin_function("__builtin_clzll");
|
||||
|
||||
let first_elem = self.context.new_array_access(None, result, zero);
|
||||
let first_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[high]), arg_type);
|
||||
let first_value = self.gcc_int_cast(self.context.new_call(None, clzll, &[high]), arg_type);
|
||||
self.llbb()
|
||||
.add_assignment(None, first_elem, first_value);
|
||||
|
||||
let second_elem = self.context.new_array_access(None, result, one);
|
||||
let second_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[low]), arg_type) + sixty_four;
|
||||
let cast = self.gcc_int_cast(self.context.new_call(None, clzll, &[low]), arg_type);
|
||||
let second_value = self.add(cast, sixty_four);
|
||||
self.llbb()
|
||||
.add_assignment(None, second_elem, second_value);
|
||||
|
||||
let third_elem = self.context.new_array_access(None, result, two);
|
||||
let third_value = self.context.new_rvalue_from_long(arg_type, 128);
|
||||
let third_value = self.const_uint(arg_type, 128);
|
||||
self.llbb()
|
||||
.add_assignment(None, third_elem, third_value);
|
||||
|
||||
|
@ -749,13 +744,13 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
|
||||
let res = self.context.new_array_access(None, result, index);
|
||||
|
||||
return self.context.new_cast(None, res, arg_type);
|
||||
return self.gcc_int_cast(res.to_rvalue(), arg_type);
|
||||
}
|
||||
else {
|
||||
let count_leading_zeroes = self.context.get_builtin_function("__builtin_clz");
|
||||
let arg = self.context.new_cast(None, arg, self.uint_type);
|
||||
let diff = self.int_width(self.uint_type) - self.int_width(arg_type);
|
||||
let diff = self.context.new_rvalue_from_long(self.int_type, diff);
|
||||
let count_leading_zeroes = self.context.get_builtin_function("__builtin_clzll");
|
||||
let arg = self.context.new_cast(None, arg, self.ulonglong_type);
|
||||
let diff = self.ulonglong_type.get_size() as i64 - arg_type.get_size() as i64;
|
||||
let diff = self.context.new_rvalue_from_long(self.int_type, diff * 8);
|
||||
let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
|
||||
return self.context.new_cast(None, res, arg_type);
|
||||
};
|
||||
|
@ -764,18 +759,20 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
self.context.new_cast(None, res, arg_type)
|
||||
}
|
||||
|
||||
fn count_trailing_zeroes(&self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
|
||||
fn count_trailing_zeroes(&mut self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
|
||||
let result_type = arg.get_type();
|
||||
let arg =
|
||||
if result_type.is_signed(self.cx) {
|
||||
let new_type = result_type.to_unsigned(self.cx);
|
||||
self.context.new_cast(None, arg, new_type)
|
||||
self.gcc_int_cast(arg, new_type)
|
||||
}
|
||||
else {
|
||||
arg
|
||||
};
|
||||
let arg_type = arg.get_type();
|
||||
let (count_trailing_zeroes, expected_type) =
|
||||
// TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
|
||||
// instead of using is_uint().
|
||||
if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
|
||||
// NOTE: we don't need to & 0xFF for uchar because the result is undefined on zero.
|
||||
("__builtin_ctz", self.cx.uint_type)
|
||||
|
@ -792,9 +789,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
let result = self.current_func()
|
||||
.new_local(None, array_type, "count_loading_zeroes_results");
|
||||
|
||||
let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
|
||||
let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
|
||||
let low = self.context.new_cast(None, arg, self.u64_type);
|
||||
let sixty_four = self.gcc_int(arg_type, 64);
|
||||
let shift = self.gcc_lshr(arg, sixty_four);
|
||||
let high = self.gcc_int_cast(shift, self.u64_type);
|
||||
let low = self.gcc_int_cast(arg, self.u64_type);
|
||||
|
||||
let zero = self.context.new_rvalue_zero(self.usize_type);
|
||||
let one = self.context.new_rvalue_one(self.usize_type);
|
||||
|
@ -803,17 +801,17 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
let ctzll = self.context.get_builtin_function("__builtin_ctzll");
|
||||
|
||||
let first_elem = self.context.new_array_access(None, result, zero);
|
||||
let first_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[low]), arg_type);
|
||||
let first_value = self.gcc_int_cast(self.context.new_call(None, ctzll, &[low]), arg_type);
|
||||
self.llbb()
|
||||
.add_assignment(None, first_elem, first_value);
|
||||
|
||||
let second_elem = self.context.new_array_access(None, result, one);
|
||||
let second_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[high]), arg_type) + sixty_four;
|
||||
let second_value = self.gcc_add(self.gcc_int_cast(self.context.new_call(None, ctzll, &[high]), arg_type), sixty_four);
|
||||
self.llbb()
|
||||
.add_assignment(None, second_elem, second_value);
|
||||
|
||||
let third_elem = self.context.new_array_access(None, result, two);
|
||||
let third_value = self.context.new_rvalue_from_long(arg_type, 128);
|
||||
let third_value = self.gcc_int(arg_type, 128);
|
||||
self.llbb()
|
||||
.add_assignment(None, third_elem, third_value);
|
||||
|
||||
|
@ -829,10 +827,20 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
|
||||
let res = self.context.new_array_access(None, result, index);
|
||||
|
||||
return self.context.new_cast(None, res, result_type);
|
||||
return self.gcc_int_cast(res.to_rvalue(), result_type);
|
||||
}
|
||||
else {
|
||||
unimplemented!("count_trailing_zeroes for {:?}", arg_type);
|
||||
let count_trailing_zeroes = self.context.get_builtin_function("__builtin_ctzll");
|
||||
let arg_size = arg_type.get_size();
|
||||
let casted_arg = self.context.new_cast(None, arg, self.ulonglong_type);
|
||||
let byte_diff = self.ulonglong_type.get_size() as i64 - arg_size as i64;
|
||||
let diff = self.context.new_rvalue_from_long(self.int_type, byte_diff * 8);
|
||||
let mask = self.context.new_rvalue_from_long(arg_type, -1); // To get the value with all bits set.
|
||||
let masked = mask & self.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, arg);
|
||||
let cond = self.context.new_comparison(None, ComparisonOp::Equals, masked, mask);
|
||||
let diff = diff * self.context.new_cast(None, cond, self.int_type);
|
||||
let res = self.context.new_call(None, count_trailing_zeroes, &[casted_arg]) - diff;
|
||||
return self.context.new_cast(None, res, result_type);
|
||||
};
|
||||
let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
|
||||
let arg =
|
||||
|
@ -846,18 +854,14 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
self.context.new_cast(None, res, result_type)
|
||||
}
|
||||
|
||||
fn int_width(&self, typ: Type<'gcc>) -> i64 {
|
||||
self.cx.int_width(typ) as i64
|
||||
}
|
||||
|
||||
fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
|
||||
fn pop_count(&mut self, value: RValue<'gcc>) -> RValue<'gcc> {
|
||||
// TODO(antoyo): use the optimized version with fewer operations.
|
||||
let result_type = value.get_type();
|
||||
let value_type = result_type.to_unsigned(self.cx);
|
||||
|
||||
let value =
|
||||
if result_type.is_signed(self.cx) {
|
||||
self.context.new_cast(None, value, value_type)
|
||||
self.gcc_int_cast(value, value_type)
|
||||
}
|
||||
else {
|
||||
value
|
||||
|
@ -867,13 +871,14 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
// TODO(antoyo): implement in the normal algorithm below to have a more efficient
|
||||
// implementation (that does not require a call to __popcountdi2).
|
||||
let popcount = self.context.get_builtin_function("__builtin_popcountll");
|
||||
let sixty_four = self.context.new_rvalue_from_long(value_type, 64);
|
||||
let high = self.context.new_cast(None, value >> sixty_four, self.cx.ulonglong_type);
|
||||
let sixty_four = self.gcc_int(value_type, 64);
|
||||
let right_shift = self.gcc_lshr(value, sixty_four);
|
||||
let high = self.gcc_int_cast(right_shift, self.cx.ulonglong_type);
|
||||
let high = self.context.new_call(None, popcount, &[high]);
|
||||
let low = self.context.new_cast(None, value, self.cx.ulonglong_type);
|
||||
let low = self.gcc_int_cast(value, self.cx.ulonglong_type);
|
||||
let low = self.context.new_call(None, popcount, &[low]);
|
||||
let res = high + low;
|
||||
return self.context.new_cast(None, res, result_type);
|
||||
return self.gcc_int_cast(res, result_type);
|
||||
}
|
||||
|
||||
// First step.
|
||||
|
@ -935,13 +940,14 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
|
||||
// Algorithm from: https://blog.regehr.org/archives/1063
|
||||
fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
|
||||
let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
|
||||
let shift = shift % max;
|
||||
let max = self.const_uint(shift.get_type(), width);
|
||||
let shift = self.urem(shift, max);
|
||||
let lhs = self.shl(value, shift);
|
||||
let result_neg = self.neg(shift);
|
||||
let result_and =
|
||||
self.and(
|
||||
self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
|
||||
self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
|
||||
result_neg,
|
||||
self.const_uint(shift.get_type(), width - 1),
|
||||
);
|
||||
let rhs = self.lshr(value, result_and);
|
||||
self.or(lhs, rhs)
|
||||
|
@ -949,13 +955,14 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
|
||||
// Algorithm from: https://blog.regehr.org/archives/1063
|
||||
fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
|
||||
let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
|
||||
let shift = shift % max;
|
||||
let max = self.const_uint(shift.get_type(), width);
|
||||
let shift = self.urem(shift, max);
|
||||
let lhs = self.lshr(value, shift);
|
||||
let result_neg = self.neg(shift);
|
||||
let result_and =
|
||||
self.and(
|
||||
self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
|
||||
self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
|
||||
result_neg,
|
||||
self.const_uint(shift.get_type(), width - 1),
|
||||
);
|
||||
let rhs = self.shl(value, result_and);
|
||||
self.or(lhs, rhs)
|
||||
|
@ -1015,31 +1022,52 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
|
|||
fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
|
||||
if signed {
|
||||
// Also based on algorithm from: https://stackoverflow.com/a/56531252/389119
|
||||
let func_name =
|
||||
match width {
|
||||
8 => "__builtin_sub_overflow",
|
||||
16 => "__builtin_sub_overflow",
|
||||
32 => "__builtin_ssub_overflow",
|
||||
64 => "__builtin_ssubll_overflow",
|
||||
128 => "__builtin_sub_overflow",
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let overflow_func = self.context.get_builtin_function(func_name);
|
||||
let result_type = lhs.get_type();
|
||||
let func = self.current_func.borrow().expect("func");
|
||||
let res = func.new_local(None, result_type, "saturating_diff");
|
||||
let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);
|
||||
let supports_native_type = self.is_native_int_type(result_type);
|
||||
let overflow =
|
||||
if supports_native_type {
|
||||
let func_name =
|
||||
match width {
|
||||
8 => "__builtin_sub_overflow",
|
||||
16 => "__builtin_sub_overflow",
|
||||
32 => "__builtin_ssub_overflow",
|
||||
64 => "__builtin_ssubll_overflow",
|
||||
128 => "__builtin_sub_overflow",
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let overflow_func = self.context.get_builtin_function(func_name);
|
||||
self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
|
||||
}
|
||||
else {
|
||||
let func_name =
|
||||
match width {
|
||||
128 => "__rust_i128_subo",
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let param_a = self.context.new_parameter(None, result_type, "a");
|
||||
let param_b = self.context.new_parameter(None, result_type, "b");
|
||||
let result_field = self.context.new_field(None, result_type, "result");
|
||||
let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
|
||||
let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
|
||||
let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
|
||||
let result = self.context.new_call(None, func, &[lhs, rhs]);
|
||||
let overflow = result.access_field(None, overflow_field);
|
||||
let int_result = result.access_field(None, result_field);
|
||||
self.llbb().add_assignment(None, res, int_result);
|
||||
overflow
|
||||
};
|
||||
|
||||
let then_block = func.new_block("then");
|
||||
let after_block = func.new_block("after");
|
||||
|
||||
let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
|
||||
let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
|
||||
let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
|
||||
self.context.new_rvalue_from_int(unsigned_type, 0)
|
||||
);
|
||||
let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
|
||||
then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
|
||||
// NOTE: convert the type to unsigned to have an unsigned shift.
|
||||
let unsigned_type = result_type.to_unsigned(&self.cx);
|
||||
let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
|
||||
let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
|
||||
let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
|
||||
then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
|
||||
then_block.end_with_jump(None, after_block);
|
||||
|
||||
self.llbb().end_with_conditional(None, overflow, then_block, after_block);
|
||||
|
|
27
src/lib.rs
27
src/lib.rs
|
@ -1,4 +1,5 @@
|
|||
/*
|
||||
* TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?)
|
||||
* TODO(antoyo): support #[inline] attributes.
|
||||
* TODO(antoyo): support LTO (gcc's equivalent to Thin LTO is enabled by -fwhopr: https://stackoverflow.com/questions/64954525/does-gcc-have-thin-lto).
|
||||
*
|
||||
|
@ -21,6 +22,7 @@ extern crate rustc_middle;
|
|||
extern crate rustc_session;
|
||||
extern crate rustc_span;
|
||||
extern crate rustc_target;
|
||||
extern crate tempfile;
|
||||
|
||||
// This prevents duplicating functions and statics that are already part of the host rustc process.
|
||||
#[allow(unused_extern_crates)]
|
||||
|
@ -40,15 +42,16 @@ mod context;
|
|||
mod coverageinfo;
|
||||
mod debuginfo;
|
||||
mod declare;
|
||||
mod int;
|
||||
mod intrinsic;
|
||||
mod mono_item;
|
||||
mod type_;
|
||||
mod type_of;
|
||||
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use gccjit::{Context, OptimizationLevel};
|
||||
use gccjit::{Context, OptimizationLevel, CType};
|
||||
use rustc_ast::expand::allocator::AllocatorKind;
|
||||
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
|
||||
use rustc_codegen_ssa::base::codegen_crate;
|
||||
|
@ -65,6 +68,7 @@ use rustc_session::config::{Lto, OptLevel, OutputFilenames};
|
|||
use rustc_session::Session;
|
||||
use rustc_span::Symbol;
|
||||
use rustc_span::fatal_error::FatalError;
|
||||
use tempfile::TempDir;
|
||||
|
||||
pub struct PrintOnPanic<F: Fn() -> String>(pub F);
|
||||
|
||||
|
@ -77,13 +81,24 @@ impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
|
|||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct GccCodegenBackend;
|
||||
pub struct GccCodegenBackend {
|
||||
supports_128bit_integers: Arc<Mutex<bool>>,
|
||||
}
|
||||
|
||||
impl CodegenBackend for GccCodegenBackend {
|
||||
fn init(&self, sess: &Session) {
|
||||
if sess.lto() != Lto::No {
|
||||
sess.warn("LTO is not supported. You may get a linker error.");
|
||||
}
|
||||
|
||||
let temp_dir = TempDir::new().expect("cannot create temporary directory");
|
||||
let temp_file = temp_dir.into_path().join("result.asm");
|
||||
let check_context = Context::default();
|
||||
check_context.set_print_errors_to_stderr(false);
|
||||
let _int128_ty = check_context.new_c_type(CType::UInt128t);
|
||||
// NOTE: we cannot just call compile() as this would require other files than libgccjit.so.
|
||||
check_context.compile_to_file(gccjit::OutputKind::Assembler, temp_file.to_str().expect("path to str"));
|
||||
*self.supports_128bit_integers.lock().expect("lock") = check_context.get_last_error() == Ok(None);
|
||||
}
|
||||
|
||||
fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box<dyn Any> {
|
||||
|
@ -129,7 +144,7 @@ impl ExtraBackendMethods for GccCodegenBackend {
|
|||
}
|
||||
|
||||
fn compile_codegen_unit<'tcx>(&self, tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<Self::Module>, u64) {
|
||||
base::compile_codegen_unit(tcx, cgu_name)
|
||||
base::compile_codegen_unit(tcx, cgu_name, *self.supports_128bit_integers.lock().expect("lock"))
|
||||
}
|
||||
|
||||
fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel) -> TargetMachineFactoryFn<Self> {
|
||||
|
@ -237,7 +252,9 @@ impl WriteBackendMethods for GccCodegenBackend {
|
|||
/// This is the entrypoint for a hot plugged rustc_codegen_gccjit
|
||||
#[no_mangle]
|
||||
pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
|
||||
Box::new(GccCodegenBackend)
|
||||
Box::new(GccCodegenBackend {
|
||||
supports_128bit_integers: Arc::new(Mutex::new(false)),
|
||||
})
|
||||
}
|
||||
|
||||
fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
|
||||
|
|
28
src/type_.rs
28
src/type_.rs
|
@ -7,7 +7,6 @@ use rustc_middle::bug;
|
|||
use rustc_middle::ty::layout::TyAndLayout;
|
||||
use rustc_target::abi::{AddressSpace, Align, Integer, Size};
|
||||
|
||||
use crate::common::TypeReflection;
|
||||
use crate::context::CodegenCx;
|
||||
use crate::type_of::LayoutGccExt;
|
||||
|
||||
|
@ -119,9 +118,15 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
|
||||
if typ.is_integral() {
|
||||
if self.is_int_type_or_bool(typ) {
|
||||
TypeKind::Integer
|
||||
}
|
||||
else if typ == self.float_type {
|
||||
TypeKind::Float
|
||||
}
|
||||
else if typ == self.double_type {
|
||||
TypeKind::Double
|
||||
}
|
||||
else if typ.dyncast_vector().is_some() {
|
||||
TypeKind::Vector
|
||||
}
|
||||
|
@ -175,24 +180,7 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
|
|||
}
|
||||
|
||||
fn int_width(&self, typ: Type<'gcc>) -> u64 {
|
||||
if typ.is_i8(self) || typ.is_u8(self) {
|
||||
8
|
||||
}
|
||||
else if typ.is_i16(self) || typ.is_u16(self) {
|
||||
16
|
||||
}
|
||||
else if typ.is_i32(self) || typ.is_u32(self) {
|
||||
32
|
||||
}
|
||||
else if typ.is_i64(self) || typ.is_u64(self) {
|
||||
64
|
||||
}
|
||||
else if typ.is_i128(self) || typ.is_u128(self) {
|
||||
128
|
||||
}
|
||||
else {
|
||||
panic!("Cannot get width of int type {:?}", typ);
|
||||
}
|
||||
self.gcc_int_width(typ)
|
||||
}
|
||||
|
||||
fn val_ty(&self, value: RValue<'gcc>) -> Type<'gcc> {
|
||||
|
|
24
test.sh
24
test.sh
|
@ -14,14 +14,26 @@ fi
|
|||
export LD_LIBRARY_PATH="$GCC_PATH"
|
||||
export LIBRARY_PATH="$GCC_PATH"
|
||||
|
||||
features=
|
||||
|
||||
if [[ "$1" == "--features" ]]; then
|
||||
shift
|
||||
features="--features $1"
|
||||
shift
|
||||
fi
|
||||
|
||||
if [[ "$1" == "--release" ]]; then
|
||||
export CHANNEL='release'
|
||||
CARGO_INCREMENTAL=1 cargo rustc --release
|
||||
CARGO_INCREMENTAL=1 cargo rustc --release $features
|
||||
shift
|
||||
else
|
||||
echo $LD_LIBRARY_PATH
|
||||
export CHANNEL='debug'
|
||||
cargo rustc
|
||||
cargo rustc $features
|
||||
fi
|
||||
|
||||
if [[ "$1" == "--build" ]]; then
|
||||
exit
|
||||
fi
|
||||
|
||||
source config.sh
|
||||
|
@ -206,6 +218,14 @@ case $1 in
|
|||
clean_ui_tests
|
||||
;;
|
||||
|
||||
"--std-tests")
|
||||
std_tests
|
||||
;;
|
||||
|
||||
"--build-sysroot")
|
||||
build_sysroot
|
||||
;;
|
||||
|
||||
*)
|
||||
clean
|
||||
mini_tests
|
||||
|
|
151
tests/run/int.rs
Normal file
151
tests/run/int.rs
Normal file
|
@ -0,0 +1,151 @@
|
|||
// Compiler:
|
||||
//
|
||||
// Run-time:
|
||||
// status: 0
|
||||
|
||||
#![feature(arbitrary_self_types, auto_traits, core_intrinsics, lang_items, start, intrinsics)]
|
||||
|
||||
#![no_std]
|
||||
|
||||
mod intrinsics {
|
||||
extern "rust-intrinsic" {
|
||||
pub fn abort() -> !;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Core
|
||||
*/
|
||||
|
||||
mod libc {
|
||||
#[link(name = "c")]
|
||||
extern "C" {
|
||||
pub fn puts(s: *const u8) -> i32;
|
||||
}
|
||||
}
|
||||
|
||||
#[panic_handler]
|
||||
fn panic_handler(_: &core::panic::PanicInfo) -> ! {
|
||||
unsafe {
|
||||
core::intrinsics::abort();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Code
|
||||
*/
|
||||
|
||||
#[start]
|
||||
fn main(argc: isize, _argv: *const *const u8) -> isize {
|
||||
let var = 134217856_u128;
|
||||
let var2 = 10475372733397991552_u128;
|
||||
let var3 = 193236519889708027473620326106273939584_u128;
|
||||
let var4 = 123236519889708027473620326106273939584_u128;
|
||||
let var5 = 153236519889708027473620326106273939584_u128;
|
||||
let var6 = 18446744073709551616_i128;
|
||||
let var7 = 170141183460469231731687303715884105728_u128;
|
||||
|
||||
// Shifts.
|
||||
assert_eq!(var << (argc as u128 - 1), var);
|
||||
assert_eq!(var << argc as u128, 268435712);
|
||||
assert_eq!(var << (argc + 32) as u128, 1152922604118474752);
|
||||
assert_eq!(var << (argc + 48) as u128, 75557935783508361347072);
|
||||
assert_eq!(var << (argc + 60) as u128, 309485304969250248077606912);
|
||||
assert_eq!(var << (argc + 62) as u128, 1237941219877000992310427648);
|
||||
assert_eq!(var << (argc + 63) as u128, 2475882439754001984620855296);
|
||||
assert_eq!(var << (argc + 80) as u128, 324518863143436548128224745357312);
|
||||
|
||||
assert_eq!(var2 << argc as u128, 20950745466795983104);
|
||||
assert_eq!(var2 << (argc as u128 - 1), var2);
|
||||
assert_eq!(var2 << (argc + 32) as u128, 89982766606709001335848566784);
|
||||
assert_eq!(var2 << (argc + 48) as u128, 5897110592337281111546171672756224);
|
||||
assert_eq!(var2 << (argc + 60) as u128, 24154564986213503432893119171609493504);
|
||||
assert_eq!(var2 << (argc + 62) as u128, 96618259944854013731572476686437974016);
|
||||
assert_eq!(var2 << (argc + 63) as u128, 193236519889708027463144953372875948032);
|
||||
|
||||
assert_eq!(var3 << argc as u128, 46190672858477591483866044780779667712);
|
||||
assert_eq!(var3 << (argc as u128 - 1), var3);
|
||||
assert_eq!(var3 << (argc + 32) as u128, 21267668304951024224840338247585366016);
|
||||
assert_eq!(var3 << (argc + 48) as u128, 1335125106377253154015353231953100800);
|
||||
assert_eq!(var3 << (argc + 60) as u128, 24154564986213503432893119171609493504);
|
||||
assert_eq!(var3 << (argc + 62) as u128, 96618259944854013731572476686437974016);
|
||||
assert_eq!(var3 << (argc + 63) as u128, 193236519889708027463144953372875948032);
|
||||
|
||||
assert_eq!(var >> (argc as u128 - 1), var);
|
||||
assert_eq!(var >> argc as u128, 67108928);
|
||||
assert_eq!(var >> (argc + 32) as u128, 0);
|
||||
assert_eq!(var >> (argc + 48) as u128, 0);
|
||||
assert_eq!(var >> (argc + 60) as u128, 0);
|
||||
assert_eq!(var >> (argc + 62) as u128, 0);
|
||||
assert_eq!(var >> (argc + 63) as u128, 0);
|
||||
|
||||
assert_eq!(var2 >> argc as u128, 5237686366698995776);
|
||||
assert_eq!(var2 >> (argc as u128 - 1), var2);
|
||||
assert_eq!(var2 >> (argc + 32) as u128, 1219493888);
|
||||
assert_eq!(var2 >> (argc + 48) as u128, 18608);
|
||||
assert_eq!(var2 >> (argc + 60) as u128, 4);
|
||||
assert_eq!(var2 >> (argc + 62) as u128, 1);
|
||||
assert_eq!(var2 >> (argc + 63) as u128, 0);
|
||||
|
||||
assert_eq!(var3 >> (argc as u128 - 1), var3);
|
||||
assert_eq!(var3 >> argc as u128, 96618259944854013736810163053136969792);
|
||||
assert_eq!(var3 >> (argc + 32) as u128, 22495691651677250335181635584);
|
||||
assert_eq!(var3 >> (argc + 48) as u128, 343257013727985387194544);
|
||||
assert_eq!(var3 >> (argc + 60) as u128, 83802981867183932420);
|
||||
assert_eq!(var3 >> (argc + 62) as u128, 20950745466795983105);
|
||||
assert_eq!(var3 >> (argc + 63) as u128, 10475372733397991552);
|
||||
assert_eq!(var3 >> (argc + 80) as u128, 79920751444992);
|
||||
|
||||
assert_eq!(var6 >> argc as u128, 9223372036854775808);
|
||||
assert_eq!((var6 - 1) >> argc as u128, 9223372036854775807);
|
||||
assert_eq!(var7 >> argc as u128, 85070591730234615865843651857942052864);
|
||||
|
||||
// Casts
|
||||
assert_eq!((var >> (argc + 32) as u128) as u64, 0);
|
||||
assert_eq!((var >> argc as u128) as u64, 67108928);
|
||||
|
||||
// Addition.
|
||||
assert_eq!(var + argc as u128, 134217857);
|
||||
|
||||
assert_eq!(var2 + argc as u128, 10475372733397991553);
|
||||
assert_eq!(var2 + (var2 + argc as u128) as u128, 20950745466795983105);
|
||||
|
||||
assert_eq!(var3 + argc as u128, 193236519889708027473620326106273939585);
|
||||
|
||||
// Subtraction
|
||||
assert_eq!(var - argc as u128, 134217855);
|
||||
|
||||
assert_eq!(var2 - argc as u128, 10475372733397991551);
|
||||
|
||||
assert_eq!(var3 - argc as u128, 193236519889708027473620326106273939583);
|
||||
|
||||
// Multiplication
|
||||
assert_eq!(var * (argc + 1) as u128, 268435712);
|
||||
assert_eq!(var * (argc as u128 + var2), 1405982069077538020949770368);
|
||||
|
||||
assert_eq!(var2 * (argc + 1) as u128, 20950745466795983104);
|
||||
assert_eq!(var2 * (argc as u128 + var2), 109733433903618109003204073240861360256);
|
||||
|
||||
assert_eq!(var3 * argc as u128, 193236519889708027473620326106273939584);
|
||||
|
||||
assert_eq!(var4 * (argc + 1) as u128, 246473039779416054947240652212547879168);
|
||||
|
||||
assert_eq!(var5 * (argc + 1) as u128, 306473039779416054947240652212547879168);
|
||||
|
||||
// Division.
|
||||
assert_eq!(var / (argc + 1) as u128, 67108928);
|
||||
assert_eq!(var / (argc + 2) as u128, 44739285);
|
||||
|
||||
assert_eq!(var2 / (argc + 1) as u128, 5237686366698995776);
|
||||
assert_eq!(var2 / (argc + 2) as u128, 3491790911132663850);
|
||||
|
||||
assert_eq!(var3 / (argc + 1) as u128, 96618259944854013736810163053136969792);
|
||||
assert_eq!(var3 / (argc + 2) as u128, 64412173296569342491206775368757979861);
|
||||
assert_eq!(var3 / (argc as u128 + var4), 1);
|
||||
assert_eq!(var3 / (argc as u128 + var2), 18446744073709551615);
|
||||
|
||||
assert_eq!(var4 / (argc + 1) as u128, 61618259944854013736810163053136969792);
|
||||
assert_eq!(var4 / (argc + 2) as u128, 41078839963236009157873442035424646528);
|
||||
|
||||
0
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue