
Consistently use the most significant bit of vector masks

This improves the codegen for the vector `select`, `gather`, `scatter`,
masked load/store and boolean reduction intrinsics, and fixes
rust-lang/portable-simd#316.

The current behavior of most mask operations during llvm codegen is to
truncate the mask vector to <N x i1>, telling llvm to use the least
significant bit. The exception is the `simd_bitmask` intrinsic, which
already used the most significant bit.

Since sse/avx instructions are defined to use the most significant bit,
truncating means that llvm has to insert a left shift to move the bit
into the most significant position, before the mask can actually be
used.
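
For example, on avx2 the lowering of a byte-wise `simd_select` loses the
extra shift. A condensed sketch of the `select_i8x16` codegen test
updated below (`m8x16`/`i8x16` and `simd_select` are the test-local
vector types and intrinsic):

    // before: vpsllw    xmm0, xmm0, 7
    //         vpblendvb xmm0, xmm2, xmm1, xmm0
    // after:  vpblendvb xmm0, xmm2, xmm1, xmm0
    pub unsafe extern "C" fn select_i8x16(mask: m8x16, a: i8x16, b: i8x16) -> i8x16 {
        simd_select(mask, a, b)
    }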

Similarly, on aarch64 mask operations like blend work bit by bit;
repeating the least significant bit across the whole lane involves
shifting it into the sign position and then comparing against zero.
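
Again as a condensed sketch, this time of the `select_f32x4` test
updated below (`m32x4`/`f32x4` are the test-local types):

    // before: shl  v0.4s, v0.4s, #31   // replicate the low bit into the sign position
    //         cmlt v0.4s, v0.4s, #0    // expand the sign bit to all ones / all zeroes
    //         bsl  v0.16b, v1.16b, v2.16b
    // after:  cmlt v0.4s, v0.4s, #0
    //         bsl  v0.16b, v1.16b, v2.16b
    pub unsafe extern "C" fn select_f32x4(mask: m32x4, a: f32x4, b: f32x4) -> f32x4 {
        simd_select(mask, a, b)
    }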

By shifting before truncating to <N x i1>, we tell llvm that we only
consider the most significant bit, removing the need for additional
shift instructions in the assembly.
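
On the llvm ir level this swaps a bare truncate for a shift of the sign
bit followed by a truncate, e.g. for a `<4 x i32>` mask feeding a
`select` (a sketch matching the `select_m32` test added below):

    // before: %m = trunc <4 x i32> %mask to <4 x i1>
    //         select <4 x i1> %m, ...
    // after:  %a = lshr <4 x i32> %mask, <i32 31, i32 31, i32 31, i32 31>
    //         %b = trunc <4 x i32> %a to <4 x i1>
    //         select <4 x i1> %b, ...
    pub unsafe fn select_m32(m: i32x4, a: f32x4, b: f32x4) -> f32x4 {
        simd_select(m, a, b)
    }
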
Jörn Horstmann 2023-01-04 23:55:40 +01:00
parent c2270becb6
commit 3779b8e32e
13 changed files with 280 additions and 172 deletions


@@ -1182,6 +1182,60 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}};
}
/// Returns the bitwidth of the `$ty` argument if it is an `Int` type.
macro_rules! require_int_ty {
($ty: expr, $diag: expr) => {
match $ty {
ty::Int(i) => i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
_ => {
return_error!($diag);
}
}
};
}
/// Returns the bitwidth of the `$ty` argument if it is an `Int` or `Uint` type.
macro_rules! require_int_or_uint_ty {
($ty: expr, $diag: expr) => {
match $ty {
ty::Int(i) => i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
ty::Uint(i) => {
i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
}
_ => {
return_error!($diag);
}
}
};
}
/// Converts a vector mask, where each element has a bit width equal to the data elements it is used with,
/// down to an i1 based mask that can be used by llvm intrinsics.
///
/// The rust simd semantics are that each element should either consist of all ones or all zeroes,
/// but this information is not available to llvm. Truncating the vector effectively uses the lowest bit,
/// but codegen for several targets is better if we consider the highest bit by shifting.
///
/// For x86 SSE/AVX targets this is beneficial since most instructions with mask parameters only consider the highest bit.
/// So even though on llvm level we have an additional shift, in the final assembly there is no shift or truncate and
/// instead the mask can be used as is.
///
/// For aarch64 and other targets there is a benefit because a mask from the sign bit can be more
/// efficiently converted to an all ones / all zeroes mask by comparing whether each element is negative.
fn vector_mask_to_bitmask<'a, 'll, 'tcx>(
bx: &mut Builder<'a, 'll, 'tcx>,
i_xn: &'ll Value,
in_elem_bitwidth: u64,
in_len: u64,
) -> &'ll Value {
// Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
let shift_idx = bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
let shift_indices = vec![shift_idx; in_len as _];
let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
// Truncate vector to an <i1 x N>
bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
}
let tcx = bx.tcx();
let sig = tcx.normalize_erasing_late_bound_regions(bx.typing_env(), callee_ty.fn_sig(tcx));
let arg_tys = sig.inputs();
@@ -1433,14 +1487,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
m_len,
v_len
});
match m_elem_ty.kind() {
ty::Int(_) => {}
_ => return_error!(InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty }),
}
// truncate the mask to a vector of i1s
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, m_len as u64);
let m_i1s = bx.trunc(args[0].immediate(), i1xn);
let in_elem_bitwidth =
require_int_ty!(m_elem_ty.kind(), InvalidMonomorphization::MaskType {
span,
name,
ty: m_elem_ty
});
let m_i1s = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len);
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
}
@@ -1457,33 +1510,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let expected_bytes = in_len.div_ceil(8);
// Integer vector <i{in_bitwidth} x in_len>:
let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
ty::Int(i) => (
args[0].immediate(),
i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
),
ty::Uint(i) => (
args[0].immediate(),
i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
),
_ => return_error!(InvalidMonomorphization::VectorArgument {
let in_elem_bitwidth =
require_int_or_uint_ty!(in_elem.kind(), InvalidMonomorphization::VectorArgument {
span,
name,
in_ty,
in_elem
}),
};
});
// LLVM doesn't always know the inputs are `0` or `!0`, so we shift here so it optimizes to
// `pmovmskb` and similar on x86.
let shift_indices =
vec![
bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
in_len as _
];
let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
// Truncate vector to an <i1 x N>
let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
let i1xn = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, in_len);
// Bitcast <i1 x N> to iN:
let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
@@ -1704,28 +1739,21 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
);
match element_ty2.kind() {
ty::Int(_) => (),
_ => {
return_error!(InvalidMonomorphization::ThirdArgElementType {
span,
name,
expected_element: element_ty2,
third_arg: arg_tys[2]
});
}
}
let mask_elem_bitwidth =
require_int_ty!(element_ty2.kind(), InvalidMonomorphization::ThirdArgElementType {
span,
name,
expected_element: element_ty2,
third_arg: arg_tys[2]
});
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, in_len);
(bx.trunc(args[2].immediate(), i1xn), i1xn)
};
let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
let mask_ty = bx.type_vector(bx.type_i1(), in_len);
// Type of the vector of pointers:
let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
@@ -1810,27 +1838,21 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
);
require!(
matches!(mask_elem.kind(), ty::Int(_)),
InvalidMonomorphization::ThirdArgElementType {
let m_elem_bitwidth =
require_int_ty!(mask_elem.kind(), InvalidMonomorphization::ThirdArgElementType {
span,
name,
expected_element: values_elem,
third_arg: mask_ty,
}
);
});
let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
let mask_ty = bx.type_vector(bx.type_i1(), mask_len);
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, mask_len);
(bx.trunc(args[0].immediate(), i1xn), i1xn)
};
let llvm_pointer = bx.type_ptr();
// Type of the vector of elements:
@@ -1901,27 +1923,21 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
);
require!(
matches!(mask_elem.kind(), ty::Int(_)),
InvalidMonomorphization::ThirdArgElementType {
let m_elem_bitwidth =
require_int_ty!(mask_elem.kind(), InvalidMonomorphization::ThirdArgElementType {
span,
name,
expected_element: values_elem,
third_arg: mask_ty,
}
);
});
let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
let mask_ty = bx.type_vector(bx.type_i1(), mask_len);
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, in_len);
(bx.trunc(args[0].immediate(), i1xn), i1xn)
};
let ret_t = bx.type_void();
let llvm_pointer = bx.type_ptr();
@@ -1995,28 +2011,21 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
);
// The element type of the third argument must be a signed integer type of any width:
match element_ty2.kind() {
ty::Int(_) => (),
_ => {
return_error!(InvalidMonomorphization::ThirdArgElementType {
span,
name,
expected_element: element_ty2,
third_arg: arg_tys[2]
});
}
}
let mask_elem_bitwidth =
require_int_ty!(element_ty2.kind(), InvalidMonomorphization::ThirdArgElementType {
span,
name,
expected_element: element_ty2,
third_arg: arg_tys[2]
});
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, in_len);
(bx.trunc(args[2].immediate(), i1xn), i1xn)
};
let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
let mask_ty = bx.type_vector(bx.type_i1(), in_len);
let ret_t = bx.type_void();
@@ -2164,8 +2173,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
});
args[0].immediate()
} else {
match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {}
let bitwidth = match in_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
}
_ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
@@ -2174,12 +2188,9 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
in_elem,
ret_ty
}),
}
};
// boolean reductions operate on vectors of i1s:
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, in_len as u64);
bx.trunc(args[0].immediate(), i1xn)
vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth, in_len as _)
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {


@@ -35,8 +35,8 @@ pub unsafe extern "C" fn gather_f64x4(mask: m64x4, ptrs: pf64x4) -> f64x4 {
// FIXME: This should also get checked to generate a gather instruction for avx2.
// Currently llvm scalarizes this code, see https://github.com/llvm/llvm-project/issues/59789
//
// x86-avx512: vpsllq ymm0, ymm0, 63
// x86-avx512-NEXT: vpmovq2m k1, ymm0
// x86-avx512-NOT: vpsllq
// x86-avx512: vpmovq2m k1, ymm0
// x86-avx512-NEXT: vpxor xmm0, xmm0, xmm0
// x86-avx512-NEXT: vgatherqpd ymm0 {k1}, {{(ymmword)|(qword)}} ptr [1*ymm1]
simd_gather(f64x4([0_f64, 0_f64, 0_f64, 0_f64]), ptrs, mask)


@@ -47,9 +47,9 @@ extern "rust-intrinsic" {
pub unsafe extern "C" fn load_i8x16(mask: m8x16, pointer: *const i8) -> i8x16 {
// Since avx2 supports no masked loads for bytes, the code tests each individual bit
// and jumps to code that inserts individual bytes.
// x86-avx2: vpsllw xmm0, xmm0, 7
// x86-avx2-NEXT: vpmovmskb eax, xmm0
// x86-avx2-NEXT: vpxor xmm0, xmm0
// x86-avx2-NOT: vpsllw
// x86-avx2-DAG: vpmovmskb eax
// x86-avx2-DAG: vpxor
// x86-avx2-NEXT: test al, 1
// x86-avx2-NEXT: jne
// x86-avx2-NEXT: test al, 2
@@ -58,8 +58,8 @@ pub unsafe extern "C" fn load_i8x16(mask: m8x16, pointer: *const i8) -> i8x16 {
// x86-avx2-NEXT: vmovd xmm0, [[REG]]
// x86-avx2-DAG: vpinsrb xmm0, xmm0, byte ptr [rdi + 1], 1
//
// x86-avx512: vpsllw xmm0, xmm0, 7
// x86-avx512-NEXT: vpmovb2m k1, xmm0
// x86-avx512-NOT: vpsllw
// x86-avx512: vpmovb2m k1, xmm0
// x86-avx512-NEXT: vmovdqu8 xmm0 {k1} {z}, xmmword ptr [rdi]
simd_masked_load(mask, pointer, i8x16([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
}
@@ -67,11 +67,11 @@ pub unsafe extern "C" fn load_i8x16(mask: m8x16, pointer: *const i8) -> i8x16 {
// CHECK-LABEL: load_f32x8
#[no_mangle]
pub unsafe extern "C" fn load_f32x8(mask: m32x8, pointer: *const f32) -> f32x8 {
// x86-avx2: vpslld ymm0, ymm0, 31
// x86-avx2-NEXT: vmaskmovps ymm0, ymm0, ymmword ptr [rdi]
// x86-avx2-NOT: vpslld
// x86-avx2: vmaskmovps ymm0, ymm0, ymmword ptr [rdi]
//
// x86-avx512: vpslld ymm0, ymm0, 31
// x86-avx512-NEXT: vpmovd2m k1, ymm0
// x86-avx512-NOT: vpslld
// x86-avx512: vpmovd2m k1, ymm0
// x86-avx512-NEXT: vmovups ymm0 {k1} {z}, ymmword ptr [rdi]
simd_masked_load(mask, pointer, f32x8([0_f32, 0_f32, 0_f32, 0_f32, 0_f32, 0_f32, 0_f32, 0_f32]))
}
@@ -79,11 +79,10 @@ pub unsafe extern "C" fn load_f32x8(mask: m32x8, pointer: *const f32) -> f32x8 {
// CHECK-LABEL: load_f64x4
#[no_mangle]
pub unsafe extern "C" fn load_f64x4(mask: m64x4, pointer: *const f64) -> f64x4 {
// x86-avx2: vpsllq ymm0, ymm0, 63
// x86-avx2-NEXT: vmaskmovpd ymm0, ymm0, ymmword ptr [rdi]
// x86-avx2-NOT: vpsllq
// x86-avx2: vmaskmovpd ymm0, ymm0, ymmword ptr [rdi]
//
// x86-avx512: vpsllq ymm0, ymm0, 63
// x86-avx512-NEXT: vpmovq2m k1, ymm0
// x86-avx512-NEXT: vmovupd ymm0 {k1} {z}, ymmword ptr [rdi]
// x86-avx512-NOT: vpsllq
// x86-avx512: vpmovq2m k1, ymm0
simd_masked_load(mask, pointer, f64x4([0_f64, 0_f64, 0_f64, 0_f64]))
}


@@ -31,29 +31,30 @@ extern "rust-intrinsic" {
// CHECK-LABEL: mask_reduce_all:
#[no_mangle]
pub unsafe extern "C" fn mask_reduce_all(m: mask8x16) -> bool {
// x86: psllw xmm0, 7
// x86-NEXT: pmovmskb eax, xmm0
// x86-NEXT: {{cmp ax, -1|xor eax, 65535}}
// x86-NOT: psllw
// x86: pmovmskb eax, xmm0
// x86-NEXT: {{cmp ax, -1|cmp eax, 65535|xor eax, 65535}}
// x86-NEXT: sete al
//
// aarch64: shl v0.16b, v0.16b, #7
// aarch64-NEXT: cmlt v0.16b, v0.16b, #0
// aarch64-NEXT: uminv b0, v0.16b
// aarch64-NEXT: fmov [[REG:[a-z0-9]+]], s0
// aarch64-NEXT: and w0, [[REG]], #0x1
// aarch64-NOT: shl
// aarch64: cmge v0.16b, v0.16b, #0
// aarch64-DAG: mov [[REG1:[a-z0-9]+]], #1
// aarch64-DAG: umaxv b0, v0.16b
// aarch64-NEXT: fmov [[REG2:[a-z0-9]+]], s0
// aarch64-NEXT: bic w0, [[REG1]], [[REG2]]
simd_reduce_all(m)
}
// CHECK-LABEL: mask_reduce_any:
#[no_mangle]
pub unsafe extern "C" fn mask_reduce_any(m: mask8x16) -> bool {
// x86: psllw xmm0, 7
// x86-NEXT: pmovmskb
// x86-NOT: psllw
// x86: pmovmskb
// x86-NEXT: test eax, eax
// x86-NEXT: setne al
//
// aarch64: shl v0.16b, v0.16b, #7
// aarch64-NEXT: cmlt v0.16b, v0.16b, #0
// aarch64-NOT: shl
// aarch64: cmlt v0.16b, v0.16b, #0
// aarch64-NEXT: umaxv b0, v0.16b
// aarch64-NEXT: fmov [[REG:[a-z0-9]+]], s0
// aarch64-NEXT: and w0, [[REG]], #0x1


@@ -47,8 +47,8 @@ extern "rust-intrinsic" {
pub unsafe extern "C" fn store_i8x16(mask: m8x16, pointer: *mut i8, value: i8x16) {
// Since avx2 supports no masked stores for bytes, the code tests each individual bit
// and jumps to code that extracts individual bytes to memory.
// x86-avx2: vpsllw xmm0, xmm0, 7
// x86-avx2-NEXT: vpmovmskb eax, xmm0
// x86-avx2-NOT: vpsllw
// x86-avx2: vpmovmskb eax, xmm0
// x86-avx2-NEXT: test al, 1
// x86-avx2-NEXT: jne
// x86-avx2-NEXT: test al, 2
@@ -56,8 +56,8 @@ pub unsafe extern "C" fn store_i8x16(mask: m8x16, pointer: *mut i8, value: i8x16
// x86-avx2-DAG: vpextrb byte ptr [rdi + 1], xmm1, 1
// x86-avx2-DAG: vpextrb byte ptr [rdi], xmm1, 0
//
// x86-avx512: vpsllw xmm0, xmm0, 7
// x86-avx512-NEXT: vpmovb2m k1, xmm0
// x86-avx512-NOT: vpsllw
// x86-avx512: vpmovb2m k1, xmm0
// x86-avx512-NEXT: vmovdqu8 xmmword ptr [rdi] {k1}, xmm1
simd_masked_store(mask, pointer, value)
}
@@ -65,11 +65,11 @@ pub unsafe extern "C" fn store_i8x16(mask: m8x16, pointer: *mut i8, value: i8x16
// CHECK-LABEL: store_f32x8
#[no_mangle]
pub unsafe extern "C" fn store_f32x8(mask: m32x8, pointer: *mut f32, value: f32x8) {
// x86-avx2: vpslld ymm0, ymm0, 31
// x86-avx2-NEXT: vmaskmovps ymmword ptr [rdi], ymm0, ymm1
// x86-avx2-NOT: vpslld
// x86-avx2: vmaskmovps ymmword ptr [rdi], ymm0, ymm1
//
// x86-avx512: vpslld ymm0, ymm0, 31
// x86-avx512-NEXT: vpmovd2m k1, ymm0
// x86-avx512-NOT: vpslld
// x86-avx512: vpmovd2m k1, ymm0
// x86-avx512-NEXT: vmovups ymmword ptr [rdi] {k1}, ymm1
simd_masked_store(mask, pointer, value)
}
@@ -77,11 +77,11 @@ pub unsafe extern "C" fn store_f32x8(mask: m32x8, pointer: *mut f32, value: f32x
// CHECK-LABEL: store_f64x4
#[no_mangle]
pub unsafe extern "C" fn store_f64x4(mask: m64x4, pointer: *mut f64, value: f64x4) {
// x86-avx2: vpsllq ymm0, ymm0, 63
// x86-avx2-NEXT: vmaskmovpd ymmword ptr [rdi], ymm0, ymm1
// x86-avx2-NOT: vpsllq
// x86-avx2: vmaskmovpd ymmword ptr [rdi], ymm0, ymm1
//
// x86-avx512: vpsllq ymm0, ymm0, 63
// x86-avx512-NEXT: vpmovq2m k1, ymm0
// x86-avx512-NOT: vpsllq
// x86-avx512: vpmovq2m k1, ymm0
// x86-avx512-NEXT: vmovupd ymmword ptr [rdi] {k1}, ymm1
simd_masked_store(mask, pointer, value)
}


@@ -32,8 +32,8 @@ extern "rust-intrinsic" {
// CHECK-LABEL: scatter_f64x4
#[no_mangle]
pub unsafe extern "C" fn scatter_f64x4(values: f64x4, ptrs: pf64x4, mask: m64x4) {
// x86-avx512: vpsllq ymm2, ymm2, 63
// x86-avx512-NEXT: vpmovq2m k1, ymm2
// x86-avx512-NOT: vpsllq
// x86-avx512: vpmovq2m k1, ymm2
// x86-avx512-NEXT: vscatterqpd {{(ymmword)|(qword)}} ptr [1*ymm1] {k1}, ymm0
simd_scatter(values, ptrs, mask)
}


@@ -58,15 +58,15 @@ extern "rust-intrinsic" {
// CHECK-LABEL: select_i8x16
#[no_mangle]
pub unsafe extern "C" fn select_i8x16(mask: m8x16, a: i8x16, b: i8x16) -> i8x16 {
// x86-avx2: vpsllw xmm0, xmm0, 7
// x86-avx2-NEXT: vpblendvb xmm0, xmm2, xmm1, xmm0
// x86-avx2-NOT: vpsllw
// x86-avx2: vpblendvb xmm0, xmm2, xmm1, xmm0
//
// x86-avx512: vpsllw xmm0, xmm0, 7
// x86-avx512-NEXT: vpmovb2m k1, xmm0
// x86-avx512-NOT: vpsllw
// x86-avx512: vpmovb2m k1, xmm0
// x86-avx512-NEXT: vpblendmb xmm0 {k1}, xmm2, xmm1
//
// aarch64: shl v0.16b, v0.16b, #7
// aarch64-NEXT: cmlt v0.16b, v0.16b, #0
// aarch64-NOT: shl
// aarch64: cmlt v0.16b, v0.16b, #0
// aarch64-NEXT: bsl v0.16b, v1.16b, v2.16b
simd_select(mask, a, b)
}
@@ -74,15 +74,15 @@ pub unsafe extern "C" fn select_i8x16(mask: m8x16, a: i8x16, b: i8x16) -> i8x16
// CHECK-LABEL: select_f32x4
#[no_mangle]
pub unsafe extern "C" fn select_f32x4(mask: m32x4, a: f32x4, b: f32x4) -> f32x4 {
// x86-avx2: vpslld xmm0, xmm0, 31
// x86-avx2-NEXT: vblendvps xmm0, xmm2, xmm1, xmm0
// x86-avx2-NOT: vpslld
// x86-avx2: vblendvps xmm0, xmm2, xmm1, xmm0
//
// x86-avx512: vpslld xmm0, xmm0, 31
// x86-avx512-NEXT: vpmovd2m k1, xmm0
// x86-avx512-NOT: vpslld
// x86-avx512: vpmovd2m k1, xmm0
// x86-avx512-NEXT: vblendmps xmm0 {k1}, xmm2, xmm1
//
// aarch64: shl v0.4s, v0.4s, #31
// aarch64-NEXT: cmlt v0.4s, v0.4s, #0
// aarch64-NOT: shl
// aarch64: cmlt v0.4s, v0.4s, #0
// aarch64-NEXT: bsl v0.16b, v1.16b, v2.16b
simd_select(mask, a, b)
}
@@ -90,15 +90,15 @@ pub unsafe extern "C" fn select_f32x4(mask: m32x4, a: f32x4, b: f32x4) -> f32x4
// CHECK-LABEL: select_f64x2
#[no_mangle]
pub unsafe extern "C" fn select_f64x2(mask: m64x2, a: f64x2, b: f64x2) -> f64x2 {
// x86-avx2: vpsllq xmm0, xmm0, 63
// x86-avx2-NEXT: vblendvpd xmm0, xmm2, xmm1, xmm0
// x86-avx2-NOT: vpsllq
// x86-avx2: vblendvpd xmm0, xmm2, xmm1, xmm0
//
// x86-avx512: vpsllq xmm0, xmm0, 63
// x86-avx512-NEXT: vpmovq2m k1, xmm0
// x86-avx512-NOT: vpsllq
// x86-avx512: vpmovq2m k1, xmm0
// x86-avx512-NEXT: vblendmpd xmm0 {k1}, xmm2, xmm1
//
// aarch64: shl v0.2d, v0.2d, #63
// aarch64-NEXT: cmlt v0.2d, v0.2d, #0
// aarch64-NOT: shl
// aarch64: cmlt v0.2d, v0.2d, #0
// aarch64-NEXT: bsl v0.16b, v1.16b, v2.16b
simd_select(mask, a, b)
}
@@ -108,11 +108,11 @@ pub unsafe extern "C" fn select_f64x2(mask: m64x2, a: f64x2, b: f64x2) -> f64x2
pub unsafe extern "C" fn select_f64x4(mask: m64x4, a: f64x4, b: f64x4) -> f64x4 {
// The parameter is a 256 bit vector which in the C abi is only valid for avx targets.
//
// x86-avx2: vpsllq ymm0, ymm0, 63
// x86-avx2-NEXT: vblendvpd ymm0, ymm2, ymm1, ymm0
// x86-avx2-NOT: vpsllq
// x86-avx2: vblendvpd ymm0, ymm2, ymm1, ymm0
//
// x86-avx512: vpsllq ymm0, ymm0, 63
// x86-avx512-NEXT: vpmovq2m k1, ymm0
// x86-avx512-NOT: vpsllq
// x86-avx512: vpmovq2m k1, ymm0
// x86-avx512-NEXT: vblendmpd ymm0 {k1}, ymm2, ymm1
simd_select(mask, a, b)
}
@@ -122,8 +122,8 @@ pub unsafe extern "C" fn select_f64x4(mask: m64x4, a: f64x4, b: f64x4) -> f64x4
pub unsafe extern "C" fn select_f64x8(mask: m64x8, a: f64x8, b: f64x8) -> f64x8 {
// The parameter is a 512 bit vector which in the C abi is only valid for avx512 targets.
//
// x86-avx512: vpsllq zmm0, zmm0, 63
// x86-avx512-NEXT: vpmovq2m k1, zmm0
// x86-avx512-NOT: vpsllq
// x86-avx512: vpmovq2m k1, zmm0
// x86-avx512-NEXT: vblendmpd zmm0 {k1}, zmm2, zmm1
simd_select(mask, a, b)
}


@@ -23,7 +23,9 @@ extern "rust-intrinsic" {
#[no_mangle]
pub unsafe fn gather_f32x2(pointers: Vec2<*const f32>, mask: Vec2<i32>,
values: Vec2<f32>) -> Vec2<f32> {
// CHECK: call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x float> {{.*}})
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, <i32 31, i32 31>
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
// CHECK: call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]], <2 x float> {{.*}})
simd_gather(values, pointers, mask)
}
@@ -31,6 +33,8 @@ pub unsafe fn gather_f32x2(pointers: Vec2<*const f32>, mask: Vec2<i32>,
#[no_mangle]
pub unsafe fn gather_pf32x2(pointers: Vec2<*const *const f32>, mask: Vec2<i32>,
values: Vec2<*const f32>) -> Vec2<*const f32> {
// CHECK: call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x ptr> {{.*}})
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, <i32 31, i32 31>
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
// CHECK: call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]], <2 x ptr> {{.*}})
simd_gather(values, pointers, mask)
}


@@ -21,7 +21,9 @@ extern "rust-intrinsic" {
#[no_mangle]
pub unsafe fn load_f32x2(mask: Vec2<i32>, pointer: *const f32,
values: Vec2<f32>) -> Vec2<f32> {
// CHECK: call <2 x float> @llvm.masked.load.v2f32.p0(ptr {{.*}}, i32 4, <2 x i1> {{.*}}, <2 x float> {{.*}})
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, <i32 31, i32 31>
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
// CHECK: call <2 x float> @llvm.masked.load.v2f32.p0(ptr {{.*}}, i32 4, <2 x i1> [[B]], <2 x float> {{.*}})
simd_masked_load(mask, pointer, values)
}
@@ -29,6 +31,8 @@ pub unsafe fn load_f32x2(mask: Vec2<i32>, pointer: *const f32,
#[no_mangle]
pub unsafe fn load_pf32x4(mask: Vec4<i32>, pointer: *const *const f32,
values: Vec4<*const f32>) -> Vec4<*const f32> {
// CHECK: call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr {{.*}}, i32 {{.*}}, <4 x i1> {{.*}}, <4 x ptr> {{.*}})
// CHECK: [[A:%[0-9]+]] = lshr <4 x i32> {{.*}}, <i32 31, i32 31, i32 31, i32 31>
// CHECK: [[B:%[0-9]+]] = trunc <4 x i32> [[A]] to <4 x i1>
// CHECK: call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr {{.*}}, i32 {{.*}}, <4 x i1> [[B]], <4 x ptr> {{.*}})
simd_masked_load(mask, pointer, values)
}


@@ -20,13 +20,17 @@ extern "rust-intrinsic" {
// CHECK-LABEL: @store_f32x2
#[no_mangle]
pub unsafe fn store_f32x2(mask: Vec2<i32>, pointer: *mut f32, values: Vec2<f32>) {
// CHECK: call void @llvm.masked.store.v2f32.p0(<2 x float> {{.*}}, ptr {{.*}}, i32 4, <2 x i1> {{.*}})
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, <i32 31, i32 31>
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
// CHECK: call void @llvm.masked.store.v2f32.p0(<2 x float> {{.*}}, ptr {{.*}}, i32 4, <2 x i1> [[B]])
simd_masked_store(mask, pointer, values)
}
// CHECK-LABEL: @store_pf32x4
#[no_mangle]
pub unsafe fn store_pf32x4(mask: Vec4<i32>, pointer: *mut *const f32, values: Vec4<*const f32>) {
// CHECK: call void @llvm.masked.store.v4p0.p0(<4 x ptr> {{.*}}, ptr {{.*}}, i32 {{.*}}, <4 x i1> {{.*}})
// CHECK: [[A:%[0-9]+]] = lshr <4 x i32> {{.*}}, <i32 31, i32 31, i32 31, i32 31>
// CHECK: [[B:%[0-9]+]] = trunc <4 x i32> [[A]] to <4 x i1>
// CHECK: call void @llvm.masked.store.v4p0.p0(<4 x ptr> {{.*}}, ptr {{.*}}, i32 {{.*}}, <4 x i1> [[B]])
simd_masked_store(mask, pointer, values)
}


@@ -23,7 +23,9 @@ extern "rust-intrinsic" {
#[no_mangle]
pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2<i32>,
values: Vec2<f32>) {
// CHECK: call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, <i32 31, i32 31>
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
// CHECK: call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]]
simd_scatter(values, pointers, mask)
}
@@ -32,6 +34,8 @@ pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2<i32>,
#[no_mangle]
pub unsafe fn scatter_pf32x2(pointers: Vec2<*mut *const f32>, mask: Vec2<i32>,
values: Vec2<*const f32>) {
// CHECK: call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, <i32 31, i32 31>
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
// CHECK: call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]]
simd_scatter(values, pointers, mask)
}


@@ -3,7 +3,7 @@
#![crate_type = "lib"]
#![feature(repr_simd, intrinsics)]
#[allow(non_camel_case_types)]
#![allow(non_camel_case_types)]
#[repr(simd)]
#[derive(Copy, Clone, PartialEq, Debug)]
@@ -17,21 +17,37 @@ pub struct f32x8([f32; 8]);
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct b8x4(pub [i8; 4]);
#[repr(simd)]
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct i32x4([i32; 4]);
extern "rust-intrinsic" {
fn simd_select<T, U>(x: T, a: U, b: U) -> U;
fn simd_select_bitmask<T, U>(x: T, a: U, b: U) -> U;
}
// CHECK-LABEL: @select
// CHECK-LABEL: @select_m8
#[no_mangle]
pub unsafe fn select(m: b8x4, a: f32x4, b: f32x4) -> f32x4 {
// CHECK: select <4 x i1>
pub unsafe fn select_m8(m: b8x4, a: f32x4, b: f32x4) -> f32x4 {
// CHECK: [[A:%[0-9]+]] = lshr <4 x i8> %{{.*}}, <i8 7, i8 7, i8 7, i8 7>
// CHECK: [[B:%[0-9]+]] = trunc <4 x i8> [[A]] to <4 x i1>
// CHECK: select <4 x i1> [[B]]
simd_select(m, a, b)
}
// CHECK-LABEL: @select_m32
#[no_mangle]
pub unsafe fn select_m32(m: i32x4, a: f32x4, b: f32x4) -> f32x4 {
// CHECK: [[A:%[0-9]+]] = lshr <4 x i32> %{{.*}}, <i32 31, i32 31, i32 31, i32 31>
// CHECK: [[B:%[0-9]+]] = trunc <4 x i32> [[A]] to <4 x i1>
// CHECK: select <4 x i1> [[B]]
simd_select(m, a, b)
}
// CHECK-LABEL: @select_bitmask
#[no_mangle]
pub unsafe fn select_bitmask(m: i8, a: f32x8, b: f32x8) -> f32x8 {
// CHECK: select <8 x i1>
// CHECK: [[A:%[0-9]+]] = bitcast i8 {{.*}} to <8 x i1>
// CHECK: select <8 x i1> [[A]]
simd_select_bitmask(m, a, b)
}


@@ -0,0 +1,65 @@
//@ compile-flags: -C no-prepopulate-passes
//
#![crate_type = "lib"]
#![feature(repr_simd, intrinsics)]
#![allow(non_camel_case_types)]
#[repr(simd)]
#[derive(Copy, Clone)]
pub struct mask32x2([i32; 2]);
#[repr(simd)]
#[derive(Copy, Clone)]
pub struct mask8x16([i8; 16]);
extern "rust-intrinsic" {
fn simd_reduce_all<T>(x: T) -> bool;
fn simd_reduce_any<T>(x: T) -> bool;
}
// NOTE(eddyb) `%{{x|1}}` is used because on some targets (e.g. WASM)
// SIMD vectors are passed directly, resulting in `%x` being a vector,
// while on others they're passed indirectly, resulting in `%x` being
// a pointer to a vector, and `%1` a vector loaded from that pointer.
// This is controlled by the target spec option `simd_types_indirect`.
// CHECK-LABEL: @reduce_any_32x2
#[no_mangle]
pub unsafe fn reduce_any_32x2(x: mask32x2) -> bool {
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|1}}, <i32 31, i32 31>
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
// CHECK: [[C:%[0-9]+]] = call i1 @llvm.vector.reduce.or.v2i1(<2 x i1> [[B]])
// CHECK: %{{[0-9]+}} = zext i1 [[C]] to i8
simd_reduce_any(x)
}
// CHECK-LABEL: @reduce_all_32x2
#[no_mangle]
pub unsafe fn reduce_all_32x2(x: mask32x2) -> bool {
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|1}}, <i32 31, i32 31>
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
// CHECK: [[C:%[0-9]+]] = call i1 @llvm.vector.reduce.and.v2i1(<2 x i1> [[B]])
// CHECK: %{{[0-9]+}} = zext i1 [[C]] to i8
simd_reduce_all(x)
}
// CHECK-LABEL: @reduce_any_8x16
#[no_mangle]
pub unsafe fn reduce_any_8x16(x: mask8x16) -> bool {
// CHECK: [[A:%[0-9]+]] = lshr <16 x i8> %{{x|1}}, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
// CHECK: [[B:%[0-9]+]] = trunc <16 x i8> [[A]] to <16 x i1>
// CHECK: [[C:%[0-9]+]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[B]])
// CHECK: %{{[0-9]+}} = zext i1 [[C]] to i8
simd_reduce_any(x)
}
// CHECK-LABEL: @reduce_all_8x16
#[no_mangle]
pub unsafe fn reduce_all_8x16(x: mask8x16) -> bool {
// CHECK: [[A:%[0-9]+]] = lshr <16 x i8> %{{x|1}}, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
// CHECK: [[B:%[0-9]+]] = trunc <16 x i8> [[A]] to <16 x i1>
// CHECK: [[C:%[0-9]+]] = call i1 @llvm.vector.reduce.and.v16i1(<16 x i1> [[B]])
// CHECK: %{{[0-9]+}} = zext i1 [[C]] to i8
simd_reduce_all(x)
}