
Auto merge of #3275 - rust-lang:rustup-2024-01-24, r=RalfJung

Automatic Rustup
bors, 2024-01-24 08:12:24 +00:00, commit cf22ee0c0c
220 changed files with 3717 additions and 1233 deletions

@ -129,7 +129,7 @@ Clement Miao <clementmiao@gmail.com>
Clément Renault <renault.cle@gmail.com>
Cliff Dyer <jcd@sdf.org>
Clinton Ryan <clint.ryan3@gmail.com>
Corey Richardson <corey@octayn.net> Elaine "See More" Nemo <corey@octayn.net>
ember arlynx <ember@lunar.town> <corey@octayn.net>
Crazycolorz5 <Crazycolorz5@gmail.com>
csmoe <35686186+csmoe@users.noreply.github.com>
Cyryl Płotnicki <cyplo@cyplo.net>

@ -878,12 +878,12 @@ dependencies = [
[[package]]
name = "ctrlc"
version = "3.4.0"
version = "3.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e"
checksum = "b467862cc8610ca6fc9a1532d7777cee0804e678ab45410897b9396495994a0b"
dependencies = [
"nix",
"windows-sys 0.48.0",
"windows-sys 0.52.0",
]
[[package]]
@ -2512,14 +2512,13 @@ checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54"
[[package]]
name = "nix"
version = "0.26.2"
version = "0.27.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a"
checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053"
dependencies = [
"bitflags 1.3.2",
"bitflags 2.4.1",
"cfg-if",
"libc",
"static_assertions",
]
[[package]]
@ -2641,11 +2640,11 @@ dependencies = [
[[package]]
name = "openssl"
version = "0.10.55"
version = "0.10.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d"
checksum = "15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8"
dependencies = [
"bitflags 1.3.2",
"bitflags 2.4.1",
"cfg-if",
"foreign-types",
"libc",
@ -2673,9 +2672,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
[[package]]
name = "openssl-sys"
version = "0.9.90"
version = "0.9.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6"
checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae"
dependencies = [
"cc",
"libc",
@ -6143,6 +6142,15 @@ dependencies = [
"windows-targets 0.48.1",
]
[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets 0.52.0",
]
[[package]]
name = "windows-targets"
version = "0.42.2"
@ -6173,6 +6181,21 @@ dependencies = [
"windows_x86_64_msvc 0.48.0",
]
[[package]]
name = "windows-targets"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
dependencies = [
"windows_aarch64_gnullvm 0.52.0",
"windows_aarch64_msvc 0.52.0",
"windows_i686_gnu 0.52.0",
"windows_i686_msvc 0.52.0",
"windows_x86_64_gnu 0.52.0",
"windows_x86_64_gnullvm 0.52.0",
"windows_x86_64_msvc 0.52.0",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
@ -6185,6 +6208,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
@ -6197,6 +6226,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
@ -6209,6 +6244,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
[[package]]
name = "windows_i686_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
@ -6221,6 +6262,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
[[package]]
name = "windows_i686_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
@ -6233,6 +6280,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
@ -6245,6 +6298,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
@ -6257,6 +6316,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
[[package]]
name = "winnow"
version = "0.4.7"

@ -1,6 +1,6 @@
use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
use rustc_data_structures::graph::WithSuccessors;
use rustc_index::bit_set::HybridBitSet;
use rustc_index::bit_set::BitSet;
use rustc_index::interval::IntervalSet;
use rustc_infer::infer::canonical::QueryRegionConstraints;
use rustc_infer::infer::outlives::for_liveness;
@ -135,7 +135,7 @@ struct LivenessResults<'me, 'typeck, 'flow, 'tcx> {
cx: LivenessContext<'me, 'typeck, 'flow, 'tcx>,
/// Set of points that define the current local.
defs: HybridBitSet<PointIndex>,
defs: BitSet<PointIndex>,
/// Points where the current variable is "use live" -- meaning
/// that there is a future "full use" that may use its value.
@ -158,7 +158,7 @@ impl<'me, 'typeck, 'flow, 'tcx> LivenessResults<'me, 'typeck, 'flow, 'tcx> {
let num_points = cx.elements.num_points();
LivenessResults {
cx,
defs: HybridBitSet::new_empty(num_points),
defs: BitSet::new_empty(num_points),
use_live_at: IntervalSet::new(num_points),
drop_live_at: IntervalSet::new(num_points),
drop_locations: vec![],
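
For context, a minimal sketch (not code from this commit) of how the dense `BitSet` that replaces `HybridBitSet` above is typically used, assuming `rustc_index`'s `BitSet` API:

// Hypothetical sketch: build the `defs` set over the whole point domain.
use rustc_index::bit_set::BitSet;

fn collect_defs(num_points: usize, def_points: &[usize]) -> BitSet<usize> {
    // One dense bit set sized to the full domain, as `LivenessResults::new` does above.
    let mut defs: BitSet<usize> = BitSet::new_empty(num_points);
    for &p in def_points {
        defs.insert(p); // mark each point that defines the current local
    }
    defs
}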

@ -682,7 +682,6 @@ fn codegen_stmt<'tcx>(
args,
ty::ClosureKind::FnOnce,
)
.expect("failed to normalize and resolve closure during codegen")
.polymorphize(fx.tcx);
let func_ref = fx.get_function_ref(instance);
let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);

@ -435,7 +435,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
args,
ty::ClosureKind::FnOnce,
)
.expect("failed to normalize and resolve closure during codegen")
.polymorphize(bx.cx().tcx());
OperandValue::Immediate(bx.cx().get_fn_addr(instance))
}

@ -46,8 +46,8 @@ const_eval_dangling_int_pointer =
{$bad_pointer_message}: {$pointer} is a dangling pointer (it has no provenance)
const_eval_dangling_null_pointer =
{$bad_pointer_message}: null pointer is a dangling pointer (it has no provenance)
const_eval_dangling_ptr_in_final = encountered dangling pointer in final constant
const_eval_dangling_ptr_in_final = encountered dangling pointer in final value of {const_eval_intern_kind}
const_eval_dead_local =
accessing a dead local variable
const_eval_dealloc_immutable =
@ -134,6 +134,14 @@ const_eval_interior_mutable_data_refer =
This would make it possible for multiple uses of a constant to see different values and to circumvent
the `Send` and `Sync` requirements for shared mutable data, which is unsound.
const_eval_intern_kind = {$kind ->
[static] static
[static_mut] mutable static
[const] constant
[promoted] promoted
*[other] {""}
}
const_eval_invalid_align =
align has to be a power of 2
@ -205,6 +213,8 @@ const_eval_modified_global =
const_eval_mut_deref =
mutation through a reference is not allowed in {const_eval_const_context}s
const_eval_mutable_ptr_in_final = encountered mutable pointer in final value of {const_eval_intern_kind}
const_eval_non_const_fmt_macro_call =
cannot call non-const formatting macro in {const_eval_const_context}s
@ -327,7 +337,7 @@ const_eval_too_many_caller_args =
const_eval_transient_mut_borrow = mutable references are not allowed in {const_eval_const_context}s
const_eval_transient_mut_borrow_raw = raw mutable references are not allowed in {const_eval_const_context}s
const_eval_transient_mut_raw = raw mutable pointers are not allowed in {const_eval_const_context}s
const_eval_try_block_from_output_non_const =
`try` block cannot convert `{$ty}` to the result in {const_eval_const_context}s
@ -341,21 +351,21 @@ const_eval_unallowed_heap_allocations =
const_eval_unallowed_inline_asm =
inline assembly is not allowed in {const_eval_const_context}s
const_eval_unallowed_mutable_refs =
mutable references are not allowed in the final value of {const_eval_const_context}s
const_eval_unallowed_mutable_raw =
raw mutable pointers are not allowed in the final value of {const_eval_const_context}s
.teach_note =
References in statics and constants may only refer to immutable values.
Statics are shared everywhere, and if they refer to mutable data one might violate memory
safety since holding multiple mutable references to shared data is not allowed.
If you really want global mutable state, try using static mut or a global UnsafeCell.
const_eval_unallowed_mutable_refs_raw =
raw mutable references are not allowed in the final value of {const_eval_const_context}s
const_eval_unallowed_mutable_refs =
mutable references are not allowed in the final value of {const_eval_const_context}s
.teach_note =
References in statics and constants may only refer to immutable values.
Statics are shared everywhere, and if they refer to mutable data one might violate memory
safety since holding multiple mutable references to shared data is not allowed.
@ -392,9 +402,6 @@ const_eval_unstable_in_stable =
.unstable_sugg = if it is not part of the public API, make this function unstably const
.bypass_sugg = otherwise `#[rustc_allow_const_fn_unstable]` can be used to bypass stability checks
const_eval_unsupported_untyped_pointer = unsupported untyped pointer in constant
.note = memory only reachable via raw pointers is not supported
const_eval_unterminated_c_string =
reading a null-terminated string starting at {$pointer} with no null found before end of allocation
@ -406,7 +413,6 @@ const_eval_upcast_mismatch =
## The `front_matter`s here refer to either `const_eval_front_matter_invalid_value` or `const_eval_front_matter_invalid_value_with_path`.
## (We'd love to sort this differently to make that more clear but tidy won't let us...)
const_eval_validation_box_to_mut = {$front_matter}: encountered a box pointing to mutable memory in a constant
const_eval_validation_box_to_static = {$front_matter}: encountered a box pointing to a static variable in a constant
const_eval_validation_box_to_uninhabited = {$front_matter}: encountered a box pointing to uninhabited type {$ty}
const_eval_validation_dangling_box_no_provenance = {$front_matter}: encountered a dangling box ({$pointer} has no provenance)
@ -441,7 +447,8 @@ const_eval_validation_invalid_fn_ptr = {$front_matter}: encountered {$value}, but expected a function pointer
const_eval_validation_invalid_ref_meta = {$front_matter}: encountered invalid reference metadata: total size is bigger than largest supported object
const_eval_validation_invalid_ref_slice_meta = {$front_matter}: encountered invalid reference metadata: slice is bigger than largest supported object
const_eval_validation_invalid_vtable_ptr = {$front_matter}: encountered {$value}, but expected a vtable pointer
const_eval_validation_mutable_ref_in_const = {$front_matter}: encountered mutable reference in a `const`
const_eval_validation_mutable_ref_in_const = {$front_matter}: encountered mutable reference in a `const` or `static`
const_eval_validation_mutable_ref_to_immutable = {$front_matter}: encountered mutable reference or box pointing to read-only memory
const_eval_validation_never_val = {$front_matter}: encountered a value of the never type `!`
const_eval_validation_null_box = {$front_matter}: encountered a null box
const_eval_validation_null_fn_ptr = {$front_matter}: encountered a null function pointer
@ -451,7 +458,6 @@ const_eval_validation_out_of_range = {$front_matter}: encountered {$value}, but
const_eval_validation_partial_pointer = {$front_matter}: encountered a partial pointer or a mix of pointers
const_eval_validation_pointer_as_int = {$front_matter}: encountered a pointer, but {$expected}
const_eval_validation_ptr_out_of_range = {$front_matter}: encountered a pointer, but expected something that cannot possibly fail to be {$in_range}
const_eval_validation_ref_to_mut = {$front_matter}: encountered a reference pointing to mutable memory in a constant
const_eval_validation_ref_to_static = {$front_matter}: encountered a reference pointing to a static variable in a constant
const_eval_validation_ref_to_uninhabited = {$front_matter}: encountered a reference pointing to uninhabited type {$ty}
const_eval_validation_unaligned_box = {$front_matter}: encountered an unaligned box (required {$required_bytes} byte alignment but found {$found_bytes})
@ -459,7 +465,7 @@ const_eval_validation_unaligned_ref = {$front_matter}: encountered an unaligned
const_eval_validation_uninhabited_enum_variant = {$front_matter}: encountered an uninhabited enum variant
const_eval_validation_uninhabited_val = {$front_matter}: encountered a value of uninhabited type `{$ty}`
const_eval_validation_uninit = {$front_matter}: encountered uninitialized memory, but {$expected}
const_eval_validation_unsafe_cell = {$front_matter}: encountered `UnsafeCell` in a `const`
const_eval_validation_unsafe_cell = {$front_matter}: encountered `UnsafeCell` in read-only memory
const_eval_write_through_immutable_pointer =
writing through a pointer that was derived from a shared (immutable) reference
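
To see how the new `{const_eval_intern_kind}` selector composes with these messages: given the `InternKind` diagnostic argument (see the `IntoDiagnosticArg` impl later in this diff), `const_eval_dangling_ptr_in_final` would render roughly as:

  const      -> "encountered dangling pointer in final value of constant"
  static     -> "encountered dangling pointer in final value of static"
  static_mut -> "encountered dangling pointer in final value of mutable static"
  promoted   -> "encountered dangling pointer in final value of promoted"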

@ -293,6 +293,9 @@ pub fn eval_in_interpreter<'mir, 'tcx>(
cid: GlobalId<'tcx>,
is_static: bool,
) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> {
// `is_static` just means "in static", it could still be a promoted!
debug_assert_eq!(is_static, ecx.tcx.static_mutability(cid.instance.def_id()).is_some());
let res = ecx.load_mir(cid.instance.def, cid.promoted);
match res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, body)) {
Err(error) => {
@ -330,8 +333,7 @@ pub fn eval_in_interpreter<'mir, 'tcx>(
Ok(mplace) => {
// Since evaluation had no errors, validate the resulting constant.
// This is a separate `try` block to provide more targeted error reporting.
let validation =
const_validate_mplace(&ecx, &mplace, is_static, cid.promoted.is_some());
let validation = const_validate_mplace(&ecx, &mplace, cid);
let alloc_id = mplace.ptr().provenance.unwrap().alloc_id();
@ -350,22 +352,26 @@ pub fn eval_in_interpreter<'mir, 'tcx>(
pub fn const_validate_mplace<'mir, 'tcx>(
ecx: &InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
mplace: &MPlaceTy<'tcx>,
is_static: bool,
is_promoted: bool,
cid: GlobalId<'tcx>,
) -> InterpResult<'tcx> {
let mut ref_tracking = RefTracking::new(mplace.clone());
let mut inner = false;
while let Some((mplace, path)) = ref_tracking.todo.pop() {
let mode = if is_static {
if is_promoted {
// Promoteds in statics are allowed to point to statics.
CtfeValidationMode::Const { inner, allow_static_ptrs: true }
} else {
// a `static`
CtfeValidationMode::Regular
let mode = match ecx.tcx.static_mutability(cid.instance.def_id()) {
Some(_) if cid.promoted.is_some() => {
// Promoteds in statics are consts that are allowed to point to statics.
CtfeValidationMode::Const {
allow_immutable_unsafe_cell: false,
allow_static_ptrs: true,
}
}
Some(mutbl) => CtfeValidationMode::Static { mutbl }, // a `static`
None => {
// In normal `const` (not promoted), the outermost allocation is always only copied,
// so having `UnsafeCell` in there is okay despite them being in immutable memory.
let allow_immutable_unsafe_cell = cid.promoted.is_none() && !inner;
CtfeValidationMode::Const { allow_immutable_unsafe_cell, allow_static_ptrs: false }
}
} else {
CtfeValidationMode::Const { inner, allow_static_ptrs: false }
};
ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)?;
inner = true;
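
Summarizing the mode selection above (the item examples are illustrative, not from this diff):

// static S: T          -> CtfeValidationMode::Static { mutbl: Mutability::Not }
// static mut S: T      -> CtfeValidationMode::Static { mutbl: Mutability::Mut }
// promoted in a static -> Const { allow_immutable_unsafe_cell: false, allow_static_ptrs: true }
// const C: T, top-level allocation -> Const { allow_immutable_unsafe_cell: true,  allow_static_ptrs: false }
// const C: T, inner allocations    -> Const { allow_immutable_unsafe_cell: false, allow_static_ptrs: false }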

@ -723,7 +723,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
&& ty.is_freeze(*ecx.tcx, ecx.param_env)
{
let place = ecx.ref_to_mplace(val)?;
let new_place = place.map_provenance(|p| p.map(CtfeProvenance::as_immutable));
let new_place = place.map_provenance(CtfeProvenance::as_immutable);
Ok(ImmTy::from_immediate(new_place.to_ref(ecx), val.layout))
} else {
Ok(val.clone())

@ -1,3 +1,5 @@
use std::borrow::Cow;
use rustc_errors::{
DiagCtxt, DiagnosticArgValue, DiagnosticBuilder, DiagnosticMessage, EmissionGuarantee,
IntoDiagnostic, Level,
@ -13,12 +15,24 @@ use rustc_middle::ty::{self, Ty};
use rustc_span::Span;
use rustc_target::abi::call::AdjustForForeignAbiError;
use rustc_target::abi::{Size, WrappingRange};
use rustc_type_ir::Mutability;
use crate::interpret::InternKind;
#[derive(Diagnostic)]
#[diag(const_eval_dangling_ptr_in_final)]
pub(crate) struct DanglingPtrInFinal {
#[primary_span]
pub span: Span,
pub kind: InternKind,
}
#[derive(Diagnostic)]
#[diag(const_eval_mutable_ptr_in_final)]
pub(crate) struct MutablePtrInFinal {
#[primary_span]
pub span: Span,
pub kind: InternKind,
}
#[derive(Diagnostic)]
@ -100,8 +114,8 @@ pub(crate) struct TransientMutBorrowErr {
}
#[derive(Diagnostic)]
#[diag(const_eval_transient_mut_borrow_raw, code = "E0658")]
pub(crate) struct TransientMutBorrowErrRaw {
#[diag(const_eval_transient_mut_raw, code = "E0658")]
pub(crate) struct TransientMutRawErr {
#[primary_span]
pub span: Span,
pub kind: ConstContext,
@ -142,8 +156,8 @@ pub(crate) struct UnallowedMutableRefs {
}
#[derive(Diagnostic)]
#[diag(const_eval_unallowed_mutable_refs_raw, code = "E0764")]
pub(crate) struct UnallowedMutableRefsRaw {
#[diag(const_eval_unallowed_mutable_raw, code = "E0764")]
pub(crate) struct UnallowedMutableRaw {
#[primary_span]
pub span: Span,
pub kind: ConstContext,
@ -194,14 +208,6 @@ pub(crate) struct UnallowedInlineAsm {
pub kind: ConstContext,
}
#[derive(Diagnostic)]
#[diag(const_eval_unsupported_untyped_pointer)]
#[note]
pub(crate) struct UnsupportedUntypedPointer {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(const_eval_interior_mutable_data_refer, code = "E0492")]
pub(crate) struct InteriorMutableDataRefer {
@ -615,18 +621,16 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
PtrToStatic { ptr_kind: PointerKind::Box } => const_eval_validation_box_to_static,
PtrToStatic { ptr_kind: PointerKind::Ref } => const_eval_validation_ref_to_static,
PtrToMut { ptr_kind: PointerKind::Box } => const_eval_validation_box_to_mut,
PtrToMut { ptr_kind: PointerKind::Ref } => const_eval_validation_ref_to_mut,
PointerAsInt { .. } => const_eval_validation_pointer_as_int,
PartialPointer => const_eval_validation_partial_pointer,
MutableRefInConst => const_eval_validation_mutable_ref_in_const,
MutableRefToImmutable => const_eval_validation_mutable_ref_to_immutable,
NullFnPtr => const_eval_validation_null_fn_ptr,
NeverVal => const_eval_validation_never_val,
NullablePtrOutOfRange { .. } => const_eval_validation_nullable_ptr_out_of_range,
PtrOutOfRange { .. } => const_eval_validation_ptr_out_of_range,
OutOfRange { .. } => const_eval_validation_out_of_range,
UnsafeCell => const_eval_validation_unsafe_cell,
UnsafeCellInImmutable => const_eval_validation_unsafe_cell,
UninhabitedVal { .. } => const_eval_validation_uninhabited_val,
InvalidEnumTag { .. } => const_eval_validation_invalid_enum_tag,
UninhabitedEnumVariant => const_eval_validation_uninhabited_enum_variant,
@ -772,11 +776,11 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
}
NullPtr { .. }
| PtrToStatic { .. }
| PtrToMut { .. }
| MutableRefInConst
| MutableRefToImmutable
| NullFnPtr
| NeverVal
| UnsafeCell
| UnsafeCellInImmutable
| InvalidMetaSliceTooLarge { .. }
| InvalidMetaTooLarge { .. }
| DanglingPtrUseAfterFree { .. }
@ -905,3 +909,14 @@ impl ReportErrorExt for ResourceExhaustionInfo {
}
fn add_args<G: EmissionGuarantee>(self, _: &DiagCtxt, _: &mut DiagnosticBuilder<'_, G>) {}
}
impl rustc_errors::IntoDiagnosticArg for InternKind {
fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
DiagnosticArgValue::Str(Cow::Borrowed(match self {
InternKind::Static(Mutability::Not) => "static",
InternKind::Static(Mutability::Mut) => "static_mut",
InternKind::Constant => "const",
InternKind::Promoted => "promoted",
}))
}
}

@ -117,8 +117,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
def_id,
args,
ty::ClosureKind::FnOnce,
)
.ok_or_else(|| err_inval!(TooGeneric))?;
);
let fn_ptr = self.fn_ptr(FnVal::Instance(instance));
self.write_pointer(fn_ptr, dest)?;
}

@ -5,30 +5,24 @@
//!
//! In principle, this is not very complicated: we recursively walk the final value, follow all the
//! pointers, and move all reachable allocations to the global `tcx` memory. The only complication
//! is picking the right mutability for the allocations in a `static` initializer: we want to make
//! as many allocations as possible immutable so LLVM can put them into read-only memory. At the
//! same time, we need to make memory that could be mutated by the program mutable to avoid
//! incorrect compilations. To achieve this, we do a type-based traversal of the final value,
//! tracking mutable and shared references and `UnsafeCell` to determine the current mutability.
//! (In principle, we could skip this type-based part for `const` and promoteds, as they need to be
//! always immutable. At least for `const` however we use this opportunity to reject any `const`
//! that contains allocations whose mutability we cannot identify.)
//! is picking the right mutability: the outermost allocation generally has a clear mutability, but
//! what about the other allocations it points to that have also been created with this value? We
//! don't want to do guesswork here. The rules are: `static`, `const`, and promoted can only create
//! immutable allocations that way. `static mut` can be initialized with expressions like `&mut 42`,
//! so all inner allocations are marked mutable. Some of them could potentially be made immutable,
//! but that would require relying on type information, and given how many ways Rust has to lie
//! about type information, we want to avoid doing that.
use super::validity::RefTracking;
use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
use rustc_middle::mir::interpret::{CtfeProvenance, InterpResult};
use rustc_middle::ty::{self, layout::TyAndLayout, Ty};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_ast::Mutability;
use super::{
AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy, Projectable,
ValueVisitor,
};
use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy};
use crate::const_eval;
use crate::errors::{DanglingPtrInFinal, UnsupportedUntypedPointer};
use crate::errors::{DanglingPtrInFinal, MutablePtrInFinal};
pub trait CompileTimeMachine<'mir, 'tcx: 'mir, T> = Machine<
'mir,
@ -41,271 +35,44 @@ pub trait CompileTimeMachine<'mir, 'tcx: 'mir, T> = Machine<
MemoryMap = FxIndexMap<AllocId, (MemoryKind<T>, Allocation)>,
>;
struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>> {
/// The ectx from which we intern.
/// Intern an allocation. Returns `Err` if the allocation does not exist in the local memory.
///
/// `mutability` can be used to force immutable interning: if it is `Mutability::Not`, the
/// allocation is interned immutably; if it is `Mutability::Mut`, then the allocation *must be*
/// already mutable (as a sanity check).
///
/// `recursive_alloc` is called for all recursively encountered allocations.
fn intern_shallow<'rt, 'mir, 'tcx, T, M: CompileTimeMachine<'mir, 'tcx, T>>(
ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
/// Previously encountered safe references.
ref_tracking: &'rt mut RefTracking<(MPlaceTy<'tcx>, InternMode)>,
/// A list of all encountered allocations. After type-based interning, we traverse this list to
/// also intern allocations that are only referenced by a raw pointer or inside a union.
leftover_allocations: &'rt mut FxIndexSet<AllocId>,
/// The root kind of the value that we're looking at. This field is never mutated for a
/// particular allocation. It is primarily used to make as many allocations as possible
/// read-only so LLVM can place them in const memory.
mode: InternMode,
/// This field stores whether we are *currently* inside an `UnsafeCell`. This can affect
/// the intern mode of references we encounter.
inside_unsafe_cell: bool,
}
#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
enum InternMode {
/// A static and its current mutability. Below shared references inside a `static mut`,
/// this is *immutable*, and below mutable references inside an `UnsafeCell`, this
/// is *mutable*.
Static(hir::Mutability),
/// A `const`.
Const,
}
/// Signalling data structure to ensure we don't recurse
/// into the memory of other constants or statics
struct IsStaticOrFn;
/// Intern an allocation without looking at its children.
/// `mode` is the mode of the environment where we found this pointer.
/// `mutability` is the mutability of the place to be interned; even if that says
/// `immutable` things might become mutable if `ty` is not frozen.
/// `ty` can be `None` if there is no potential interior mutability
/// to account for (e.g. for vtables).
fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>(
ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
leftover_allocations: &'rt mut FxIndexSet<AllocId>,
alloc_id: AllocId,
mode: InternMode,
ty: Option<Ty<'tcx>>,
) -> Option<IsStaticOrFn> {
trace!("intern_shallow {:?} with {:?}", alloc_id, mode);
mutability: Mutability,
mut recursive_alloc: impl FnMut(&InterpCx<'mir, 'tcx, M>, CtfeProvenance),
) -> Result<(), ()> {
trace!("intern_shallow {:?}", alloc_id);
// remove allocation
let tcx = ecx.tcx;
let Some((kind, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) else {
// Pointer not found in local memory map. It is either a pointer to the global
// map, or dangling.
// If the pointer is dangling (neither in local nor global memory), we leave it
// to validation to error -- it has the much better error messages, pointing out where
// in the value the dangling reference lies.
// The `span_delayed_bug` ensures that we don't forget such a check in validation.
if tcx.try_get_global_alloc(alloc_id).is_none() {
tcx.dcx().span_delayed_bug(ecx.tcx.span, "tried to intern dangling pointer");
}
// treat dangling pointers like other statics
// just to stop trying to recurse into them
return Some(IsStaticOrFn);
let Some((_kind, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) else {
return Err(());
};
// This match is just a canary for future changes to `MemoryKind`, which most likely need
// changes in this function.
match kind {
MemoryKind::Stack
| MemoryKind::Machine(const_eval::MemoryKind::Heap)
| MemoryKind::CallerLocation => {}
}
// Set allocation mutability as appropriate. This is used by LLVM to put things into
// read-only memory, and also by Miri when evaluating other globals that
// access this one.
if let InternMode::Static(mutability) = mode {
// For this, we need to take into account `UnsafeCell`. When `ty` is `None`, we assume
// no interior mutability.
let frozen = ty.map_or(true, |ty| ty.is_freeze(*ecx.tcx, ecx.param_env));
// For statics, allocation mutability is the combination of place mutability and
// type mutability.
// The entire allocation needs to be mutable if it contains an `UnsafeCell` anywhere.
let immutable = mutability == Mutability::Not && frozen;
if immutable {
alloc.mutability = Mutability::Not;
} else {
// Just making sure we are not "upgrading" an immutable allocation to mutable.
assert_eq!(alloc.mutability, Mutability::Mut);
}
} else {
// No matter what, *constants are never mutable*. Mutating them is UB.
// See const_eval::machine::MemoryExtra::can_access_statics for why
// immutability is so important.
// Validation will ensure that there is no `UnsafeCell` on an immutable allocation.
alloc.mutability = Mutability::Not;
};
// link the alloc id to the actual allocation
leftover_allocations.extend(alloc.provenance().ptrs().iter().map(|&(_, prov)| prov.alloc_id()));
let alloc = tcx.mk_const_alloc(alloc);
tcx.set_alloc_id_memory(alloc_id, alloc);
None
}
impl<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>
InternVisitor<'rt, 'mir, 'tcx, M>
{
fn intern_shallow(
&mut self,
alloc_id: AllocId,
mode: InternMode,
ty: Option<Ty<'tcx>>,
) -> Option<IsStaticOrFn> {
intern_shallow(self.ecx, self.leftover_allocations, alloc_id, mode, ty)
}
}
impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>
ValueVisitor<'mir, 'tcx, M> for InternVisitor<'rt, 'mir, 'tcx, M>
{
type V = MPlaceTy<'tcx>;
#[inline(always)]
fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
self.ecx
}
fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
// Handle Reference types, as these are the only types with provenance supported by const eval.
// Raw pointers (and boxes) are handled by the `leftover_allocations` logic.
let tcx = self.ecx.tcx;
let ty = mplace.layout.ty;
if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() {
let value = self.ecx.read_immediate(mplace)?;
let mplace = self.ecx.ref_to_mplace(&value)?;
assert_eq!(mplace.layout.ty, referenced_ty);
// Handle trait object vtables.
if let ty::Dynamic(_, _, ty::Dyn) =
tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
{
let ptr = mplace.meta().unwrap_meta().to_pointer(&tcx)?;
if let Some(prov) = ptr.provenance {
// Explicitly choose const mode here, since vtables are immutable, even
// if the reference of the fat pointer is mutable.
self.intern_shallow(prov.alloc_id(), InternMode::Const, None);
} else {
// Validation will error (with a better message) on an invalid vtable pointer.
// Let validation show the error message, but make sure it *does* error.
tcx.dcx()
.span_delayed_bug(tcx.span, "vtables pointers cannot be integer pointers");
}
}
// Check if we have encountered this pointer+layout combination before.
// Only recurse for allocation-backed pointers.
if let Some(prov) = mplace.ptr().provenance {
// Compute the mode with which we intern this. Our goal here is to make as many
// statics as we can immutable so they can be placed in read-only memory by LLVM.
let ref_mode = match self.mode {
InternMode::Static(mutbl) => {
// In statics, merge outer mutability with reference mutability and
// take into account whether we are in an `UnsafeCell`.
// The only way a mutable reference actually works as a mutable reference is
// by being in a `static mut` directly or behind another mutable reference.
// If there's an immutable reference or we are inside a `static`, then our
// mutable reference is equivalent to an immutable one. As an example:
// `&&mut Foo` is semantically equivalent to `&&Foo`
match ref_mutability {
_ if self.inside_unsafe_cell => {
// Inside an `UnsafeCell` is like inside a `static mut`, the "outer"
// mutability does not matter.
InternMode::Static(ref_mutability)
}
match mutability {
Mutability::Not => {
// A shared reference, things become immutable.
// We do *not* consider `freeze` here: `intern_shallow` considers
// `freeze` for the actual mutability of this allocation; the intern
// mode for references contained in this allocation is tracked more
// precisely when traversing the referenced data (by tracking
// `UnsafeCell`). This makes sure that `&(&i32, &Cell<i32>)` still
// has the left inner reference interned into a read-only
// allocation.
InternMode::Static(Mutability::Not)
alloc.mutability = Mutability::Not;
}
Mutability::Mut => {
// Mutable reference.
InternMode::Static(mutbl)
// This must be already mutable, we won't "un-freeze" allocations ever.
assert_eq!(alloc.mutability, Mutability::Mut);
}
}
// record child allocations
for &(_, prov) in alloc.provenance().ptrs().iter() {
recursive_alloc(ecx, prov);
}
InternMode::Const => {
// Ignore `UnsafeCell`, everything is immutable. Validity does some sanity
// checking for mutable references that we encounter -- they must all be
// ZST.
InternMode::Const
}
};
match self.intern_shallow(prov.alloc_id(), ref_mode, Some(referenced_ty)) {
// No need to recurse, these are interned already and statics may have
// cycles, so we don't want to recurse there
Some(IsStaticOrFn) => {}
// intern everything referenced by this value. The mutability is taken from the
// reference. It is checked above that mutable references only happen in
// `static mut`
None => self.ref_tracking.track((mplace, ref_mode), || ()),
}
}
// link the alloc id to the actual allocation
let alloc = ecx.tcx.mk_const_alloc(alloc);
ecx.tcx.set_alloc_id_memory(alloc_id, alloc);
Ok(())
} else {
// Not a reference. Check if we want to recurse.
let is_walk_needed = |mplace: &MPlaceTy<'tcx>| -> InterpResult<'tcx, bool> {
// ZSTs cannot contain pointers, we can avoid the interning walk.
if mplace.layout.is_zst() {
return Ok(false);
}
// Now, check whether this allocation could contain references.
//
// Note, this check may sometimes not be cheap, so we only do it when the walk we'd like
// to avoid could be expensive: on the potentially larger types, arrays and slices,
// rather than on all aggregates unconditionally.
if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
let Some((size, _align)) = self.ecx.size_and_align_of_mplace(mplace)? else {
// We do the walk if we can't determine the size of the mplace: we may be
// dealing with extern types here in the future.
return Ok(true);
};
// If there is no provenance in this allocation, it does not contain references
// that point to another allocation, and we can avoid the interning walk.
if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr(), size)? {
if !alloc.has_provenance() {
return Ok(false);
}
} else {
// We're encountering a ZST here, and can avoid the walk as well.
return Ok(false);
}
}
// In the general case, we do the walk.
Ok(true)
};
// If this allocation contains no references to intern, we avoid the potentially costly
// walk.
//
// We can do this before the checks for interior mutability below, because only references
// are relevant in that situation, and we're checking if there are any here.
if !is_walk_needed(mplace)? {
return Ok(());
}
if let Some(def) = mplace.layout.ty.ty_adt_def() {
if def.is_unsafe_cell() {
// We are crossing over an `UnsafeCell`, we can mutate again. This means that
// References we encounter inside here are interned as pointing to mutable
// allocations.
// Remember the `old` value to handle nested `UnsafeCell`.
let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
let walked = self.walk_value(mplace);
self.inside_unsafe_cell = old;
return walked;
}
}
self.walk_value(mplace)
}
}
}
/// How a constant value should be interned.
@ -332,122 +99,108 @@ pub fn intern_const_alloc_recursive<
intern_kind: InternKind,
ret: &MPlaceTy<'tcx>,
) -> Result<(), ErrorGuaranteed> {
let tcx = ecx.tcx;
let base_intern_mode = match intern_kind {
InternKind::Static(mutbl) => InternMode::Static(mutbl),
// `Constant` includes array lengths.
InternKind::Constant | InternKind::Promoted => InternMode::Const,
// We are interning recursively, and for mutability we are distinguishing the "root" allocation
// that we are starting in, and all other allocations that we are encountering recursively.
let (base_mutability, inner_mutability) = match intern_kind {
InternKind::Constant | InternKind::Promoted => {
// Completely immutable. Interning anything mutably here can only lead to unsoundness,
// since all consts are conceptually independent values but share the same underlying
// memory.
(Mutability::Not, Mutability::Not)
}
InternKind::Static(Mutability::Not) => {
(
// Outermost allocation is mutable if `!Freeze`.
if ret.layout.ty.is_freeze(*ecx.tcx, ecx.param_env) {
Mutability::Not
} else {
Mutability::Mut
},
// Inner allocations are never mutable. They can only arise via the "tail
// expression" / "outer scope" rule, and we treat them consistently with `const`.
Mutability::Not,
)
}
InternKind::Static(Mutability::Mut) => {
// Just make everything mutable. We accept code like
// `static mut X = &mut [42]`, so even inner allocations need to be mutable.
(Mutability::Mut, Mutability::Mut)
}
};
// Type based interning.
// `ref_tracking` tracks typed references we have already interned and still need to crawl for
// more typed information inside them.
// `leftover_allocations` collects *all* allocations we see, because some might not
// be available in a typed way. They get interned at the end.
let mut ref_tracking = RefTracking::empty();
let leftover_allocations = &mut FxIndexSet::default();
// Initialize recursive interning.
let base_alloc_id = ret.ptr().provenance.unwrap().alloc_id();
let mut todo = vec![(base_alloc_id, base_mutability)];
// We need to distinguish "has just been interned" from "was already in `tcx`",
// so we track this in a separate set.
let mut just_interned = FxHashSet::default();
// Whether we encountered a bad mutable pointer.
// We want to first report "dangling" and then "mutable", so we need to delay reporting these
// errors.
let mut found_bad_mutable_pointer = false;
// start with the outermost allocation
intern_shallow(
ecx,
leftover_allocations,
// The outermost allocation must exist, because we allocated it with
// `Memory::allocate`.
ret.ptr().provenance.unwrap().alloc_id(),
base_intern_mode,
Some(ret.layout.ty),
);
ref_tracking.track((ret.clone(), base_intern_mode), || ());
while let Some(((mplace, mode), _)) = ref_tracking.todo.pop() {
let res = InternVisitor {
ref_tracking: &mut ref_tracking,
ecx,
mode,
leftover_allocations,
inside_unsafe_cell: false,
// Keep interning as long as there are things to intern.
// We show errors if there are dangling pointers, or mutable pointers in immutable contexts
// (i.e., everything except for `static mut`). When these errors affect references, it is
// unfortunate that we show these errors here and not during validation, since validation can
// show much nicer errors. However, we do need these checks to be run on all pointers, including
// raw pointers, so we cannot rely on validation to catch them -- and since interning runs
// before validation, and interning doesn't know the type of anything, this means we can't show
// better errors. Maybe we should consider doing validation before interning in the future.
while let Some((alloc_id, mutability)) = todo.pop() {
if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
// Already interned.
debug_assert!(!ecx.memory.alloc_map.contains_key(&alloc_id));
continue;
}
.visit_value(&mplace);
// We deliberately *ignore* interpreter errors here. When there is a problem, the remaining
// references are "leftover"-interned, and later validation will show a proper error
// and point at the right part of the value causing the problem.
match res {
Ok(()) => {}
Err(error) => {
ecx.tcx.dcx().span_delayed_bug(
ecx.tcx.span,
format!(
"error during interning should later cause validation failure: {}",
ecx.format_error(error),
),
);
}
}
}
// Intern the rest of the allocations as mutable. These might be inside unions, padding, raw
// pointers, ... So we can't intern them according to their type rules
let mut todo: Vec<_> = leftover_allocations.iter().cloned().collect();
debug!(?todo);
debug!("dead_alloc_map: {:#?}", ecx.memory.dead_alloc_map);
while let Some(alloc_id) = todo.pop() {
if let Some((_, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) {
// We can't call the `intern_shallow` method here, as its logic is tailored to safe
// references and a `leftover_allocations` set (where we only have a todo-list here).
// So we hand-roll the interning logic here again.
match intern_kind {
// Statics may point to mutable allocations.
// Even for immutable statics it would be ok to have mutable allocations behind
// raw pointers, e.g. for `static FOO: *const AtomicUsize = &AtomicUsize::new(42)`.
InternKind::Static(_) => {}
// Raw pointers in promoteds may only point to immutable things so we mark
// everything as immutable.
// It is UB to mutate through a raw pointer obtained via an immutable reference:
// Since all references and pointers inside a promoted must by their very definition
// be created from an immutable reference (and promotion also excludes interior
// mutability), mutating through them would be UB.
// There's no way we can check whether the user is using raw pointers correctly,
// so all we can do is mark this as immutable here.
InternKind::Promoted => {
// See const_eval::machine::MemoryExtra::can_access_statics for why
// immutability is so important.
alloc.mutability = Mutability::Not;
}
// If it's a constant, we should not have any "leftovers" as everything
// is tracked by const-checking.
// FIXME: downgrade this to a warning? It rejects some legitimate consts,
// such as `const CONST_RAW: *const Vec<i32> = &Vec::new() as *const _;`.
//
// NOTE: it looks like this code path is only reachable when we try to intern
// something that cannot be promoted, which in constants means values that have
// drop glue, such as the example above.
InternKind::Constant => {
ecx.tcx.dcx().emit_err(UnsupportedUntypedPointer { span: ecx.tcx.span });
// For better errors later, mark the allocation as immutable.
alloc.mutability = Mutability::Not;
}
}
let alloc = tcx.mk_const_alloc(alloc);
tcx.set_alloc_id_memory(alloc_id, alloc);
for &(_, prov) in alloc.inner().provenance().ptrs().iter() {
just_interned.insert(alloc_id);
intern_shallow(ecx, alloc_id, mutability, |ecx, prov| {
let alloc_id = prov.alloc_id();
if leftover_allocations.insert(alloc_id) {
todo.push(alloc_id);
if intern_kind != InternKind::Promoted
&& inner_mutability == Mutability::Not
&& !prov.immutable()
{
if ecx.tcx.try_get_global_alloc(alloc_id).is_some()
&& !just_interned.contains(&alloc_id)
{
// This is a pointer to some memory from another constant. We encounter mutable
// pointers to such memory since we do not always track immutability through
// these "global" pointers. Allowing them is harmless; the point of these checks
// during interning is to justify why we intern the *new* allocations immutably,
// so we can completely ignore existing allocations. We also don't need to add
// this to the todo list, since after all it is already interned.
return;
}
// Found a mutable pointer inside a const where inner allocations should be
// immutable. We exclude promoteds from this, since things like `&mut []` and
// `&None::<Cell<i32>>` lead to promotion that can produce mutable pointers. We rely
// on the promotion analysis not screwing up to ensure that it is sound to intern
// promoteds as immutable.
found_bad_mutable_pointer = true;
}
} else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) {
// Codegen does not like dangling pointers, and generally `tcx` assumes that
// all allocations referenced anywhere actually exist. So, make sure we error here.
let reported = ecx.tcx.dcx().emit_err(DanglingPtrInFinal { span: ecx.tcx.span });
return Err(reported);
} else if ecx.tcx.try_get_global_alloc(alloc_id).is_none() {
// We have hit an `AllocId` that is neither in local or global memory and isn't
// marked as dangling by local memory. That should be impossible.
span_bug!(ecx.tcx.span, "encountered unknown alloc id {:?}", alloc_id);
// We always intern with `inner_mutability`, and furthermore we ensured above that if
// that is "immutable", then there are *no* mutable pointers anywhere in the newly
// interned memory -- justifying that we can indeed intern immutably. However this also
// means we can *not* easily intern immutably here if `prov.immutable()` is true and
// `inner_mutability` is `Mut`: there might be other pointers to that allocation, and
// we'd have to somehow check that they are *all* immutable before deciding that this
// allocation can be made immutable. In the future we could consider analyzing all
// pointers before deciding which allocations can be made immutable; but for now we are
// okay with losing some potential for immutability here. This can anyway only affect
// `static mut`.
todo.push((alloc_id, inner_mutability));
})
.map_err(|()| {
ecx.tcx.dcx().emit_err(DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind })
})?;
}
if found_bad_mutable_pointer {
return Err(ecx
.tcx
.dcx()
.emit_err(MutablePtrInFinal { span: ecx.tcx.span, kind: intern_kind }));
}
Ok(())
}
@ -462,29 +215,18 @@ pub fn intern_const_alloc_for_constprop<
ecx: &mut InterpCx<'mir, 'tcx, M>,
alloc_id: AllocId,
) -> InterpResult<'tcx, ()> {
// Move allocation to `tcx`.
let Some((_, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) else {
// Pointer not found in local memory map. It is either a pointer to the global
// map, or dangling.
if ecx.tcx.try_get_global_alloc(alloc_id).is_none() {
throw_ub!(DeadLocal)
}
if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
// The constant is already in global memory. Do nothing.
return Ok(());
};
alloc.mutability = Mutability::Not;
}
// Move allocation to `tcx`.
intern_shallow(ecx, alloc_id, Mutability::Not, |_ecx, _| {
// We are not doing recursive interning, so we don't currently support provenance.
// (If this assertion ever triggers, we should just implement a
// proper recursive interning loop.)
assert!(alloc.provenance().ptrs().is_empty());
// Link the alloc id to the actual allocation
let alloc = ecx.tcx.mk_const_alloc(alloc);
ecx.tcx.set_alloc_id_memory(alloc_id, alloc);
Ok(())
// proper recursive interning loop -- or just call `intern_const_alloc_recursive`.
panic!("`intern_const_alloc_for_constprop` called on allocation with nested provenance")
})
.map_err(|()| err_ub!(DeadLocal).into())
}
impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
@ -504,12 +246,16 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
// `allocate` picks a fresh AllocId that we will associate with its data below.
let dest = self.allocate(layout, MemoryKind::Stack)?;
f(self, &dest.clone().into())?;
let mut alloc =
self.memory.alloc_map.remove(&dest.ptr().provenance.unwrap().alloc_id()).unwrap().1;
alloc.mutability = Mutability::Not;
let alloc = self.tcx.mk_const_alloc(alloc);
let alloc_id = dest.ptr().provenance.unwrap().alloc_id(); // this was just allocated, it must have provenance
self.tcx.set_alloc_id_memory(alloc_id, alloc);
intern_shallow(self, alloc_id, Mutability::Not, |ecx, prov| {
// We are not doing recursive interning, so we don't currently support provenance.
// (If this check ever triggers, we should just implement a
// proper recursive interning loop -- or just call `intern_const_alloc_recursive`.)
if ecx.tcx.try_get_global_alloc(prov.alloc_id()).is_none() {
panic!("`intern_with_temp_alloc` with nested allocations");
}
})
.unwrap();
Ok(alloc_id)
}
}
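
To make the interning rules from the module comment above concrete, these are the (outermost, inner) allocation mutabilities chosen by `intern_const_alloc_recursive` per root kind; the user-code items are illustrative, not part of this diff:

// const C: &i32 = &0;                       -> (Not, Not): consts are fully immutable
// promoted, e.g. the `&0` above             -> (Not, Not)
// static S: &i32 = &0;                      -> (Not, Not): inner allocations treated like `const`
// static A: AtomicI32 = AtomicI32::new(0);  -> (Mut, Not): outermost is mutable because the type is !Freeze
// static mut M: &mut [i32] = &mut [42];     -> (Mut, Mut): inner allocations stay mutable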

@ -62,8 +62,8 @@ pub(super) struct MemPlace<Prov: Provenance = CtfeProvenance> {
impl<Prov: Provenance> MemPlace<Prov> {
/// Adjust the provenance of the main pointer (metadata is unaffected).
pub fn map_provenance(self, f: impl FnOnce(Option<Prov>) -> Option<Prov>) -> Self {
MemPlace { ptr: self.ptr.map_provenance(f), ..self }
pub fn map_provenance(self, f: impl FnOnce(Prov) -> Prov) -> Self {
MemPlace { ptr: self.ptr.map_provenance(|p| p.map(f)), ..self }
}
/// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
@ -128,7 +128,7 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
}
/// Adjust the provenance of the main pointer (metadata is unaffected).
pub fn map_provenance(self, f: impl FnOnce(Option<Prov>) -> Option<Prov>) -> Self {
pub fn map_provenance(self, f: impl FnOnce(Prov) -> Prov) -> Self {
MPlaceTy { mplace: self.mplace.map_provenance(f), ..self }
}
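
With this signature change the `Option` handling moves inside `map_provenance`, so callers pass a plain `Prov -> Prov` function, as the `machine.rs` hunk earlier in this diff shows:

// before: place.map_provenance(|p| p.map(CtfeProvenance::as_immutable))
// after:  place.map_provenance(CtfeProvenance::as_immutable)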

@ -9,12 +9,13 @@ use std::num::NonZeroUsize;
use either::{Left, Right};
use hir::def::DefKind;
use rustc_ast::Mutability;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
use rustc_middle::mir::interpret::{
ExpectedKind, InterpError, InvalidMetaKind, Misalignment, PointerKind, ValidationErrorInfo,
ValidationErrorKind, ValidationErrorKind::*,
ExpectedKind, InterpError, InvalidMetaKind, Misalignment, PointerKind, Provenance,
ValidationErrorInfo, ValidationErrorKind, ValidationErrorKind::*,
};
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
@ -123,15 +124,41 @@ pub enum PathElem {
}
/// Extra things to check for during validation of CTFE results.
#[derive(Copy, Clone)]
pub enum CtfeValidationMode {
/// Regular validation, nothing special happening.
Regular,
/// Validation of a `const`.
/// `inner` says if this is an inner, indirect allocation (as opposed to the top-level const
/// allocation). Being an inner allocation makes a difference because the top-level allocation
/// of a `const` is copied for each use, but the inner allocations are implicitly shared.
/// Validation of a `static`
Static { mutbl: Mutability },
/// Validation of a `const` (including promoteds).
/// `allow_immutable_unsafe_cell` says whether we allow `UnsafeCell` in immutable memory (which is the
/// case for the top-level allocation of a `const`, where this is fine because the allocation will be
/// copied at each use site).
/// `allow_static_ptrs` says if pointers to statics are permitted (which is the case for promoteds in statics).
Const { inner: bool, allow_static_ptrs: bool },
Const { allow_immutable_unsafe_cell: bool, allow_static_ptrs: bool },
}
impl CtfeValidationMode {
fn allow_immutable_unsafe_cell(self) -> bool {
match self {
CtfeValidationMode::Static { .. } => false,
CtfeValidationMode::Const { allow_immutable_unsafe_cell, .. } => {
allow_immutable_unsafe_cell
}
}
}
fn allow_static_ptrs(self) -> bool {
match self {
CtfeValidationMode::Static { .. } => true, // statics can point to statics
CtfeValidationMode::Const { allow_static_ptrs, .. } => allow_static_ptrs,
}
}
fn may_contain_mutable_ref(self) -> bool {
match self {
CtfeValidationMode::Static { mutbl } => mutbl == Mutability::Mut,
CtfeValidationMode::Const { .. } => false,
}
}
}
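
As an illustration of `allow_immutable_unsafe_cell` (the examples are mine, not from the diff): an `UnsafeCell` in the top-level allocation of a `const` is accepted because every use site gets its own copy, but one in an inner allocation is not:

// use std::cell::Cell;
// const OK: Cell<i32> = Cell::new(0);     // top-level allocation: accepted
// const BAD: &Cell<i32> = &Cell::new(0);  // inner allocation: rejected (E0492)
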
/// State for tracking recursive validation of references
@ -418,26 +445,52 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
}
// Recursive checking
if let Some(ref_tracking) = self.ref_tracking.as_deref_mut() {
// Determine whether this pointer expects to be pointing to something mutable.
let ptr_expected_mutbl = match ptr_kind {
PointerKind::Box => Mutability::Mut,
PointerKind::Ref => {
let tam = value.layout.ty.builtin_deref(false).unwrap();
// ZST never require mutability. We do not take into account interior mutability
// here since we cannot know if there really is an `UnsafeCell` inside
// `Option<UnsafeCell>` -- so we check that in the recursive descent behind this
// reference.
if size == Size::ZERO { Mutability::Not } else { tam.mutbl }
}
};
// Proceed recursively even for ZST, no reason to skip them!
// `!` is a ZST and we want to validate it.
if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr()) {
// Let's see what kind of memory this points to.
let alloc_kind = self.ecx.tcx.try_get_global_alloc(alloc_id);
// `unwrap` since dangling pointers have already been handled.
let alloc_kind = self.ecx.tcx.try_get_global_alloc(alloc_id).unwrap();
match alloc_kind {
Some(GlobalAlloc::Static(did)) => {
GlobalAlloc::Static(did) => {
// Special handling for pointers to statics (irrespective of their type).
assert!(!self.ecx.tcx.is_thread_local_static(did));
assert!(self.ecx.tcx.is_static(did));
if matches!(
self.ctfe_mode,
Some(CtfeValidationMode::Const { allow_static_ptrs: false, .. })
) {
if self.ctfe_mode.is_some_and(|c| !c.allow_static_ptrs()) {
// See const_eval::machine::MemoryExtra::can_access_statics for why
// this check is so important.
// This check is reachable when the const just referenced the static,
// but never read it (so we never entered `before_access_global`).
throw_validation_failure!(self.path, PtrToStatic { ptr_kind });
}
// Mutability check.
if ptr_expected_mutbl == Mutability::Mut {
if matches!(
self.ecx.tcx.def_kind(did),
DefKind::Static(Mutability::Not)
) && self
.ecx
.tcx
.type_of(did)
.no_bound_vars()
.expect("statics should not have generic parameters")
.is_freeze(*self.ecx.tcx, ty::ParamEnv::reveal_all())
{
throw_validation_failure!(self.path, MutableRefToImmutable);
}
}
// We skip recursively checking other statics. These statics must be sound by
// themselves, and the only way to get broken statics here is by using
// unsafe code.
@ -450,18 +503,31 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// referring to statics).
return Ok(());
}
Some(GlobalAlloc::Memory(alloc)) => {
GlobalAlloc::Memory(alloc) => {
if alloc.inner().mutability == Mutability::Mut
&& matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. }))
{
// This should be unreachable, but if someone manages to copy a pointer
// out of a `static`, then that pointer might point to mutable memory,
// and we would catch that here.
throw_validation_failure!(self.path, PtrToMut { ptr_kind });
// This is impossible: this can only be some inner allocation of a
// `static mut` (everything else either hits the `GlobalAlloc::Static`
// case or is interned immutably). To get such a pointer we'd have to
// load it from a static, but such loads lead to a CTFE error.
span_bug!(
self.ecx.tcx.span,
"encountered reference to mutable memory inside a `const`"
);
}
if ptr_expected_mutbl == Mutability::Mut
&& alloc.inner().mutability == Mutability::Not
{
throw_validation_failure!(self.path, MutableRefToImmutable);
}
}
GlobalAlloc::Function(..) | GlobalAlloc::VTable(..) => {
// These are immutable, we better don't allow mutable pointers here.
if ptr_expected_mutbl == Mutability::Mut {
throw_validation_failure!(self.path, MutableRefToImmutable);
}
}
// Nothing to check for these.
None | Some(GlobalAlloc::Function(..) | GlobalAlloc::VTable(..)) => {}
}
}
let path = &self.path;
@ -532,11 +598,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
Ok(true)
}
ty::Ref(_, ty, mutbl) => {
if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. }))
if self.ctfe_mode.is_some_and(|c| !c.may_contain_mutable_ref())
&& *mutbl == Mutability::Mut
{
// A mutable reference inside a const? That does not seem right (except if it is
// a ZST).
let layout = self.ecx.layout_of(*ty)?;
if !layout.is_zst() {
throw_validation_failure!(self.path, MutableRefInConst);
@ -642,6 +706,19 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
)
}
}
fn in_mutable_memory(&self, op: &OpTy<'tcx, M::Provenance>) -> bool {
if let Some(mplace) = op.as_mplace_or_imm().left() {
if let Some(alloc_id) = mplace.ptr().provenance.and_then(|p| p.get_alloc_id()) {
if self.ecx.tcx.global_alloc(alloc_id).unwrap_memory().inner().mutability
== Mutability::Mut
{
return true;
}
}
}
false
}
}
impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
@ -705,10 +782,12 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
op: &OpTy<'tcx, M::Provenance>,
_fields: NonZeroUsize,
) -> InterpResult<'tcx> {
// Special check preventing `UnsafeCell` inside unions in the inner part of constants.
if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. })) {
if !op.layout.ty.is_freeze(*self.ecx.tcx, self.ecx.param_env) {
throw_validation_failure!(self.path, UnsafeCell);
// Special check for CTFE validation, preventing `UnsafeCell` inside unions in immutable memory.
if self.ctfe_mode.is_some_and(|c| !c.allow_immutable_unsafe_cell()) {
if !op.layout.is_zst() && !op.layout.ty.is_freeze(*self.ecx.tcx, self.ecx.param_env) {
if !self.in_mutable_memory(op) {
throw_validation_failure!(self.path, UnsafeCellInImmutable);
}
}
}
Ok(())
@ -730,11 +809,14 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
}
// Special check preventing `UnsafeCell` in the inner part of constants
if let Some(def) = op.layout.ty.ty_adt_def() {
if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. }))
if self.ctfe_mode.is_some_and(|c| !c.allow_immutable_unsafe_cell()) {
if !op.layout.is_zst()
&& let Some(def) = op.layout.ty.ty_adt_def()
&& def.is_unsafe_cell()
{
throw_validation_failure!(self.path, UnsafeCell);
if !self.in_mutable_memory(op) {
throw_validation_failure!(self.path, UnsafeCellInImmutable);
}
}
}
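A hedged sketch of the distinction `allow_immutable_unsafe_cell` draws (illustrative, not part of the diff):

use std::cell::UnsafeCell;

struct SyncCell(UnsafeCell<i32>);
unsafe impl Sync for SyncCell {}

// Rejected: the `UnsafeCell` would end up interned in immutable memory.
// const C: &SyncCell = &SyncCell(UnsafeCell::new(0)); //~ ERROR interior mutability
// Accepted: a `static` lives in mutable memory, so `in_mutable_memory` holds.
static S: SyncCell = SyncCell(UnsafeCell::new(0));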

View file

@ -466,7 +466,7 @@ impl<'tcx> NonConstOp<'tcx> for MutBorrow {
fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
match self.0 {
hir::BorrowKind::Raw => ccx.dcx().create_err(errors::UnallowedMutableRefsRaw {
hir::BorrowKind::Raw => ccx.tcx.dcx().create_err(errors::UnallowedMutableRaw {
span,
kind: ccx.const_kind(),
teach: ccx.tcx.sess.teach(&error_code!(E0764)).then_some(()),
@ -491,10 +491,10 @@ impl<'tcx> NonConstOp<'tcx> for TransientMutBorrow {
fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
let kind = ccx.const_kind();
match self.0 {
hir::BorrowKind::Raw => ccx.tcx.sess.create_feature_err(
errors::TransientMutBorrowErrRaw { span, kind },
sym::const_mut_refs,
),
hir::BorrowKind::Raw => ccx
.tcx
.sess
.create_feature_err(errors::TransientMutRawErr { span, kind }, sym::const_mut_refs),
hir::BorrowKind::Ref => ccx.tcx.sess.create_feature_err(
errors::TransientMutBorrowErr { span, kind },
sym::const_mut_refs,

View file

@ -27,6 +27,7 @@ fn alloc_caller_location<'mir, 'tcx>(
// See https://github.com/rust-lang/rust/pull/89920#discussion_r730012398
ecx.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not).unwrap()
};
let file = file.map_provenance(CtfeProvenance::as_immutable);
let line = if loc_details.line { Scalar::from_u32(line) } else { Scalar::from_u32(0) };
let col = if loc_details.column { Scalar::from_u32(col) } else { Scalar::from_u32(0) };

View file

@ -12,7 +12,9 @@ use rustc_trait_selection::traits::StructurallyNormalizeExt;
#[derive(Copy, Clone, Debug)]
pub enum AutoderefKind {
/// A true pointer type, such as `&T` and `*mut T`.
Builtin,
/// A type which must dispatch to a `Deref` implementation.
Overloaded,
}
@ -83,6 +85,7 @@ impl<'a, 'tcx> Iterator for Autoderef<'a, 'tcx> {
(AutoderefKind::Builtin, ty)
}
} else if let Some(ty) = self.overloaded_deref_ty(self.state.cur_ty) {
// The overloaded deref check already normalizes the pointee type.
(AutoderefKind::Overloaded, ty)
} else {
return None;
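A quick illustration of the two kinds (not part of the diff):

// Builtin: `&Box<i32> -> Box<i32>` needs no trait dispatch.
// Overloaded: `Box<i32> -> i32` dispatches to `<Box<i32> as Deref>::deref`.
fn peel(x: &Box<i32>) -> i32 {
    **x // one builtin deref step, then one overloaded step
}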

View file

@ -254,7 +254,7 @@ fn compare_method_predicate_entailment<'tcx>(
// checks. For the comparison to be valid, we need to
// normalize the associated types in the impl/trait methods
// first. However, because function types bind regions, just
// calling `normalize_associated_types_in` would have no effect on
// calling `FnCtxt::normalize` would have no effect on
// any associated types appearing in the fn arguments or return
// type.

View file

@ -530,9 +530,13 @@ pub(super) fn type_of_opaque(
Ok(ty::EarlyBinder::bind(match tcx.hir_node_by_def_id(def_id) {
Node::Item(item) => match item.kind {
ItemKind::OpaqueTy(OpaqueTy {
origin: hir::OpaqueTyOrigin::TyAlias { .. },
origin: hir::OpaqueTyOrigin::TyAlias { in_assoc_ty: false },
..
}) => opaque::find_opaque_ty_constraints_for_tait(tcx, def_id),
ItemKind::OpaqueTy(OpaqueTy {
origin: hir::OpaqueTyOrigin::TyAlias { in_assoc_ty: true },
..
}) => opaque::find_opaque_ty_constraints_for_impl_trait_in_assoc_type(tcx, def_id),
// Opaque types desugared from `impl Trait`.
ItemKind::OpaqueTy(&OpaqueTy {
origin:

View file

@ -23,6 +23,60 @@ pub fn test_opaque_hidden_types(tcx: TyCtxt<'_>) -> Result<(), ErrorGuaranteed>
res
}
/// Checks "defining uses" of opaque `impl Trait` in associated types.
/// These can only be defined by associated items of the same impl block.
#[instrument(skip(tcx), level = "debug")]
pub(super) fn find_opaque_ty_constraints_for_impl_trait_in_assoc_type(
tcx: TyCtxt<'_>,
def_id: LocalDefId,
) -> Ty<'_> {
let mut parent_def_id = def_id;
while tcx.def_kind(parent_def_id) == def::DefKind::OpaqueTy {
// Account for `type Alias = impl Trait<Foo = impl Trait>;` (#116031)
parent_def_id = tcx.local_parent(parent_def_id);
}
let impl_def_id = tcx.local_parent(parent_def_id);
match tcx.def_kind(impl_def_id) {
DefKind::Impl { .. } => {}
other => bug!("invalid impl trait in assoc type parent: {other:?}"),
}
let mut locator = TaitConstraintLocator { def_id, tcx, found: None, typeck_types: vec![] };
for &assoc_id in tcx.associated_item_def_ids(impl_def_id) {
let assoc = tcx.associated_item(assoc_id);
match assoc.kind {
ty::AssocKind::Const | ty::AssocKind::Fn => {
locator.check(assoc_id.expect_local(), ImplTraitSource::AssocTy)
}
// Associated types don't have bodies, so they can't constrain hidden types
ty::AssocKind::Type => {}
}
}
if let Some(hidden) = locator.found {
// Only check against typeck if we didn't already error
if !hidden.ty.references_error() {
for concrete_type in locator.typeck_types {
if concrete_type.ty != tcx.erase_regions(hidden.ty)
&& !(concrete_type, hidden).references_error()
{
hidden.report_mismatch(&concrete_type, def_id, tcx).emit();
}
}
}
hidden.ty
} else {
let reported = tcx.dcx().emit_err(UnconstrainedOpaqueType {
span: tcx.def_span(def_id),
name: tcx.item_name(parent_def_id.to_def_id()),
what: "impl",
});
Ty::new_error(tcx, reported)
}
}
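// For orientation, a minimal sketch of the feature this function checks
// (assumes the unstable `impl_trait_in_assoc_type` feature; illustrative only):
//
//     trait Tr {
//         type Assoc;
//         fn make() -> Self::Assoc;
//     }
//     impl Tr for () {
//         type Assoc = impl std::fmt::Debug;
//         // A "defining use": this body constrains the hidden type to `u8`.
//         fn make() -> Self::Assoc { 42u8 }
//     }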
/// Checks "defining uses" of opaque `impl Trait` types to ensure that they meet the restrictions
/// laid out for "higher-order pattern unification".
/// This ensures that inference is tractable.
@ -128,9 +182,15 @@ struct TaitConstraintLocator<'tcx> {
typeck_types: Vec<ty::OpaqueHiddenType<'tcx>>,
}
#[derive(Debug)]
enum ImplTraitSource {
AssocTy,
TyAlias,
}
impl TaitConstraintLocator<'_> {
#[instrument(skip(self), level = "debug")]
fn check(&mut self, item_def_id: LocalDefId) {
fn check(&mut self, item_def_id: LocalDefId, source: ImplTraitSource) {
// Don't try to check items that cannot possibly constrain the type.
if !self.tcx.has_typeck_results(item_def_id) {
debug!("no constraint: no typeck results");
@ -182,7 +242,13 @@ impl TaitConstraintLocator<'_> {
continue;
}
constrained = true;
if !self.tcx.opaque_types_defined_by(item_def_id).contains(&self.def_id) {
let opaque_types_defined_by = match source {
ImplTraitSource::AssocTy => {
self.tcx.impl_trait_in_assoc_types_defined_by(item_def_id)
}
ImplTraitSource::TyAlias => self.tcx.opaque_types_defined_by(item_def_id),
};
if !opaque_types_defined_by.contains(&self.def_id) {
self.tcx.dcx().emit_err(TaitForwardCompat {
span: hidden_type.span,
item_span: self
@ -240,7 +306,7 @@ impl<'tcx> intravisit::Visitor<'tcx> for TaitConstraintLocator<'tcx> {
}
fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
if let hir::ExprKind::Closure(closure) = ex.kind {
self.check(closure.def_id);
self.check(closure.def_id, ImplTraitSource::TyAlias);
}
intravisit::walk_expr(self, ex);
}
@ -248,7 +314,7 @@ impl<'tcx> intravisit::Visitor<'tcx> for TaitConstraintLocator<'tcx> {
trace!(?it.owner_id);
// The opaque type itself or its children are not within its reveal scope.
if it.owner_id.def_id != self.def_id {
self.check(it.owner_id.def_id);
self.check(it.owner_id.def_id, ImplTraitSource::TyAlias);
intravisit::walk_item(self, it);
}
}
@ -256,13 +322,13 @@ impl<'tcx> intravisit::Visitor<'tcx> for TaitConstraintLocator<'tcx> {
trace!(?it.owner_id);
// The opaque type itself or its children are not within its reveal scope.
if it.owner_id.def_id != self.def_id {
self.check(it.owner_id.def_id);
self.check(it.owner_id.def_id, ImplTraitSource::TyAlias);
intravisit::walk_impl_item(self, it);
}
}
fn visit_trait_item(&mut self, it: &'tcx TraitItem<'tcx>) {
trace!(?it.owner_id);
self.check(it.owner_id.def_id);
self.check(it.owner_id.def_id, ImplTraitSource::TyAlias);
intravisit::walk_trait_item(self, it);
}
fn visit_foreign_item(&mut self, it: &'tcx hir::ForeignItem<'tcx>) {

View file

@ -44,7 +44,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
|| self.suggest_non_zero_new_unwrap(err, expr, expected, expr_ty)
|| self.suggest_calling_boxed_future_when_appropriate(err, expr, expected, expr_ty)
|| self.suggest_no_capture_closure(err, expected, expr_ty)
|| self.suggest_boxing_when_appropriate(err, expr.span, expr.hir_id, expected, expr_ty)
|| self.suggest_boxing_when_appropriate(
err,
expr.peel_blocks().span,
expr.hir_id,
expected,
expr_ty,
)
|| self.suggest_block_to_brackets_peeling_refs(err, expr, expr_ty, expected)
|| self.suggest_copied_cloned_or_as_ref(err, expr, expr_ty, expected)
|| self.suggest_clone_for_ref(err, expr, expr_ty, expected)

View file

@ -2,7 +2,7 @@ use crate::callee::{self, DeferredCallResolution};
use crate::errors::CtorIsPrivate;
use crate::method::{self, MethodCallee, SelfSource};
use crate::rvalue_scopes;
use crate::{BreakableCtxt, Diverges, Expectation, FnCtxt, RawTy};
use crate::{BreakableCtxt, Diverges, Expectation, FnCtxt, LoweredTy};
use rustc_data_structures::captures::Captures;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{Applicability, Diagnostic, ErrorGuaranteed, MultiSpan, StashKey};
@ -373,14 +373,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
pub fn handle_raw_ty(&self, span: Span, ty: Ty<'tcx>) -> RawTy<'tcx> {
RawTy { raw: ty, normalized: self.normalize(span, ty) }
}
pub fn to_ty(&self, ast_t: &hir::Ty<'tcx>) -> RawTy<'tcx> {
pub fn to_ty(&self, ast_t: &hir::Ty<'tcx>) -> LoweredTy<'tcx> {
let t = self.astconv().ast_ty_to_ty(ast_t);
self.register_wf_obligation(t.into(), ast_t.span, traits::WellFormed(None));
self.handle_raw_ty(ast_t.span, t)
LoweredTy::from_raw(self, ast_t.span, t)
}
pub fn to_ty_saving_user_provided_ty(&self, ast_ty: &hir::Ty<'tcx>) -> Ty<'tcx> {
@ -396,7 +392,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
ty.normalized
}
pub(super) fn user_args_for_adt(ty: RawTy<'tcx>) -> UserArgs<'tcx> {
pub(super) fn user_args_for_adt(ty: LoweredTy<'tcx>) -> UserArgs<'tcx> {
match (ty.raw.kind(), ty.normalized.kind()) {
(ty::Adt(_, args), _) => UserArgs { args, user_self_ty: None },
(_, ty::Adt(adt, args)) => UserArgs {
@ -801,7 +797,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
hir_id: hir::HirId,
span: Span,
args: Option<&'tcx [hir::Expr<'tcx>]>,
) -> (Res, Option<RawTy<'tcx>>, &'tcx [hir::PathSegment<'tcx>]) {
) -> (Res, Option<LoweredTy<'tcx>>, &'tcx [hir::PathSegment<'tcx>]) {
debug!(
"resolve_ty_and_res_fully_qualified_call: qpath={:?} hir_id={:?} span={:?}",
qpath, hir_id, span
@ -825,7 +821,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// We manually call `register_wf_obligation` in the success path
// below.
let ty = self.astconv().ast_ty_to_ty_in_path(qself);
(self.handle_raw_ty(span, ty), qself, segment)
(LoweredTy::from_raw(self, span, ty), qself, segment)
}
QPath::LangItem(..) => {
bug!("`resolve_ty_and_res_fully_qualified_call` called on `LangItem`")
@ -1074,7 +1070,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
pub fn instantiate_value_path(
&self,
segments: &'tcx [hir::PathSegment<'tcx>],
self_ty: Option<RawTy<'tcx>>,
self_ty: Option<LoweredTy<'tcx>>,
res: Res,
span: Span,
hir_id: hir::HirId,
@ -1201,8 +1197,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
path_segs.last().is_some_and(|PathSeg(def_id, _)| tcx.generics_of(*def_id).has_self);
let (res, self_ctor_args) = if let Res::SelfCtor(impl_def_id) = res {
let ty =
self.handle_raw_ty(span, tcx.at(span).type_of(impl_def_id).instantiate_identity());
let ty = LoweredTy::from_raw(
self,
span,
tcx.at(span).type_of(impl_def_id).instantiate_identity(),
);
match ty.normalized.ty_adt_def() {
Some(adt_def) if adt_def.has_ctor() => {
let (ctor_kind, ctor_def_id) = adt_def.non_enum_variant().ctor.unwrap();

View file

@ -6,7 +6,7 @@ use crate::method::MethodCallee;
use crate::TupleArgumentsFlag::*;
use crate::{errors, Expectation::*};
use crate::{
struct_span_code_err, BreakableCtxt, Diverges, Expectation, FnCtxt, Needs, RawTy,
struct_span_code_err, BreakableCtxt, Diverges, Expectation, FnCtxt, LoweredTy, Needs,
TupleArgumentsFlag,
};
use itertools::Itertools;
@ -1792,12 +1792,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
qpath: &QPath<'tcx>,
path_span: Span,
hir_id: hir::HirId,
) -> (Res, RawTy<'tcx>) {
) -> (Res, LoweredTy<'tcx>) {
match *qpath {
QPath::Resolved(ref maybe_qself, path) => {
let self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself).raw);
let ty = self.astconv().res_to_ty(self_ty, path, hir_id, true);
(path.res, self.handle_raw_ty(path_span, ty))
(path.res, LoweredTy::from_raw(self, path_span, ty))
}
QPath::TypeRelative(qself, segment) => {
let ty = self.to_ty(qself);
@ -1808,7 +1808,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let ty = result
.map(|(ty, _, _)| ty)
.unwrap_or_else(|guar| Ty::new_error(self.tcx(), guar));
let ty = self.handle_raw_ty(path_span, ty);
let ty = LoweredTy::from_raw(self, path_span, ty);
let result = result.map(|(_, kind, def_id)| (kind, def_id));
// Write back the new resolution.
@ -1818,7 +1818,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
QPath::LangItem(lang_item, span) => {
let (res, ty) = self.resolve_lang_item_path(lang_item, span, hir_id);
(res, self.handle_raw_ty(path_span, ty))
(res, LoweredTy::from_raw(self, path_span, ty))
}
}
}

View file

@ -353,14 +353,22 @@ impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
}
}
/// Represents a user-provided type in the raw form (never normalized).
/// The `ty` representation of a user-provided type. Depending on the use-site,
/// we want to use either the unnormalized or the normalized form of this type.
///
/// This is a bridge between the interface of `AstConv`, which outputs a raw `Ty`,
/// and the API in this module, which expects `Ty` to be fully normalized.
#[derive(Clone, Copy, Debug)]
pub struct RawTy<'tcx> {
pub struct LoweredTy<'tcx> {
/// The unnormalized type provided by the user.
pub raw: Ty<'tcx>,
/// The normalized form of `raw`, stored here for efficiency.
pub normalized: Ty<'tcx>,
}
impl<'tcx> LoweredTy<'tcx> {
pub fn from_raw(fcx: &FnCtxt<'_, 'tcx>, span: Span, raw: Ty<'tcx>) -> LoweredTy<'tcx> {
LoweredTy { raw, normalized: fcx.normalize(span, raw) }
}
}

View file

@ -49,7 +49,7 @@ use crate::check::check_fn;
use crate::coercion::DynamicCoerceMany;
use crate::diverges::Diverges;
use crate::expectation::Expectation;
use crate::fn_ctxt::RawTy;
use crate::fn_ctxt::LoweredTy;
use crate::gather_locals::GatherLocalsVisitor;
use rustc_data_structures::unord::UnordSet;
use rustc_errors::{struct_span_code_err, ErrorGuaranteed};

View file

@ -746,11 +746,13 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
let (xform_self_ty, xform_ret_ty) = self.xform_self_ty(item, impl_ty, impl_args);
debug!("xform_self_ty: {:?}, xform_ret_ty: {:?}", xform_self_ty, xform_ret_ty);
// We can't use normalize_associated_types_in as it will pollute the
// We can't use `FnCtxt::normalize` as it will pollute the
// fcx's fulfillment context after this probe is over.
//
// Note: we only normalize `xform_self_ty` here since the normalization
// of the return type can lead to inference results that prohibit
// valid candidates from being found, see issue #85671
//
// FIXME Postponing the normalization of the return type likely only hides a deeper bug,
// which might be caused by the `param_env` itself. The clauses of the `param_env`
// maybe shouldn't include `Param`s, but rather fresh variables or be canonicalized,

View file

@ -1,5 +1,5 @@
use crate::gather_locals::DeclOrigin;
use crate::{errors, FnCtxt, RawTy};
use crate::{errors, FnCtxt, LoweredTy};
use rustc_ast as ast;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{
@ -891,7 +891,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&self,
pat: &Pat<'tcx>,
qpath: &hir::QPath<'_>,
path_resolution: (Res, Option<RawTy<'tcx>>, &'tcx [hir::PathSegment<'tcx>]),
path_resolution: (Res, Option<LoweredTy<'tcx>>, &'tcx [hir::PathSegment<'tcx>]),
expected: Ty<'tcx>,
ti: TopInfo<'tcx>,
) -> Ty<'tcx> {

View file

@ -284,7 +284,7 @@ impl<T: Idx> BitSet<T> {
not_already
}
fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T> {
pub fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T> {
let (start, end) = inclusive_start_end(range, self.domain_size)?;
let (start_word_index, _) = word_index_and_mask(start);
let (end_word_index, end_mask) = word_index_and_mask(end);
@ -1299,7 +1299,7 @@ impl<T: Idx> SparseBitSet<T> {
}
impl<T: Idx + Ord> SparseBitSet<T> {
fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T> {
pub fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T> {
let mut last_leq = None;
for e in self.iter() {
if range.contains(e) {
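With `last_set_in` now public, a usage sketch (illustrative; `usize` implements `rustc_index::Idx`):

use rustc_index::bit_set::BitSet;

let mut set: BitSet<usize> = BitSet::new_empty(64);
set.insert(3);
set.insert(17);
assert_eq!(set.last_set_in(0..=20), Some(17)); // highest bit set in the range
assert_eq!(set.last_set_in(18..=63), None);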

View file

@ -294,8 +294,9 @@ impl<T> Trait<T> for X {
);
}
}
(ty::Alias(ty::Opaque, alias), _) | (_, ty::Alias(ty::Opaque, alias))
if alias.def_id.is_local()
(_, ty::Alias(ty::Opaque, opaque_ty))
| (ty::Alias(ty::Opaque, opaque_ty), _) => {
if opaque_ty.def_id.is_local()
&& matches!(
tcx.def_kind(body_owner_def_id),
DefKind::Fn
@ -303,21 +304,74 @@ impl<T> Trait<T> for X {
| DefKind::Const
| DefKind::AssocFn
| DefKind::AssocConst
) =>
{
if tcx.is_type_alias_impl_trait(alias.def_id) {
if !tcx
)
&& tcx.is_type_alias_impl_trait(opaque_ty.def_id)
&& !tcx
.opaque_types_defined_by(body_owner_def_id.expect_local())
.contains(&alias.def_id.expect_local())
.contains(&opaque_ty.def_id.expect_local())
{
let sp = tcx
.def_ident_span(body_owner_def_id)
.unwrap_or_else(|| tcx.def_span(body_owner_def_id));
diag.span_note(
sp,
"\
this item must have the opaque type in its signature \
in order to be able to register hidden types",
"this item must have the opaque type in its signature in order to \
be able to register hidden types",
);
}
// If the two arms of an if/else can be coerced to a trait object, provide a
// structured suggestion.
let ObligationCauseCode::IfExpression(cause) = cause.code() else {
return;
};
let hir::Node::Block(blk) = self.tcx.hir_node(cause.then_id) else {
return;
};
let Some(then) = blk.expr else {
return;
};
let hir::Node::Block(blk) = self.tcx.hir_node(cause.else_id) else {
return;
};
let Some(else_) = blk.expr else {
return;
};
let expected = match values.found.kind() {
ty::Alias(..) => values.expected,
_ => values.found,
};
let preds = tcx.explicit_item_bounds(opaque_ty.def_id);
for (pred, _span) in preds.skip_binder() {
let ty::ClauseKind::Trait(trait_predicate) = pred.kind().skip_binder()
else {
continue;
};
if trait_predicate.polarity != ty::ImplPolarity::Positive {
continue;
}
let def_id = trait_predicate.def_id();
let mut impl_def_ids = vec![];
tcx.for_each_relevant_impl(def_id, expected, |did| {
impl_def_ids.push(did)
});
if let [_] = &impl_def_ids[..] {
let trait_name = tcx.item_name(def_id);
diag.multipart_suggestion(
format!(
"`{expected}` implements `{trait_name}` so you can box \
both arms and coerce to the trait object \
`Box<dyn {trait_name}>`",
),
vec![
(then.span.shrink_to_lo(), "Box::new(".to_string()),
(
then.span.shrink_to_hi(),
format!(") as Box<dyn {}>", tcx.def_path_str(def_id)),
),
(else_.span.shrink_to_lo(), "Box::new(".to_string()),
(else_.span.shrink_to_hi(), ")".to_string()),
],
MachineApplicable,
);
}
}
@ -330,6 +384,38 @@ impl<T> Trait<T> for X {
);
}
}
(ty::Adt(_, _), ty::Adt(def, args))
if let ObligationCauseCode::IfExpression(cause) = cause.code()
&& let hir::Node::Block(blk) = self.tcx.hir_node(cause.then_id)
&& let Some(then) = blk.expr
&& def.is_box()
&& let boxed_ty = args.type_at(0)
&& let ty::Dynamic(t, _, _) = boxed_ty.kind()
&& let Some(def_id) = t.principal_def_id()
&& let mut impl_def_ids = vec![]
&& let _ =
tcx.for_each_relevant_impl(def_id, values.expected, |did| {
impl_def_ids.push(did)
})
&& let [_] = &impl_def_ids[..] =>
{
// We have divergent if/else arms where the expected value is a type that
// implements the trait of the found boxed trait object.
diag.multipart_suggestion(
format!(
"`{}` implements `{}` so you can box it to coerce to the trait \
object `{}`",
values.expected,
tcx.item_name(def_id),
values.found,
),
vec![
(then.span.shrink_to_lo(), "Box::new(".to_string()),
(then.span.shrink_to_hi(), ")".to_string()),
],
MachineApplicable,
);
}
_ => {}
}
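// Illustrative example (not part of the diff) of code that now receives the
// structured suggestion, assuming exactly one relevant impl is found:
//
//     fn f(b: bool) -> Box<dyn std::fmt::Display> {
//         if b { Box::new(0i32) } else { 1u32 } //~ ERROR mismatched types
//     }
//     // suggested fix: wrap the second arm as `Box::new(1u32)`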
debug!(

View file

@ -416,14 +416,14 @@ pub enum ValidationErrorKind<'tcx> {
PartialPointer,
PtrToUninhabited { ptr_kind: PointerKind, ty: Ty<'tcx> },
PtrToStatic { ptr_kind: PointerKind },
PtrToMut { ptr_kind: PointerKind },
MutableRefInConst,
MutableRefToImmutable,
UnsafeCellInImmutable,
NullFnPtr,
NeverVal,
NullablePtrOutOfRange { range: WrappingRange, max_value: u128 },
PtrOutOfRange { range: WrappingRange, max_value: u128 },
OutOfRange { value: String, range: WrappingRange, max_value: u128 },
UnsafeCell,
UninhabitedVal { ty: Ty<'tcx> },
InvalidEnumTag { value: String },
UninhabitedEnumVariant,

View file

@ -277,6 +277,12 @@ impl From<AllocId> for Pointer {
Pointer::new(alloc_id.into(), Size::ZERO)
}
}
impl From<CtfeProvenance> for Pointer {
#[inline(always)]
fn from(prov: CtfeProvenance) -> Self {
Pointer::new(prov, Size::ZERO)
}
}
impl<Prov> From<Pointer<Prov>> for Pointer<Option<Prov>> {
#[inline(always)]

View file

@ -343,6 +343,15 @@ rustc_queries! {
}
}
query impl_trait_in_assoc_types_defined_by(
key: LocalDefId
) -> &'tcx ty::List<LocalDefId> {
desc {
|tcx| "computing the opaque types defined by `{}`",
tcx.def_path_str(key.to_def_id())
}
}
/// Returns the list of bounds that can be used for
/// `SelectionCandidate::ProjectionCandidate(_)` and
/// `ProjectionTyCandidate::TraitDef`.

View file

@ -530,12 +530,12 @@ impl<'tcx> Instance<'tcx> {
def_id: DefId,
args: ty::GenericArgsRef<'tcx>,
requested_kind: ty::ClosureKind,
) -> Option<Instance<'tcx>> {
) -> Instance<'tcx> {
let actual_kind = args.as_closure().kind();
match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
Ok(true) => Instance::fn_once_adapter_instance(tcx, def_id, args),
_ => Some(Instance::new(def_id, args)),
_ => Instance::new(def_id, args),
}
}
@ -550,7 +550,7 @@ impl<'tcx> Instance<'tcx> {
tcx: TyCtxt<'tcx>,
closure_did: DefId,
args: ty::GenericArgsRef<'tcx>,
) -> Option<Instance<'tcx>> {
) -> Instance<'tcx> {
let fn_once = tcx.require_lang_item(LangItem::FnOnce, None);
let call_once = tcx
.associated_items(fn_once)
@ -564,14 +564,12 @@ impl<'tcx> Instance<'tcx> {
let self_ty = Ty::new_closure(tcx, closure_did, args);
let sig = args.as_closure().sig();
let sig =
tcx.try_normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig).ok()?;
assert_eq!(sig.inputs().len(), 1);
let args = tcx.mk_args_trait(self_ty, [sig.inputs()[0].into()]);
let tupled_inputs_ty = args.as_closure().sig().map_bound(|sig| sig.inputs()[0]);
let tupled_inputs_ty = tcx.instantiate_bound_regions_with_erased(tupled_inputs_ty);
let args = tcx.mk_args_trait(self_ty, [tupled_inputs_ty.into()]);
debug!(?self_ty, ?sig);
Some(Instance { def, args })
debug!(?self_ty, args=?tupled_inputs_ty.tuple_fields());
Instance { def, args }
}
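// A sketch of why the adapter shim exists (illustrative, not from the diff):
// an `Fn` closure must still be callable through the `FnOnce` interface.
//
//     fn call_once<F: FnOnce()>(f: F) { f() }
//     let c = || println!("hi"); // closure kind: Fn
//     call_once(c);              // dispatched via the FnOnce adapter shim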
pub fn try_resolve_item_for_coroutine(

View file

@ -804,17 +804,12 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
}
} else {
p!(print_def_path(did, args));
p!(" upvar_tys=(");
if !args.as_coroutine().is_valid() {
p!("unavailable");
} else {
self.comma_sep(args.as_coroutine().upvar_tys().iter())?;
}
p!(")");
if args.as_coroutine().is_valid() {
p!(" ", print(args.as_coroutine().witness()));
}
p!(
" upvar_tys=",
print(args.as_coroutine().tupled_upvars_ty()),
" witness=",
print(args.as_coroutine().witness())
);
}
p!("}}")
@ -868,19 +863,14 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
}
} else {
p!(print_def_path(did, args));
if !args.as_closure().is_valid() {
p!(" closure_args=(unavailable)");
p!(write(" args={}", args.print_as_list()));
} else {
p!(" closure_kind_ty=", print(args.as_closure().kind_ty()));
p!(
" closure_kind_ty=",
print(args.as_closure().kind_ty()),
" closure_sig_as_fn_ptr_ty=",
print(args.as_closure().sig_as_fn_ptr_ty())
print(args.as_closure().sig_as_fn_ptr_ty()),
" upvar_tys=",
print(args.as_closure().tupled_upvars_ty())
);
p!(" upvar_tys=(");
self.comma_sep(args.as_closure().upvar_tys().iter())?;
p!(")");
}
}
p!("}}");
}

View file

@ -242,9 +242,15 @@ pub struct ClosureArgs<'tcx> {
/// Struct returned by `split()`.
pub struct ClosureArgsParts<'tcx> {
/// This is the args of the typeck root.
pub parent_args: &'tcx [GenericArg<'tcx>],
/// Represents the maximum calling capability of the closure.
pub closure_kind_ty: Ty<'tcx>,
/// Captures the closure's signature. This closure signature is "tupled", and
/// thus has a peculiar signature of `extern "rust-call" fn((Args, ...)) -> Ty`.
pub closure_sig_as_fn_ptr_ty: Ty<'tcx>,
/// The upvars captured by the closure. Remains an inference variable
/// until the upvar analysis, which happens late in HIR typeck.
pub tupled_upvars_ty: Ty<'tcx>,
}
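// Illustrative: for `|a: i32, b: u8| -> bool` capturing `s: String`,
// `closure_kind_ty` encodes `Fn`/`FnMut`/`FnOnce`,
// `closure_sig_as_fn_ptr_ty` is `extern "rust-call" fn((i32, u8)) -> bool`,
// and `tupled_upvars_ty` is `(String,)`.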
@ -277,15 +283,6 @@ impl<'tcx> ClosureArgs<'tcx> {
}
}
/// Returns `true` only if enough of the synthetic types are known to
/// allow using all of the methods on `ClosureArgs` without panicking.
///
/// Used primarily by `ty::print::pretty` to be able to handle closure
/// types that haven't had their synthetic types substituted in.
pub fn is_valid(self) -> bool {
self.args.len() >= 3 && matches!(self.split().tupled_upvars_ty.kind(), Tuple(_))
}
/// Returns the substitutions of the closure's parent.
pub fn parent_args(self) -> &'tcx [GenericArg<'tcx>] {
self.split().parent_args
@ -296,9 +293,9 @@ impl<'tcx> ClosureArgs<'tcx> {
/// empty iterator is returned.
#[inline]
pub fn upvar_tys(self) -> &'tcx List<Ty<'tcx>> {
match self.tupled_upvars_ty().kind() {
match *self.tupled_upvars_ty().kind() {
TyKind::Error(_) => ty::List::empty(),
TyKind::Tuple(..) => self.tupled_upvars_ty().tuple_fields(),
TyKind::Tuple(tys) => tys,
TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
}
@ -337,10 +334,9 @@ impl<'tcx> ClosureArgs<'tcx> {
/// Extracts the signature from the closure.
pub fn sig(self) -> ty::PolyFnSig<'tcx> {
let ty = self.sig_as_fn_ptr_ty();
match ty.kind() {
ty::FnPtr(sig) => *sig,
_ => bug!("closure_sig_as_fn_ptr_ty is not a fn-ptr: {:?}", ty.kind()),
match *self.sig_as_fn_ptr_ty().kind() {
ty::FnPtr(sig) => sig,
ty => bug!("closure_sig_as_fn_ptr_ty is not a fn-ptr: {ty:?}"),
}
}
@ -356,11 +352,17 @@ pub struct CoroutineArgs<'tcx> {
}
pub struct CoroutineArgsParts<'tcx> {
/// This is the args of the typeck root.
pub parent_args: &'tcx [GenericArg<'tcx>],
pub resume_ty: Ty<'tcx>,
pub yield_ty: Ty<'tcx>,
pub return_ty: Ty<'tcx>,
/// The interior type of the coroutine.
/// Represents all types that are stored in locals
/// in the coroutine's body.
pub witness: Ty<'tcx>,
/// The upvars captured by the closure. Remains an inference variable
/// until the upvar analysis, which happens late in HIR typeck.
pub tupled_upvars_ty: Ty<'tcx>,
}
@ -397,15 +399,6 @@ impl<'tcx> CoroutineArgs<'tcx> {
}
}
/// Returns `true` only if enough of the synthetic types are known to
/// allow using all of the methods on `CoroutineArgs` without panicking.
///
/// Used primarily by `ty::print::pretty` to be able to handle coroutine
/// types that haven't had their synthetic types substituted in.
pub fn is_valid(self) -> bool {
self.args.len() >= 5 && matches!(self.split().tupled_upvars_ty.kind(), Tuple(_))
}
/// Returns the substitutions of the coroutine's parent.
pub fn parent_args(self) -> &'tcx [GenericArg<'tcx>] {
self.split().parent_args
@ -425,9 +418,9 @@ impl<'tcx> CoroutineArgs<'tcx> {
/// empty iterator is returned.
#[inline]
pub fn upvar_tys(self) -> &'tcx List<Ty<'tcx>> {
match self.tupled_upvars_ty().kind() {
match *self.tupled_upvars_ty().kind() {
TyKind::Error(_) => ty::List::empty(),
TyKind::Tuple(..) => self.tupled_upvars_ty().tuple_fields(),
TyKind::Tuple(tys) => tys,
TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
}

View file

@ -20,6 +20,10 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
@call(mir_storage_dead, args) => {
Ok(StatementKind::StorageDead(self.parse_local(args[0])?))
},
@call(mir_assume, args) => {
let op = self.parse_operand(args[0])?;
Ok(StatementKind::Intrinsic(Box::new(NonDivergingIntrinsic::Assume(op))))
},
@call(mir_deinit, args) => {
Ok(StatementKind::Deinit(Box::new(self.parse_place(args[0])?)))
},
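A sketch of the surface syntax this parses (assumes the unstable `custom_mir` and `core_intrinsics` features):

#![feature(custom_mir, core_intrinsics)]
use core::intrinsics::mir::*;

#[custom_mir(dialect = "runtime", phase = "optimized")]
fn assume_true(x: bool) -> bool {
    mir!({
        Assume(x); // becomes StatementKind::Intrinsic(NonDivergingIntrinsic::Assume)
        RET = x;
        Return()
    })
}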

View file

@ -305,7 +305,10 @@ impl<'a, 'tcx> DefinitelyInitializedPlaces<'a, 'tcx> {
}
impl<'tcx> AnalysisDomain<'tcx> for MaybeInitializedPlaces<'_, 'tcx> {
/// There can be many more `MovePathIndex` than there are locals in a MIR body.
/// We use a chunked bitset to avoid paying too high a memory footprint.
type Domain = MaybeReachable<ChunkedBitSet<MovePathIndex>>;
const NAME: &'static str = "maybe_init";
fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
@ -437,6 +440,8 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeInitializedPlaces<'_, 'tcx> {
}
impl<'tcx> AnalysisDomain<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> {
/// There can be many more `MovePathIndex` than there are locals in a MIR body.
/// We use a chunked bitset to avoid paying too high a memory footprint.
type Domain = ChunkedBitSet<MovePathIndex>;
const NAME: &'static str = "maybe_uninit";
@ -636,6 +641,8 @@ impl<'tcx> GenKillAnalysis<'tcx> for DefinitelyInitializedPlaces<'_, 'tcx> {
}
impl<'tcx> AnalysisDomain<'tcx> for EverInitializedPlaces<'_, 'tcx> {
/// There can be many more `InitIndex` than there are locals in a MIR body.
/// We use a chunked bitset to avoid paying too high a memory footprint.
type Domain = ChunkedBitSet<InitIndex>;
const NAME: &'static str = "ever_init";

View file

@ -1,4 +1,4 @@
use rustc_index::bit_set::{BitSet, ChunkedBitSet};
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::{
self, CallReturnPlaces, Local, Location, Place, StatementKind, TerminatorEdges,
@ -26,14 +26,14 @@ use crate::{Analysis, AnalysisDomain, Backward, GenKill, GenKillAnalysis};
pub struct MaybeLiveLocals;
impl<'tcx> AnalysisDomain<'tcx> for MaybeLiveLocals {
type Domain = ChunkedBitSet<Local>;
type Domain = BitSet<Local>;
type Direction = Backward;
const NAME: &'static str = "liveness";
fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
// bottom = not live
ChunkedBitSet::new_empty(body.local_decls.len())
BitSet::new_empty(body.local_decls.len())
}
fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) {
@ -233,14 +233,14 @@ impl<'a> MaybeTransitiveLiveLocals<'a> {
}
impl<'a, 'tcx> AnalysisDomain<'tcx> for MaybeTransitiveLiveLocals<'a> {
type Domain = ChunkedBitSet<Local>;
type Domain = BitSet<Local>;
type Direction = Backward;
const NAME: &'static str = "transitive liveness";
fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
// bottom = not live
ChunkedBitSet::new_empty(body.local_decls.len())
BitSet::new_empty(body.local_decls.len())
}
fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) {

View file

@ -1,5 +1,5 @@
use crate::framework::{visit_results, ResultsVisitable, ResultsVisitor};
use rustc_index::bit_set::ChunkedBitSet;
use rustc_index::bit_set::BitSet;
use rustc_index::interval::SparseIntervalMatrix;
use rustc_index::Idx;
use rustc_index::IndexVec;
@ -102,7 +102,7 @@ pub fn save_as_intervals<'tcx, N, R>(
) -> SparseIntervalMatrix<N, PointIndex>
where
N: Idx,
R: ResultsVisitable<'tcx, FlowState = ChunkedBitSet<N>>,
R: ResultsVisitable<'tcx, FlowState = BitSet<N>>,
{
let values = SparseIntervalMatrix::new(elements.num_points());
let mut visitor = Visitor { elements, values };
@ -124,7 +124,7 @@ impl<'mir, 'tcx, R, N> ResultsVisitor<'mir, 'tcx, R> for Visitor<'_, N>
where
N: Idx,
{
type FlowState = ChunkedBitSet<N>;
type FlowState = BitSet<N>;
fn visit_statement_after_primary_effect(
&mut self,

View file

@ -12,7 +12,7 @@ use crate::MoveDataParamEnv;
use crate::{Analysis, JoinSemiLattice, ResultsCursor};
use rustc_ast::MetaItem;
use rustc_hir::def_id::DefId;
use rustc_index::bit_set::ChunkedBitSet;
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::MirPass;
use rustc_middle::mir::{self, Body, Local, Location};
use rustc_middle::ty::{self, Ty, TyCtxt};
@ -275,7 +275,7 @@ impl<'tcx> RustcPeekAt<'tcx> for MaybeLiveLocals {
&self,
tcx: TyCtxt<'tcx>,
place: mir::Place<'tcx>,
flow_state: &ChunkedBitSet<Local>,
flow_state: &BitSet<Local>,
call: PeekCall,
) {
info!(?place, "peek_at");

View file

@ -566,11 +566,6 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
cost: &CostChecker<'_, 'tcx>,
depth: usize,
) {
let register_opportunity = |c: Condition| {
debug!(?bb, ?c.target, "register");
self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
};
let term = self.body.basic_blocks[bb].terminator();
let place_to_flood = match term.kind {
// We come from a target, so those are not possible.
@ -592,16 +587,8 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
// Flood the overwritten place, and progress through.
TerminatorKind::Drop { place: destination, .. }
| TerminatorKind::Call { destination, .. } => Some(destination),
// Treat as an `assume(cond == expected)`.
TerminatorKind::Assert { ref cond, expected, .. } => {
if let Some(place) = cond.place()
&& let Some(conditions) = state.try_get(place.as_ref(), self.map)
{
let expected = if expected { ScalarInt::TRUE } else { ScalarInt::FALSE };
conditions.iter_matches(expected).for_each(register_opportunity);
}
None
}
// Ignore, as this can be a no-op at codegen time.
TerminatorKind::Assert { .. } => None,
};
// We can recurse through this terminator.

View file

@ -1,7 +1,7 @@
//! See the docs for [`RenameReturnPlace`].
use rustc_hir::Mutability;
use rustc_index::bit_set::HybridBitSet;
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::visit::{MutVisitor, NonUseContext, PlaceContext, Visitor};
use rustc_middle::mir::{self, BasicBlock, Local, Location};
use rustc_middle::ty::TyCtxt;
@ -123,7 +123,7 @@ fn find_local_assigned_to_return_place(
body: &mut mir::Body<'_>,
) -> Option<Local> {
let mut block = start;
let mut seen = HybridBitSet::new_empty(body.basic_blocks.len());
let mut seen = BitSet::new_empty(body.basic_blocks.len());
// Iterate as long as `block` has exactly one predecessor that we have not yet visited.
while seen.insert(block) {

View file

@ -7,14 +7,10 @@ pub struct RemoveStorageMarkers;
impl<'tcx> MirPass<'tcx> for RemoveStorageMarkers {
fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
sess.mir_opt_level() > 0
}
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
if tcx.sess.emit_lifetime_markers() {
return;
sess.mir_opt_level() > 0 && !sess.emit_lifetime_markers()
}
fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
trace!("Running RemoveStorageMarkers on {:?}", body.source);
for data in body.basic_blocks.as_mut_preserves_cfg() {
data.statements.retain(|statement| match statement.kind {

View file

@ -783,8 +783,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> {
def_id,
args,
ty::ClosureKind::FnOnce,
)
.expect("failed to normalize and resolve closure during codegen");
);
if should_codegen_locally(self.tcx, &instance) {
self.output.push(create_fn_mono_item(self.tcx, instance, span));
}

View file

@ -464,7 +464,10 @@ impl<'tcx> Context for TablesWrapper<'tcx> {
let def_id = def.0.internal(&mut *tables, tcx);
let args_ref = args.internal(&mut *tables, tcx);
let closure_kind = kind.internal(&mut *tables, tcx);
Instance::resolve_closure(tables.tcx, def_id, args_ref, closure_kind).stable(&mut *tables)
Some(
Instance::resolve_closure(tables.tcx, def_id, args_ref, closure_kind)
.stable(&mut *tables),
)
}
fn eval_instance(&self, def: InstanceDef, const_ty: Ty) -> Result<Allocation, Error> {

View file

@ -1028,6 +1028,7 @@ symbols! {
minnumf32,
minnumf64,
mips_target_feature,
mir_assume,
mir_basic_block,
mir_call,
mir_cast_transmute,

View file

@ -8,6 +8,7 @@ pub fn opts() -> TargetOptions {
has_rpath: true,
position_independent_executables: true,
relro_level: RelroLevel::Full,
has_thread_local: true,
default_dwarf_version: 2,
..Default::default()
}

View file

@ -9,6 +9,7 @@ pub fn opts() -> TargetOptions {
crt_static_respected: true,
position_independent_executables: true,
relro_level: RelroLevel::Full,
has_thread_local: true,
abi_return_struct_as_int: true,
default_dwarf_version: 2,
..Default::default()

View file

@ -9,6 +9,7 @@ pub fn opts() -> TargetOptions {
has_rpath: true,
position_independent_executables: true,
relro_level: RelroLevel::Full,
has_thread_local: true,
use_ctors_section: true,
default_dwarf_version: 2,
..Default::default()

View file

@ -1597,6 +1597,7 @@ supported_targets! {
("x86_64-unikraft-linux-musl", x86_64_unikraft_linux_musl),
("riscv32i-unknown-none-elf", riscv32i_unknown_none_elf),
("riscv32im-risc0-zkvm-elf", riscv32im_risc0_zkvm_elf),
("riscv32im-unknown-none-elf", riscv32im_unknown_none_elf),
("riscv32imc-unknown-none-elf", riscv32imc_unknown_none_elf),
("riscv32imc-esp-espidf", riscv32imc_esp_espidf),

View file

@ -0,0 +1,36 @@
use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel};
use crate::spec::{Target, TargetOptions};
pub fn target() -> Target {
Target {
data_layout: "e-m:e-p:32:32-i64:64-n32-S128".into(),
llvm_target: "riscv32".into(),
pointer_width: 32,
arch: "riscv32".into(),
options: TargetOptions {
os: "zkvm".into(),
vendor: "risc0".into(),
linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
cpu: "generic-rv32".into(),
// Some crates (*cough* crossbeam) assume you have 64-bit
// atomics if the target name is not in a hardcoded list.
// Since the zkVM is single-threaded and every operation is
// atomic, it is sound to simply claim support for 64-bit
// atomics.
max_atomic_width: Some(64),
atomic_cas: true,
features: "+m".into(),
executables: true,
panic_strategy: PanicStrategy::Abort,
relocation_model: RelocModel::Static,
emit_debug_gdb_scripts: false,
eh_frame_header: false,
singlethread: true,
..Default::default()
},
}
}

View file

@ -1,7 +1,7 @@
//! Computes a normalizes-to (projection) goal for inherent associated types,
//! `#![feature(lazy_type_alias)]` and `#![feature(type_alias_impl_trait)]`.
//!
//! Since a weak alias is not ambiguous, this just computes the `type_of` of
//! Since a weak alias is never ambiguous, this just computes the `type_of` of
//! the alias and registers the where-clauses of the type alias.
use rustc_middle::traits::solve::{Certainty, Goal, GoalSource, QueryResult};
use rustc_middle::ty;

View file

@ -3152,6 +3152,10 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
],
Applicability::MachineApplicable,
);
} else {
// FIXME: we may suggest array::repeat instead
err.help("consider using `core::array::from_fn` to initialize the array");
err.help("see https://doc.rust-lang.org/stable/std/array/fn.from_fn.html# for more information");
}
if self.tcx.sess.is_nightly_build()

View file

@ -232,32 +232,12 @@ pub fn dtorck_constraint_for_ty_inner<'tcx>(
Ok::<_, NoSolution>(())
})?,
ty::Closure(_, args) => {
if !args.as_closure().is_valid() {
// By the time this code runs, all type variables ought to
// be fully resolved.
tcx.dcx().span_delayed_bug(
span,
format!("upvar_tys for closure not found. Expected capture information for closure {ty}",),
);
return Err(NoSolution);
}
rustc_data_structures::stack::ensure_sufficient_stack(|| {
ty::Closure(_, args) => rustc_data_structures::stack::ensure_sufficient_stack(|| {
for ty in args.as_closure().upvar_tys() {
dtorck_constraint_for_ty_inner(
tcx,
param_env,
span,
depth + 1,
ty,
constraints,
)?;
dtorck_constraint_for_ty_inner(tcx, param_env, span, depth + 1, ty, constraints)?;
}
Ok::<_, NoSolution>(())
})?
}
})?,
ty::Coroutine(_, args) => {
// rust-lang/rust#49918: types can be constructed, stored
@ -283,15 +263,6 @@ pub fn dtorck_constraint_for_ty_inner<'tcx>(
// derived from lifetimes attached to the upvars and resume
// argument, and we *do* incorporate those here.
let args = args.as_coroutine();
if !args.is_valid() {
// By the time this code runs, all type variables ought to
// be fully resolved.
tcx.dcx().span_delayed_bug(
span,
format!("upvar_tys for coroutine not found. Expected capture information for coroutine {ty}",),
);
return Err(NoSolution);
}
// While we conservatively assume that all coroutines require drop
// to avoid query cycles during MIR building, we can check the actual

View file

@ -141,6 +141,13 @@ where
infcx: &InferCtxt<'tcx>,
span: Span,
) -> Result<TypeOpOutput<'tcx, Self>, ErrorGuaranteed> {
// In the new trait solver, query type ops are performed locally. This
// is because query type ops currently use the old canonicalizer, and
// that doesn't preserve things like opaques which have been registered
// during MIR typeck. Even after the old canonicalizer is gone, it's
// probably worthwhile just keeping this run-locally logic, since we
// probably don't gain much from caching here, given that the new solver
// caches internally.
if infcx.next_trait_solver() {
return Ok(scrape_region_constraints(
infcx,

View file

@ -265,7 +265,12 @@ fn resolve_associated_item<'tcx>(
match *rcvr_args.type_at(0).kind() {
ty::Closure(closure_def_id, args) => {
let trait_closure_kind = tcx.fn_trait_kind_from_def_id(trait_id).unwrap();
Instance::resolve_closure(tcx, closure_def_id, args, trait_closure_kind)
Some(Instance::resolve_closure(
tcx,
closure_def_id,
args,
trait_closure_kind,
))
}
ty::FnDef(..) | ty::FnPtr(..) => Some(Instance {
def: ty::InstanceDef::FnPtrShim(trait_item_id, rcvr_args.type_at(0)),

View file

@ -118,36 +118,21 @@ impl<'tcx> OpaqueTypeCollector<'tcx> {
}
TaitInBodyFinder { collector: self }.visit_expr(body);
}
}
impl<'tcx> super::sig_types::SpannedTypeVisitor<'tcx> for OpaqueTypeCollector<'tcx> {
#[instrument(skip(self), ret, level = "trace")]
fn visit(&mut self, span: Span, value: impl TypeVisitable<TyCtxt<'tcx>>) -> ControlFlow<!> {
self.visit_spanned(span, value);
ControlFlow::Continue(())
}
}
impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OpaqueTypeCollector<'tcx> {
#[instrument(skip(self), ret, level = "trace")]
fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<!> {
t.super_visit_with(self)?;
match t.kind() {
ty::Alias(ty::Opaque, alias_ty) if alias_ty.def_id.is_local() => {
fn visit_opaque_ty(&mut self, alias_ty: &ty::AliasTy<'tcx>) {
if !self.seen.insert(alias_ty.def_id.expect_local()) {
return ControlFlow::Continue(());
return;
}
// TAITs outside their defining scopes are ignored.
let origin = self.tcx.opaque_type_origin(alias_ty.def_id.expect_local());
trace!(?origin);
match origin {
rustc_hir::OpaqueTyOrigin::FnReturn(_)
| rustc_hir::OpaqueTyOrigin::AsyncFn(_) => {}
rustc_hir::OpaqueTyOrigin::FnReturn(_) | rustc_hir::OpaqueTyOrigin::AsyncFn(_) => {}
rustc_hir::OpaqueTyOrigin::TyAlias { in_assoc_ty } => {
if !in_assoc_ty {
if !self.check_tait_defining_scope(alias_ty.def_id.expect_local()) {
return ControlFlow::Continue(());
return;
}
}
}
@ -159,10 +144,10 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OpaqueTypeCollector<'tcx> {
// Only check that the parent generics of the TAIT/RPIT are unique;
// the args owned by the opaque will always be duplicated
// lifetime params for RPITs, and empty for TAITs.
match self.tcx.uses_unique_generic_params(
&alias_ty.args[..parent_count],
CheckRegions::FromFunction,
) {
match self
.tcx
.uses_unique_generic_params(&alias_ty.args[..parent_count], CheckRegions::FromFunction)
{
Ok(()) => {
// FIXME: implement higher kinded lifetime bounds on nested opaque types. They are not
// supported at all, so this is sound to do, but once we want to support them, you'll
@ -196,6 +181,24 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OpaqueTypeCollector<'tcx> {
}
}
}
}
impl<'tcx> super::sig_types::SpannedTypeVisitor<'tcx> for OpaqueTypeCollector<'tcx> {
#[instrument(skip(self), ret, level = "trace")]
fn visit(&mut self, span: Span, value: impl TypeVisitable<TyCtxt<'tcx>>) -> ControlFlow<!> {
self.visit_spanned(span, value);
ControlFlow::Continue(())
}
}
impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OpaqueTypeCollector<'tcx> {
#[instrument(skip(self), ret, level = "trace")]
fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<!> {
t.super_visit_with(self)?;
match t.kind() {
ty::Alias(ty::Opaque, alias_ty) if alias_ty.def_id.is_local() => {
self.visit_opaque_ty(alias_ty);
}
ty::Alias(ty::Weak, alias_ty) if alias_ty.def_id.is_local() => {
self.tcx
.type_of(alias_ty.def_id)
@ -272,6 +275,91 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OpaqueTypeCollector<'tcx> {
}
}
struct ImplTraitInAssocTypeCollector<'tcx>(OpaqueTypeCollector<'tcx>);
impl<'tcx> super::sig_types::SpannedTypeVisitor<'tcx> for ImplTraitInAssocTypeCollector<'tcx> {
#[instrument(skip(self), ret, level = "trace")]
fn visit(&mut self, span: Span, value: impl TypeVisitable<TyCtxt<'tcx>>) -> ControlFlow<!> {
let old = self.0.span;
self.0.span = Some(span);
value.visit_with(self);
self.0.span = old;
ControlFlow::Continue(())
}
}
impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ImplTraitInAssocTypeCollector<'tcx> {
#[instrument(skip(self), ret, level = "trace")]
fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<!> {
t.super_visit_with(self)?;
match t.kind() {
ty::Alias(ty::Opaque, alias_ty) if alias_ty.def_id.is_local() => {
self.0.visit_opaque_ty(alias_ty);
}
ty::Alias(ty::Projection, alias_ty) => {
// This avoids having to do normalization of `Self::AssocTy` by only
// supporting the case of a method defining opaque types from assoc types
// in the same impl block.
let parent_trait_ref = self
.0
.parent_trait_ref()
.expect("impl trait in assoc type collector used on non-assoc item");
// If the trait ref of the associated item differs from that of the impl,
// then we can't use the impl's identity substitutions below, so
// just skip.
if alias_ty.trait_ref(self.0.tcx) == parent_trait_ref {
let parent = self.0.parent().expect("we should have a parent here");
for &assoc in self.0.tcx.associated_items(parent).in_definition_order() {
trace!(?assoc);
if assoc.trait_item_def_id != Some(alias_ty.def_id) {
continue;
}
// If the type is further specializable, then the `type_of`
// call below is not actually correct.
if !assoc.defaultness(self.0.tcx).is_final() {
continue;
}
let impl_args = alias_ty.args.rebase_onto(
self.0.tcx,
parent_trait_ref.def_id,
ty::GenericArgs::identity_for_item(self.0.tcx, parent),
);
if check_args_compatible(self.0.tcx, assoc, impl_args) {
return self
.0
.tcx
.type_of(assoc.def_id)
.instantiate(self.0.tcx, impl_args)
.visit_with(self);
} else {
self.0.tcx.dcx().span_delayed_bug(
self.0.tcx.def_span(assoc.def_id),
"item had incorrect args",
);
}
}
}
}
_ => trace!(kind=?t.kind()),
}
ControlFlow::Continue(())
}
}
fn impl_trait_in_assoc_types_defined_by<'tcx>(
tcx: TyCtxt<'tcx>,
item: LocalDefId,
) -> &'tcx ty::List<LocalDefId> {
let mut collector = ImplTraitInAssocTypeCollector(OpaqueTypeCollector::new(tcx, item));
super::sig_types::walk_types(tcx, item, &mut collector);
tcx.mk_local_def_ids(&collector.0.opaques)
}
fn opaque_types_defined_by<'tcx>(
tcx: TyCtxt<'tcx>,
item: LocalDefId,
@ -321,5 +409,6 @@ fn opaque_types_defined_by<'tcx>(
}
pub(super) fn provide(providers: &mut Providers) {
*providers = Providers { opaque_types_defined_by, ..*providers };
*providers =
Providers { opaque_types_defined_by, impl_trait_in_assoc_types_defined_by, ..*providers };
}

View file

@ -3,7 +3,7 @@
#![stable(feature = "alloc_module", since = "1.28.0")]
#[cfg(not(test))]
use core::intrinsics;
use core::hint;
#[cfg(not(test))]
use core::ptr::{self, NonNull};
@ -208,7 +208,7 @@ impl Global {
let new_size = new_layout.size();
// `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
intrinsics::assume(new_size >= old_layout.size());
hint::assert_unchecked(new_size >= old_layout.size());
let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
@ -299,7 +299,7 @@ unsafe impl Allocator for Global {
// SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
new_size if old_layout.align() == new_layout.align() => unsafe {
// `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
intrinsics::assume(new_size <= old_layout.size());
hint::assert_unchecked(new_size <= old_layout.size());
let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;

View file

@ -129,6 +129,7 @@
#![feature(fmt_internals)]
#![feature(fn_traits)]
#![feature(hasher_prefixfree_extras)]
#![feature(hint_assert_unchecked)]
#![feature(inline_const)]
#![feature(inplace_iteration)]
#![feature(iter_advance_by)]

View file

@ -2,7 +2,7 @@
use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
use core::hint;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ptr::{self, NonNull, Unique};
use core::slice;
@ -325,7 +325,7 @@ impl<T, A: Allocator> RawVec<T, A> {
}
unsafe {
// Inform the optimizer that the reservation has succeeded or wasn't needed
core::intrinsics::assume(!self.needs_to_grow(len, additional));
hint::assert_unchecked(!self.needs_to_grow(len, additional));
}
Ok(())
}
@ -363,7 +363,7 @@ impl<T, A: Allocator> RawVec<T, A> {
}
unsafe {
// Inform the optimizer that the reservation has succeeded or wasn't needed
core::intrinsics::assume(!self.needs_to_grow(len, additional));
hint::assert_unchecked(!self.needs_to_grow(len, additional));
}
Ok(())
}
@ -514,7 +514,7 @@ where
debug_assert_eq!(old_layout.align(), new_layout.align());
unsafe {
// The allocator checks for alignment equality
intrinsics::assume(old_layout.align() == new_layout.align());
hint::assert_unchecked(old_layout.align() == new_layout.align());
alloc.grow(ptr, old_layout, new_layout)
}
} else {

View file

@ -252,6 +252,7 @@ use core::cell::Cell;
use core::cmp::Ordering;
use core::fmt;
use core::hash::{Hash, Hasher};
use core::hint;
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
@ -1885,10 +1886,10 @@ impl<T: ?Sized> Rc<T> {
// Initialize the RcBox
let inner = mem_to_rcbox(ptr.as_non_null_ptr().as_ptr());
unsafe {
debug_assert_eq!(Layout::for_value(&*inner), layout);
debug_assert_eq!(Layout::for_value_raw(inner), layout);
ptr::write(&mut (*inner).strong, Cell::new(1));
ptr::write(&mut (*inner).weak, Cell::new(1));
ptr::addr_of_mut!((*inner).strong).write(Cell::new(1));
ptr::addr_of_mut!((*inner).weak).write(Cell::new(1));
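// Why `addr_of_mut!` instead of `&mut`: taking `&mut (*inner).strong` would
// create a reference to a field that is not yet initialized, which is UB;
// `addr_of_mut!` yields the raw field pointer without asserting validity.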
}
Ok(inner)
@ -1902,7 +1903,7 @@ impl<T: ?Sized, A: Allocator> Rc<T, A> {
// Allocate for the `RcBox<T>` using the given value.
unsafe {
Rc::<T>::allocate_for_layout(
Layout::for_value(&*ptr),
Layout::for_value_raw(ptr),
|layout| alloc.allocate(layout),
|mem| mem.with_metadata_of(ptr as *const RcBox<T>),
)
@ -1918,7 +1919,7 @@ impl<T: ?Sized, A: Allocator> Rc<T, A> {
// Copy value as bytes
ptr::copy_nonoverlapping(
&*src as *const T as *const u8,
&mut (*ptr).value as *mut _ as *mut u8,
ptr::addr_of_mut!((*ptr).value) as *mut u8,
value_size,
);
@ -1952,7 +1953,11 @@ impl<T> Rc<[T]> {
unsafe fn copy_from_slice(v: &[T]) -> Rc<[T]> {
unsafe {
let ptr = Self::allocate_for_slice(v.len());
ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).value as *mut [T] as *mut T, v.len());
ptr::copy_nonoverlapping(
v.as_ptr(),
ptr::addr_of_mut!((*ptr).value) as *mut T,
v.len(),
);
Self::from_ptr(ptr)
}
}
@ -1987,10 +1992,10 @@ impl<T> Rc<[T]> {
let ptr = Self::allocate_for_slice(len);
let mem = ptr as *mut _ as *mut u8;
let layout = Layout::for_value(&*ptr);
let layout = Layout::for_value_raw(ptr);
// Pointer to first element
let elems = &mut (*ptr).value as *mut [T] as *mut T;
let elems = ptr::addr_of_mut!((*ptr).value) as *mut T;
let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
@ -2096,7 +2101,8 @@ unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Rc<T, A> {
self.inner().dec_weak();
if self.inner().weak() == 0 {
self.alloc.deallocate(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()));
self.alloc
.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()));
}
}
}
@ -2524,7 +2530,7 @@ impl<T, A: Allocator> From<Vec<T, A>> for Rc<[T], A> {
let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
ptr::copy_nonoverlapping(vec_ptr, &mut (*rc_ptr).value as *mut [T] as *mut T, len);
ptr::copy_nonoverlapping(vec_ptr, ptr::addr_of_mut!((*rc_ptr).value) as *mut T, len);
// Create a `Vec<T, &A>` with length 0, to deallocate the buffer
// without dropping its contents or the allocator
@ -3268,7 +3274,7 @@ trait RcInnerPtr {
// SAFETY: The reference count will never be zero when this is
// called.
unsafe {
core::intrinsics::assume(strong != 0);
hint::assert_unchecked(strong != 0);
}
let strong = strong.wrapping_add(1);
@ -3301,7 +3307,7 @@ trait RcInnerPtr {
// SAFETY: The reference count will never be zero when this is
// called.
unsafe {
core::intrinsics::assume(weak != 0);
hint::assert_unchecked(weak != 0);
}
let weak = weak.wrapping_add(1);
@ -3514,7 +3520,7 @@ unsafe impl<#[may_dangle] T> Drop for UniqueRc<T> {
self.ptr.as_ref().dec_weak();
if self.ptr.as_ref().weak() == 0 {
Global.deallocate(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()));
Global.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()));
}
}
}

View file

@ -1828,11 +1828,11 @@ impl<T: ?Sized> Arc<T> {
mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> *mut ArcInner<T> {
let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout);
debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout);
unsafe {
ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));
ptr::addr_of_mut!((*inner).strong).write(atomic::AtomicUsize::new(1));
ptr::addr_of_mut!((*inner).weak).write(atomic::AtomicUsize::new(1));
}
inner
@ -1847,7 +1847,7 @@ impl<T: ?Sized, A: Allocator> Arc<T, A> {
// Allocate for the `ArcInner<T>` using the given value.
unsafe {
Arc::allocate_for_layout(
Layout::for_value(&*ptr),
Layout::for_value_raw(ptr),
|layout| alloc.allocate(layout),
|mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
)
@ -1863,7 +1863,7 @@ impl<T: ?Sized, A: Allocator> Arc<T, A> {
// Copy value as bytes
ptr::copy_nonoverlapping(
&*src as *const T as *const u8,
&mut (*ptr).data as *mut _ as *mut u8,
ptr::addr_of_mut!((*ptr).data) as *mut u8,
value_size,
);
@ -1898,7 +1898,7 @@ impl<T> Arc<[T]> {
unsafe {
let ptr = Self::allocate_for_slice(v.len());
ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).data as *mut [T] as *mut T, v.len());
ptr::copy_nonoverlapping(v.as_ptr(), ptr::addr_of_mut!((*ptr).data) as *mut T, v.len());
Self::from_ptr(ptr)
}
@ -1934,10 +1934,10 @@ impl<T> Arc<[T]> {
let ptr = Self::allocate_for_slice(len);
let mem = ptr as *mut _ as *mut u8;
let layout = Layout::for_value(&*ptr);
let layout = Layout::for_value_raw(ptr);
// Pointer to first element
let elems = &mut (*ptr).data as *mut [T] as *mut T;
let elems = ptr::addr_of_mut!((*ptr).data) as *mut T;
let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
@ -3380,7 +3380,7 @@ impl<T, A: Allocator + Clone> From<Vec<T, A>> for Arc<[T], A> {
let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
ptr::copy_nonoverlapping(vec_ptr, &mut (*rc_ptr).data as *mut [T] as *mut T, len);
ptr::copy_nonoverlapping(vec_ptr, ptr::addr_of_mut!((*rc_ptr).data) as *mut T, len);
// Create a `Vec<T, &A>` with length 0, to deallocate the buffer
// without dropping its contents or the allocator

View file

@ -1996,7 +1996,7 @@ impl<T, A: Allocator> Vec<T, A> {
} else {
unsafe {
self.len -= 1;
core::intrinsics::assume(self.len < self.capacity());
core::hint::assert_unchecked(self.len < self.capacity());
Some(ptr::read(self.as_ptr().add(self.len())))
}
}

View file

@ -110,7 +110,7 @@ use crate::ptr;
/// ```rust,ignore (unsound and has placeholders)
/// drop(Box::new(42));
/// let number_of_heap_allocs = /* call private allocator API */;
/// unsafe { std::intrinsics::assume(number_of_heap_allocs > 0); }
/// unsafe { std::hint::assert_unchecked(number_of_heap_allocs > 0); }
/// ```
///
/// Note that the optimizations mentioned above are not the only

View file

@ -357,6 +357,8 @@ define!("mir_unwind_resume",
define!("mir_storage_live", fn StorageLive<T>(local: T));
define!("mir_storage_dead", fn StorageDead<T>(local: T));
#[cfg(not(bootstrap))]
define!("mir_assume", fn Assume(operand: bool));
define!("mir_deinit", fn Deinit<T>(place: T));
define!("mir_checked", fn Checked<T>(binop: T) -> (T, bool));
define!("mir_len", fn Len<T>(place: T) -> usize);

View file

@ -132,6 +132,7 @@
#![feature(const_fmt_arguments_new)]
#![feature(const_hash)]
#![feature(const_heap)]
#![feature(const_hint_assert_unchecked)]
#![feature(const_index_range_slice_index)]
#![feature(const_int_unchecked_arith)]
#![feature(const_intrinsic_forget)]

View file

@ -3,6 +3,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
use crate::ascii;
use crate::hint;
use crate::intrinsics;
use crate::mem;
use crate::ops::{Add, Mul, Sub};

View file

@ -1,6 +1,9 @@
//! Definitions of integers that are known not to equal zero.
use crate::cmp::Ordering;
use crate::fmt;
use crate::hash::{Hash, Hasher};
use crate::marker::StructuralPartialEq;
use crate::ops::{BitOr, BitOrAssign, Div, Neg, Rem};
use crate::str::FromStr;
@ -31,13 +34,6 @@ pub trait ZeroablePrimitive: Sized + Copy + private::Sealed {
type NonZero;
}
#[unstable(
feature = "nonzero_internals",
reason = "implementation detail which may disappear or be replaced at any time",
issue = "none"
)]
pub(crate) type NonZero<T> = <T as ZeroablePrimitive>::NonZero;
macro_rules! impl_zeroable_primitive {
($NonZero:ident ( $primitive:ty )) => {
#[unstable(
@ -71,6 +67,13 @@ impl_zeroable_primitive!(NonZeroI64(i64));
impl_zeroable_primitive!(NonZeroI128(i128));
impl_zeroable_primitive!(NonZeroIsize(isize));
#[unstable(
feature = "nonzero_internals",
reason = "implementation detail which may disappear or be replaced at any time",
issue = "none"
)]
pub(crate) type NonZero<T> = <T as ZeroablePrimitive>::NonZero;
macro_rules! impl_nonzero_fmt {
( #[$stability: meta] ( $( $Trait: ident ),+ ) for $Ty: ident ) => {
$(
@ -128,7 +131,7 @@ macro_rules! nonzero_integer {
///
/// [null pointer optimization]: crate::option#representation
#[$stability]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[derive(Copy, Eq)]
#[repr(transparent)]
#[rustc_layout_scalar_valid_range_start(1)]
#[rustc_nonnull_optimization_guaranteed]
@ -317,7 +320,7 @@ macro_rules! nonzero_integer {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn checked_mul(self, other: $Ty) -> Option<$Ty> {
pub const fn checked_mul(self, other: Self) -> Option<Self> {
if let Some(result) = self.get().checked_mul(other.get()) {
// SAFETY:
// - `checked_mul` returns `None` on overflow
@ -326,7 +329,7 @@ macro_rules! nonzero_integer {
// of the sides to be zero
//
// So the result cannot be zero.
Some(unsafe { $Ty::new_unchecked(result) })
Some(unsafe { Self::new_unchecked(result) })
} else {
None
}
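The `$Ty` to `Self` switch is purely cosmetic inside the macro; the expanded, stable API is unchanged. For reference:

use core::num::NonZeroU32;

let a = NonZeroU32::new(6).unwrap();
let b = NonZeroU32::new(7).unwrap();
assert_eq!(a.checked_mul(b).map(NonZeroU32::get), Some(42));
assert_eq!(NonZeroU32::MAX.checked_mul(b), None); // overflow is reported as None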
@ -356,7 +359,7 @@ macro_rules! nonzero_integer {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn saturating_mul(self, other: $Ty) -> $Ty {
pub const fn saturating_mul(self, other: Self) -> Self {
// SAFETY:
// - `saturating_mul` returns `u*::MAX`/`i*::MAX`/`i*::MIN` on overflow/underflow,
// all of which are non-zero
@ -365,7 +368,7 @@ macro_rules! nonzero_integer {
// of the sides to be zero
//
// So the result cannot be zero.
unsafe { $Ty::new_unchecked(self.get().saturating_mul(other.get())) }
unsafe { Self::new_unchecked(self.get().saturating_mul(other.get())) }
}
/// Multiplies two non-zero integers together,
@ -403,9 +406,9 @@ macro_rules! nonzero_integer {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const unsafe fn unchecked_mul(self, other: $Ty) -> $Ty {
pub const unsafe fn unchecked_mul(self, other: Self) -> Self {
// SAFETY: The caller ensures there is no overflow.
unsafe { $Ty::new_unchecked(self.get().unchecked_mul(other.get())) }
unsafe { Self::new_unchecked(self.get().unchecked_mul(other.get())) }
}
/// Raises non-zero value to an integer power.
@ -433,7 +436,7 @@ macro_rules! nonzero_integer {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn checked_pow(self, other: u32) -> Option<$Ty> {
pub const fn checked_pow(self, other: u32) -> Option<Self> {
if let Some(result) = self.get().checked_pow(other) {
// SAFETY:
// - `checked_pow` returns `None` on overflow/underflow
@ -442,7 +445,7 @@ macro_rules! nonzero_integer {
// for base to be zero
//
// So the result cannot be zero.
Some(unsafe { $Ty::new_unchecked(result) })
Some(unsafe { Self::new_unchecked(result) })
} else {
None
}
@ -481,7 +484,7 @@ macro_rules! nonzero_integer {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn saturating_pow(self, other: u32) -> $Ty {
pub const fn saturating_pow(self, other: u32) -> Self {
// SAFETY:
// - `saturating_pow` returns `u*::MAX`/`i*::MAX`/`i*::MIN` on overflow/underflow,
// all of which are non-zero
@ -490,7 +493,97 @@ macro_rules! nonzero_integer {
// for base to be zero
//
// So the result cannot be zero.
unsafe { $Ty::new_unchecked(self.get().saturating_pow(other)) }
unsafe { Self::new_unchecked(self.get().saturating_pow(other)) }
}
}
#[$stability]
impl Clone for $Ty {
#[inline]
fn clone(&self) -> Self {
// SAFETY: The contained value is non-zero.
unsafe { Self(self.0) }
}
}
#[$stability]
impl PartialEq for $Ty {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
#[inline]
fn ne(&self, other: &Self) -> bool {
self.0 != other.0
}
}
#[unstable(feature = "structural_match", issue = "31434")]
impl StructuralPartialEq for $Ty {}
#[$stability]
impl PartialOrd for $Ty {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.0.partial_cmp(&other.0)
}
#[inline]
fn lt(&self, other: &Self) -> bool {
self.0 < other.0
}
#[inline]
fn le(&self, other: &Self) -> bool {
self.0 <= other.0
}
#[inline]
fn gt(&self, other: &Self) -> bool {
self.0 > other.0
}
#[inline]
fn ge(&self, other: &Self) -> bool {
self.0 >= other.0
}
}
#[$stability]
impl Ord for $Ty {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
self.0.cmp(&other.0)
}
#[inline]
fn max(self, other: Self) -> Self {
// SAFETY: The maximum of two non-zero values is still non-zero.
unsafe { Self(self.0.max(other.0)) }
}
#[inline]
fn min(self, other: Self) -> Self {
// SAFETY: The minimum of two non-zero values is still non-zero.
unsafe { Self(self.0.min(other.0)) }
}
#[inline]
fn clamp(self, min: Self, max: Self) -> Self {
// SAFETY: A non-zero value clamped between two non-zero values is still non-zero.
unsafe { Self(self.0.clamp(min.0, max.0)) }
}
}
#[$stability]
impl Hash for $Ty {
#[inline]
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
self.0.hash(state)
}
}
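These hand-written impls replace the former `derive(Clone, PartialEq, Ord, PartialOrd, Hash)` on the macro-generated types; the observable behavior should be identical. A quick stable-API check of the comparison impls:

use core::num::NonZeroU8;

let a = NonZeroU8::new(3).unwrap();
let b = NonZeroU8::new(5).unwrap();
assert!(a < b);               // PartialOrd agrees with the underlying integer
assert_eq!(a.max(b), b);      // Ord::max stays non-zero
assert_eq!(b.clamp(a, b), b); // clamping between non-zero bounds stays non-zero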
@ -508,29 +601,32 @@ macro_rules! nonzero_integer {
#[stable(feature = "nonzero_bitor", since = "1.45.0")]
impl BitOr for $Ty {
type Output = Self;
#[inline]
fn bitor(self, rhs: Self) -> Self::Output {
// SAFETY: since `self` and `rhs` are both nonzero, the
// result of the bitwise-or will be nonzero.
unsafe { $Ty::new_unchecked(self.get() | rhs.get()) }
unsafe { Self::new_unchecked(self.get() | rhs.get()) }
}
}
#[stable(feature = "nonzero_bitor", since = "1.45.0")]
impl BitOr<$Int> for $Ty {
type Output = Self;
#[inline]
fn bitor(self, rhs: $Int) -> Self::Output {
// SAFETY: since `self` is nonzero, the result of the
// bitwise-or will be nonzero regardless of the value of
// `rhs`.
unsafe { $Ty::new_unchecked(self.get() | rhs) }
unsafe { Self::new_unchecked(self.get() | rhs) }
}
}
#[stable(feature = "nonzero_bitor", since = "1.45.0")]
impl BitOr<$Ty> for $Int {
type Output = $Ty;
#[inline]
fn bitor(self, rhs: $Ty) -> Self::Output {
// SAFETY: since `rhs` is nonzero, the result of the
@ -603,6 +699,7 @@ macro_rules! nonzero_integer_signedness_dependent_impls {
#[stable(feature = "nonzero_div", since = "1.51.0")]
impl Div<$Ty> for $Int {
type Output = $Int;
/// This operation rounds towards zero,
/// truncating any fractional part of the exact result, and cannot panic.
#[inline]
@ -616,6 +713,7 @@ macro_rules! nonzero_integer_signedness_dependent_impls {
#[stable(feature = "nonzero_div", since = "1.51.0")]
impl Rem<$Ty> for $Int {
type Output = $Int;
/// This operation satisfies `n % d == n - (n / d) * d`, and cannot panic.
#[inline]
fn rem(self, other: $Ty) -> $Int {
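The doc comments added to the `Div` and `Rem` impls above describe behavior that has been stable since 1.51 (`nonzero_div`): dividing an integer by its `NonZero*` counterpart cannot panic, because the divisor cannot be zero. For example:

use core::num::NonZeroU32;

let d = NonZeroU32::new(10).unwrap();
assert_eq!(27u32 / d, 2); // rounds toward zero, cannot panic
assert_eq!(27u32 % d, 7); // satisfies n % d == n - (n / d) * d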
@ -630,12 +728,12 @@ macro_rules! nonzero_integer_signedness_dependent_impls {
($Ty:ident signed $Int:ty) => {
#[stable(feature = "signed_nonzero_neg", since = "1.71.0")]
impl Neg for $Ty {
type Output = $Ty;
type Output = Self;
#[inline]
fn neg(self) -> $Ty {
fn neg(self) -> Self {
// SAFETY: negation of nonzero cannot yield zero values.
unsafe { $Ty::new_unchecked(self.get().neg()) }
unsafe { Self::new_unchecked(self.get().neg()) }
}
}
@ -703,7 +801,7 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn checked_add(self, other: $Int) -> Option<$Ty> {
pub const fn checked_add(self, other: $Int) -> Option<Self> {
if let Some(result) = self.get().checked_add(other) {
// SAFETY:
// - `checked_add` returns `None` on overflow
@ -712,7 +810,7 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
// sides to be zero
//
// So the result cannot be zero.
Some(unsafe { $Ty::new_unchecked(result) })
Some(unsafe { Self::new_unchecked(result) })
} else {
None
}
@ -742,7 +840,7 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn saturating_add(self, other: $Int) -> $Ty {
pub const fn saturating_add(self, other: $Int) -> Self {
// SAFETY:
// - `saturating_add` returns `u*::MAX` on overflow, which is non-zero
// - `self` is non-zero
@ -750,7 +848,7 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
// sides to be zero
//
// So the result cannot be zero.
unsafe { $Ty::new_unchecked(self.get().saturating_add(other)) }
unsafe { Self::new_unchecked(self.get().saturating_add(other)) }
}
/// Adds an unsigned integer to a non-zero value,
@ -779,9 +877,9 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const unsafe fn unchecked_add(self, other: $Int) -> $Ty {
pub const unsafe fn unchecked_add(self, other: $Int) -> Self {
// SAFETY: The caller ensures there is no overflow.
unsafe { $Ty::new_unchecked(self.get().unchecked_add(other)) }
unsafe { Self::new_unchecked(self.get().unchecked_add(other)) }
}
/// Returns the smallest power of two greater than or equal to n.
@ -812,11 +910,11 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn checked_next_power_of_two(self) -> Option<$Ty> {
pub const fn checked_next_power_of_two(self) -> Option<Self> {
if let Some(nz) = self.get().checked_next_power_of_two() {
// SAFETY: The next power of two is positive
// and overflow is checked.
Some(unsafe { $Ty::new_unchecked(nz) })
Some(unsafe { Self::new_unchecked(nz) })
} else {
None
}
@ -902,9 +1000,9 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
pub const fn midpoint(self, rhs: Self) -> Self {
// SAFETY: The only way to get `0` with midpoint is to have two opposite or
// near opposite numbers: (-5, 5), (0, 1), (0, 0) which is impossible because
// of the unsignedness of this number and also because $Ty is guaranteed to
// of the unsignedness of this number and also because `Self` is guaranteed to
// never be 0.
unsafe { $Ty::new_unchecked(self.get().midpoint(rhs.get())) }
unsafe { Self::new_unchecked(self.get().midpoint(rhs.get())) }
}
/// Returns `true` if and only if `self == (1 << k)` for some `k`.
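`midpoint` in the hunk above is still unstable; a nightly sketch of the guarantee its SAFETY comment relies on (assuming the `num_midpoint` feature gate):

#![feature(num_midpoint)]
use core::num::NonZeroU32;

let lo = NonZeroU32::new(1).unwrap();
let hi = NonZeroU32::new(7).unwrap();
// (1 + 7) / 2; for unsigned non-zero inputs the midpoint is never zero.
assert_eq!(lo.midpoint(hi).get(), 4);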
@ -1000,9 +1098,9 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn abs(self) -> $Ty {
pub const fn abs(self) -> Self {
// SAFETY: This cannot overflow to zero.
unsafe { $Ty::new_unchecked(self.get().abs()) }
unsafe { Self::new_unchecked(self.get().abs()) }
}
/// Checked absolute value.
@ -1031,10 +1129,10 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn checked_abs(self) -> Option<$Ty> {
pub const fn checked_abs(self) -> Option<Self> {
if let Some(nz) = self.get().checked_abs() {
// SAFETY: absolute value of nonzero cannot yield zero values.
Some(unsafe { $Ty::new_unchecked(nz) })
Some(unsafe { Self::new_unchecked(nz) })
} else {
None
}
@ -1066,11 +1164,11 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn overflowing_abs(self) -> ($Ty, bool) {
pub const fn overflowing_abs(self) -> (Self, bool) {
let (nz, flag) = self.get().overflowing_abs();
(
// SAFETY: absolute value of nonzero cannot yield zero values.
unsafe { $Ty::new_unchecked(nz) },
unsafe { Self::new_unchecked(nz) },
flag,
)
}
@ -1105,9 +1203,9 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn saturating_abs(self) -> $Ty {
pub const fn saturating_abs(self) -> Self {
// SAFETY: absolute value of nonzero cannot yield zero values.
unsafe { $Ty::new_unchecked(self.get().saturating_abs()) }
unsafe { Self::new_unchecked(self.get().saturating_abs()) }
}
/// Wrapping absolute value, see
@ -1138,9 +1236,9 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn wrapping_abs(self) -> $Ty {
pub const fn wrapping_abs(self) -> Self {
// SAFETY: absolute value of nonzero cannot yield zero values.
unsafe { $Ty::new_unchecked(self.get().wrapping_abs()) }
unsafe { Self::new_unchecked(self.get().wrapping_abs()) }
}
/// Computes the absolute value of self
@ -1250,10 +1348,10 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
#[inline]
#[stable(feature = "nonzero_negation_ops", since = "1.71.0")]
#[rustc_const_stable(feature = "nonzero_negation_ops", since = "1.71.0")]
pub const fn checked_neg(self) -> Option<$Ty> {
pub const fn checked_neg(self) -> Option<Self> {
if let Some(result) = self.get().checked_neg() {
// SAFETY: negation of nonzero cannot yield zero values.
return Some(unsafe { $Ty::new_unchecked(result) });
return Some(unsafe { Self::new_unchecked(result) });
}
None
}
@ -1282,10 +1380,10 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
#[inline]
#[stable(feature = "nonzero_negation_ops", since = "1.71.0")]
#[rustc_const_stable(feature = "nonzero_negation_ops", since = "1.71.0")]
pub const fn overflowing_neg(self) -> ($Ty, bool) {
pub const fn overflowing_neg(self) -> (Self, bool) {
let (result, overflow) = self.get().overflowing_neg();
// SAFETY: negation of nonzero cannot yield zero values.
((unsafe { $Ty::new_unchecked(result) }), overflow)
((unsafe { Self::new_unchecked(result) }), overflow)
}
/// Saturating negation. Computes `-self`,
@ -1317,11 +1415,11 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
#[inline]
#[stable(feature = "nonzero_negation_ops", since = "1.71.0")]
#[rustc_const_stable(feature = "nonzero_negation_ops", since = "1.71.0")]
pub const fn saturating_neg(self) -> $Ty {
pub const fn saturating_neg(self) -> Self {
if let Some(result) = self.checked_neg() {
return result;
}
$Ty::MAX
Self::MAX
}
/// Wrapping (modular) negation. Computes `-self`, wrapping around at the boundary
@ -1349,10 +1447,10 @@ macro_rules! nonzero_integer_signedness_dependent_methods {
#[inline]
#[stable(feature = "nonzero_negation_ops", since = "1.71.0")]
#[rustc_const_stable(feature = "nonzero_negation_ops", since = "1.71.0")]
pub const fn wrapping_neg(self) -> $Ty {
pub const fn wrapping_neg(self) -> Self {
let result = self.get().wrapping_neg();
// SAFETY: negation of nonzero cannot yield zero values.
unsafe { $Ty::new_unchecked(result) }
unsafe { Self::new_unchecked(result) }
}
};
}

View file

@ -2452,8 +2452,8 @@ macro_rules! uint_impl {
// SAFETY: the result is positive and fits in an integer with half as many bits.
// Inform the optimizer about it.
unsafe {
intrinsics::assume(0 < res);
intrinsics::assume(res < 1 << (Self::BITS / 2));
hint::assert_unchecked(0 < res);
hint::assert_unchecked(res < 1 << (Self::BITS / 2));
}
res

View file

@ -234,7 +234,7 @@ unsafe impl<T> SliceIndex<[T]> for usize {
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe {
crate::intrinsics::assume(self < slice.len());
crate::hint::assert_unchecked(self < slice.len());
slice.as_ptr().add(self)
}
}

View file

@ -5,7 +5,7 @@ mod macros;
use crate::cmp;
use crate::fmt;
use crate::intrinsics::assume;
use crate::hint::assert_unchecked;
use crate::iter::{
FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce, UncheckedIterator,
};

View file

@ -338,7 +338,7 @@ macro_rules! iterator {
if predicate(x) {
// SAFETY: we are guaranteed to be in bounds by the loop invariant:
// when `i >= n`, `self.next()` returns `None` and the loop breaks.
unsafe { assume(i < n) };
unsafe { assert_unchecked(i < n) };
return Some(i);
}
i += 1;
@ -361,7 +361,7 @@ macro_rules! iterator {
if predicate(x) {
// SAFETY: `i` must be lower than `n` since it starts at `n`
// and is only decreasing.
unsafe { assume(i < n) };
unsafe { assert_unchecked(i < n) };
return Some(i);
}
}

View file

@ -8,6 +8,7 @@
use crate::cmp::Ordering::{self, Equal, Greater, Less};
use crate::fmt;
use crate::hint;
use crate::intrinsics::exact_div;
use crate::mem::{self, SizedTypeProperties};
use crate::num::NonZeroUsize;
@ -2799,7 +2800,7 @@ impl<T> [T] {
right = if cmp == Greater { mid } else { right };
if cmp == Equal {
// SAFETY: same as the `get_unchecked` above
unsafe { crate::intrinsics::assume(mid < self.len()) };
unsafe { hint::assert_unchecked(mid < self.len()) };
return Ok(mid);
}
@ -2808,7 +2809,7 @@ impl<T> [T] {
// SAFETY: directly true from the overall invariant.
// Note that this is `<=`, unlike the assume in the `Ok` path.
unsafe { crate::intrinsics::assume(left <= self.len()) };
unsafe { hint::assert_unchecked(left <= self.len()) };
Err(left)
}

View file

@ -19,6 +19,9 @@
#[cfg(target_os = "android")]
mod android;
#[cfg(target_os = "zkvm")]
mod zkvm;
use core::any::Any;
use core::panic::PanicPayload;
@ -34,6 +37,8 @@ pub unsafe fn __rust_start_panic(_payload: &mut dyn PanicPayload) -> u32 {
// Android has the ability to attach a message as part of the abort.
#[cfg(target_os = "android")]
android::android_set_abort_message(_payload);
#[cfg(target_os = "zkvm")]
zkvm::zkvm_set_abort_message(_payload);
abort();

View file

@ -0,0 +1,24 @@
use alloc::string::String;
use core::panic::PanicPayload;
// Forward the abort message to zkVM's sys_panic. This is implemented by RISC Zero's
// platform crate which exposes system calls specifically for the zkVM.
pub(crate) unsafe fn zkvm_set_abort_message(payload: &mut dyn PanicPayload) {
let payload = payload.get();
let msg = match payload.downcast_ref::<&'static str>() {
Some(msg) => msg.as_bytes(),
None => match payload.downcast_ref::<String>() {
Some(msg) => msg.as_bytes(),
None => &[],
},
};
if msg.is_empty() {
return;
}
extern "C" {
fn sys_panic(msg_ptr: *const u8, len: usize) -> !;
}
sys_panic(msg.as_ptr(), msg.len());
}

View file

@ -35,6 +35,7 @@ fn main() {
|| target.contains("hurd")
|| target.contains("uefi")
|| target.contains("teeos")
|| target.contains("zkvm")
// See src/bootstrap/synthetic_targets.rs
|| env::var("RUSTC_BOOTSTRAP_SYNTHETIC_TARGET").is_ok()
{

View file

@ -56,7 +56,7 @@
#![deny(unsafe_op_in_unsafe_fn)]
#![stable(feature = "alloc_module", since = "1.28.0")]
use core::intrinsics;
use core::hint;
use core::ptr::NonNull;
use core::sync::atomic::{AtomicPtr, Ordering};
use core::{mem, ptr};
@ -172,7 +172,7 @@ impl System {
let new_size = new_layout.size();
// `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
intrinsics::assume(new_size >= old_layout.size());
hint::assert_unchecked(new_size >= old_layout.size());
let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
@ -264,7 +264,7 @@ unsafe impl Allocator for System {
// SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
new_size if old_layout.align() == new_layout.align() => unsafe {
// `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
intrinsics::assume(new_size <= old_layout.size());
hint::assert_unchecked(new_size <= old_layout.size());
let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;

View file

@ -325,6 +325,7 @@
#![feature(float_next_up_down)]
#![feature(hasher_prefixfree_extras)]
#![feature(hashmap_internals)]
#![feature(hint_assert_unchecked)]
#![feature(ip)]
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_uninit_array)]

View file

@ -149,8 +149,7 @@ pub trait CommandExt: Sealed {
/// The pidfd can be retrieved from the child with [`pidfd`] or [`take_pidfd`].
///
/// A pidfd will only be created if it is possible to do so
/// in a guaranteed race-free manner (e.g. if the `clone3` system call
/// is supported). Otherwise, [`pidfd`] will return an error.
/// in a guaranteed race-free manner. Otherwise, [`pidfd`] will return an error.
///
/// If a pidfd has been successfully created and not been taken from the `Child`
/// then calls to `kill()`, `wait()` and `try_wait()` will use the pidfd

View file

@ -16,7 +16,7 @@ use crate::ptr;
target_arch = "sparc",
target_arch = "wasm32",
target_arch = "hexagon",
all(target_arch = "riscv32", not(target_os = "espidf")),
all(target_arch = "riscv32", not(any(target_os = "espidf", target_os = "zkvm"))),
all(target_arch = "xtensa", not(target_os = "espidf")),
))]
pub const MIN_ALIGN: usize = 8;
@ -32,11 +32,11 @@ pub const MIN_ALIGN: usize = 8;
target_arch = "wasm64",
))]
pub const MIN_ALIGN: usize = 16;
// The allocator on the esp-idf platform guarantees 4 byte alignment.
#[cfg(any(
all(target_arch = "riscv32", target_os = "espidf"),
// The allocators on the esp-idf and zkvm platforms guarantee 4 byte alignment.
#[cfg(all(any(
all(target_arch = "riscv32", any(target_os = "espidf", target_os = "zkvm")),
all(target_arch = "xtensa", target_os = "espidf"),
))]
)))]
pub const MIN_ALIGN: usize = 4;
pub unsafe fn realloc_fallback(

View file

@ -55,6 +55,9 @@ cfg_if::cfg_if! {
} else if #[cfg(target_os = "teeos")] {
mod teeos;
pub use self::teeos::*;
} else if #[cfg(target_os = "zkvm")] {
mod zkvm;
pub use self::zkvm::*;
} else {
mod unsupported;
pub use self::unsupported::*;

View file

@ -147,8 +147,7 @@ impl Command {
#[cfg(not(target_os = "linux"))]
let pidfd = -1;
// Safety: We obtained the pidfd from calling `clone3` with
// `CLONE_PIDFD` so it's valid an otherwise unowned.
// Safety: We obtained the pidfd (on Linux) using SOCK_SEQPACKET, so it's valid.
let mut p = unsafe { Process::new(pid, pidfd) };
let mut bytes = [0; 8];

View file

@ -62,13 +62,14 @@ fn test_command_fork_no_unwind() {
}
#[test]
#[cfg(target_os = "linux")]
#[cfg(target_os = "linux")] // pidfds are a linux-specific concept
fn test_command_pidfd() {
use crate::assert_matches::assert_matches;
use crate::os::fd::{AsRawFd, RawFd};
use crate::os::linux::process::{ChildExt, CommandExt};
use crate::process::Command;
// pidfds require the pidfd_open syscall
let our_pid = crate::process::id();
let pidfd = unsafe { libc::syscall(libc::SYS_pidfd_open, our_pid, 0) };
let pidfd_open_available = if pidfd >= 0 {
@ -81,7 +82,9 @@ fn test_command_pidfd() {
// always exercise creation attempts
let mut child = Command::new("false").create_pidfd(true).spawn().unwrap();
// but only check if we know that the kernel supports pidfds
// but only check if we know that the kernel supports pidfds.
// We don't assert the precise value, since the standard library
// might have opened other file descriptors before our code runs.
if pidfd_open_available {
assert!(child.pidfd().is_ok());
}
@ -97,4 +100,17 @@ fn test_command_pidfd() {
child.kill().expect("failed to kill child");
let status = child.wait().expect("error waiting on pidfd");
assert_eq!(status.signal(), Some(libc::SIGKILL));
let _ = Command::new("echo")
.create_pidfd(false)
.spawn()
.unwrap()
.pidfd()
.expect_err("pidfd should not have been created when create_pid(false) is set");
let _ = Command::new("echo")
.spawn()
.unwrap()
.pidfd()
.expect_err("pidfd should not have been created");
}

View file

@ -106,7 +106,18 @@ mod imp {
// supported on the current kernel.
//
// Also fall back in case it is disabled by something like
// seccomp or inside of virtual machines.
// seccomp or inside of docker.
//
// If the `getrandom` syscall is not implemented in the current kernel version it should return an
// `ENOSYS` error. Docker also blocks the whole syscall inside unprivileged containers, and
// returns `EPERM` (instead of `ENOSYS`) when a program tries to invoke the syscall. Because of
// that we need to check for *both* `ENOSYS` and `EPERM`.
//
// Note that Docker's behavior is breaking other projects (notably glibc), so they're planning
// to update their filtering to return `ENOSYS` in a future release:
//
// https://github.com/moby/moby/issues/42680
//
GETRANDOM_UNAVAILABLE.store(true, Ordering::Relaxed);
return false;
} else if err == libc::EAGAIN {
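In short, the fallback treats both error codes as "getrandom permanently unavailable". A sketch of the check the comment describes (the helper name is illustrative; the constants come from `libc`):

fn getrandom_permanently_unavailable(err: i32) -> bool {
    // ENOSYS: the kernel lacks the syscall; EPERM: a seccomp filter
    // (e.g. Docker's default profile) blocks it.
    err == libc::ENOSYS || err == libc::EPERM
}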

View file

@ -17,7 +17,10 @@
target_os = "android",
target_os = "fuchsia",
target_os = "redox",
target_os = "hurd"
target_os = "hurd",
target_os = "freebsd",
target_os = "netbsd",
target_os = "dragonfly"
))]
// FIXME: The Rust compiler currently omits weak function definitions (i.e.,
// __cxa_thread_atexit_impl) and their metadata from LLVM IR.

View file

@ -0,0 +1,55 @@
//! ABI definitions for symbols exported by risc0-zkvm-platform.
// Included here so we don't have to depend on risc0-zkvm-platform.
//
// FIXME: Should we move this to the "libc" crate? It seems like other
// architectures put a lot of this kind of stuff there. But there's
// currently no risc0 fork of the libc crate, so we'd either have to
// fork it or upstream it.
#![allow(dead_code)]
pub const DIGEST_WORDS: usize = 8;
/// Standard IO file descriptors for use with sys_read and sys_write.
pub mod fileno {
pub const STDIN: u32 = 0;
pub const STDOUT: u32 = 1;
pub const STDERR: u32 = 2;
pub const JOURNAL: u32 = 3;
}
extern "C" {
// Wrappers around syscalls provided by risc0-zkvm-platform:
pub fn sys_halt();
pub fn sys_output(output_id: u32, output_value: u32);
pub fn sys_sha_compress(
out_state: *mut [u32; DIGEST_WORDS],
in_state: *const [u32; DIGEST_WORDS],
block1_ptr: *const [u32; DIGEST_WORDS],
block2_ptr: *const [u32; DIGEST_WORDS],
);
pub fn sys_sha_buffer(
out_state: *mut [u32; DIGEST_WORDS],
in_state: *const [u32; DIGEST_WORDS],
buf: *const u8,
count: u32,
);
pub fn sys_rand(recv_buf: *mut u32, words: usize);
pub fn sys_panic(msg_ptr: *const u8, len: usize) -> !;
pub fn sys_log(msg_ptr: *const u8, len: usize);
pub fn sys_cycle_count() -> usize;
pub fn sys_read(fd: u32, recv_buf: *mut u8, nrequested: usize) -> usize;
pub fn sys_write(fd: u32, write_buf: *const u8, nbytes: usize);
pub fn sys_getenv(
recv_buf: *mut u32,
words: usize,
varname: *const u8,
varname_len: usize,
) -> usize;
pub fn sys_argc() -> usize;
pub fn sys_argv(out_words: *mut u32, out_nwords: usize, arg_index: usize) -> usize;
// Allocate memory from global HEAP.
pub fn sys_alloc_words(nwords: usize) -> *mut u32;
pub fn sys_alloc_aligned(nwords: usize, align: usize) -> *mut u8;
}

View file

@ -0,0 +1,15 @@
use super::abi;
use crate::alloc::{GlobalAlloc, Layout, System};
#[stable(feature = "alloc_system_type", since = "1.28.0")]
unsafe impl GlobalAlloc for System {
#[inline]
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
abi::sys_alloc_aligned(layout.size(), layout.align())
}
#[inline]
unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
// this allocator never deallocates memory
}
}
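Since `dealloc` is a no-op, this is effectively a leaking bump allocator: every allocation permanently consumes guest memory. A hedged usage sketch through the `GlobalAlloc` interface:

use std::alloc::{GlobalAlloc, Layout, System};

unsafe {
    let layout = Layout::from_size_align(64, 8).unwrap();
    let p = System.alloc(layout);
    assert!(!p.is_null());
    System.dealloc(p, layout); // does nothing on this target; the block leaks
}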

View file

@ -0,0 +1,80 @@
use super::{abi, WORD_SIZE};
use crate::ffi::OsString;
use crate::fmt;
use crate::sys_common::FromInner;
pub struct Args {
i_forward: usize,
i_back: usize,
count: usize,
}
pub fn args() -> Args {
let count = unsafe { abi::sys_argc() };
Args { i_forward: 0, i_back: 0, count }
}
impl Args {
/// Use sys_argv to get the arg at the requested index. Does not check that `i` is less than
/// `argc`; the underlying syscall will not return if the index is out of bounds.
fn argv(i: usize) -> OsString {
let arg_len = unsafe { abi::sys_argv(crate::ptr::null_mut(), 0, i) };
let arg_len_words = (arg_len + WORD_SIZE - 1) / WORD_SIZE;
let words = unsafe { abi::sys_alloc_words(arg_len_words) };
let arg_len2 = unsafe { abi::sys_argv(words, arg_len_words, i) };
debug_assert_eq!(arg_len, arg_len2);
// Convert to OsString.
//
// FIXME: We can probably get rid of the extra copy here if we
// reimplement "os_str" instead of just using the generic unix
// "os_str".
let arg_bytes: &[u8] =
unsafe { crate::slice::from_raw_parts(words.cast() as *const u8, arg_len) };
OsString::from_inner(super::os_str::Buf { inner: arg_bytes.to_vec() })
}
}
impl fmt::Debug for Args {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().finish()
}
}
impl Iterator for Args {
type Item = OsString;
fn next(&mut self) -> Option<OsString> {
if self.i_forward >= self.count - self.i_back {
None
} else {
let arg = Self::argv(self.i_forward);
self.i_forward += 1;
Some(arg)
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
// Report the number of arguments still to be yielded, not the original total.
let remaining = self.count - self.i_forward - self.i_back;
(remaining, Some(remaining))
}
}
impl ExactSizeIterator for Args {
fn len(&self) -> usize {
self.count - self.i_forward - self.i_back
}
}
impl DoubleEndedIterator for Args {
fn next_back(&mut self) -> Option<OsString> {
if self.i_back >= self.count - self.i_forward {
None
} else {
let arg = Self::argv(self.count - 1 - self.i_back);
self.i_back += 1;
Some(arg)
}
}
}
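The two cursors close in on each other, so `next` and `next_back` never yield the same index twice. A sketch of the intended behavior on this target (argument values are illustrative):

// Suppose argv is ["prog", "a", "b"].
let mut it = args();
let first = it.next();     // argv[0]
let last = it.next_back(); // argv[2]
let mid = it.next();       // argv[1]
assert!(it.next().is_none() && it.next_back().is_none());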

View file

@ -0,0 +1,9 @@
pub mod os {
pub const FAMILY: &str = "";
pub const OS: &str = "";
pub const DLL_PREFIX: &str = "";
pub const DLL_SUFFIX: &str = ".elf";
pub const DLL_EXTENSION: &str = "elf";
pub const EXE_SUFFIX: &str = ".elf";
pub const EXE_EXTENSION: &str = "elf";
}

View file

@ -0,0 +1,93 @@
//! System bindings for the risc0 zkvm platform
//!
//! This module contains the facade (aka platform-specific) implementations of
//! OS level functionality for zkvm.
//!
//! This is all highly experimental and not yet intended for wide or
//! production use; it is likely to change over time.
const WORD_SIZE: usize = core::mem::size_of::<u32>();
pub mod alloc;
#[path = "../zkvm/args.rs"]
pub mod args;
#[path = "../unix/cmath.rs"]
pub mod cmath;
pub mod env;
#[path = "../unsupported/fs.rs"]
pub mod fs;
#[path = "../unsupported/io.rs"]
pub mod io;
#[path = "../unsupported/net.rs"]
pub mod net;
#[path = "../unsupported/once.rs"]
pub mod once;
pub mod os;
#[path = "../unix/os_str.rs"]
pub mod os_str;
#[path = "../unix/path.rs"]
pub mod path;
#[path = "../unsupported/pipe.rs"]
pub mod pipe;
#[path = "../unsupported/process.rs"]
pub mod process;
pub mod stdio;
pub mod thread_local_key;
#[path = "../unsupported/time.rs"]
pub mod time;
#[path = "../unsupported/locks/mod.rs"]
pub mod locks;
#[path = "../unsupported/thread.rs"]
pub mod thread;
#[path = "../unsupported/thread_parking.rs"]
pub mod thread_parking;
mod abi;
use crate::io as std_io;
pub mod memchr {
pub use core::slice::memchr::{memchr, memrchr};
}
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
pub unsafe fn init(_argc: isize, _argv: *const *const u8, _sigpipe: u8) {}
// SAFETY: must be called only once during runtime cleanup.
// NOTE: this is not guaranteed to run, for example when the program aborts.
pub unsafe fn cleanup() {}
pub fn unsupported<T>() -> std_io::Result<T> {
Err(unsupported_err())
}
pub fn unsupported_err() -> std_io::Error {
std_io::const_io_error!(
std_io::ErrorKind::Unsupported,
"operation not supported on this platform",
)
}
pub fn is_interrupted(_code: i32) -> bool {
false
}
pub fn decode_error_kind(_code: i32) -> crate::io::ErrorKind {
crate::io::ErrorKind::Uncategorized
}
pub fn abort_internal() -> ! {
core::intrinsics::abort();
}
pub fn hashmap_random_keys() -> (u64, u64) {
let mut buf = [0u32; 4];
unsafe {
abi::sys_rand(buf.as_mut_ptr(), 4);
};
(((buf[0] as u64) << 32) + buf[1] as u64, ((buf[2] as u64) << 32) + buf[3] as u64)
}
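An editorial note on the parentheses in the expression above: in Rust, `+` binds tighter than `<<`, so the unparenthesized `x << 32 + y` would parse as `x << (32 + y)` and shift by far too much:

assert_eq!(1u64 << 2 + 1, 1u64 << 3); // 8, not (1 << 2) + 1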

View file

@ -0,0 +1,139 @@
use super::{abi, unsupported, WORD_SIZE};
use crate::error::Error as StdError;
use crate::ffi::{OsStr, OsString};
use crate::fmt;
use crate::io;
use crate::marker::PhantomData;
use crate::path::{self, PathBuf};
use crate::sys_common::FromInner;
pub fn errno() -> i32 {
0
}
pub fn error_string(_errno: i32) -> String {
"operation successful".to_string()
}
pub fn getcwd() -> io::Result<PathBuf> {
unsupported()
}
pub fn chdir(_: &path::Path) -> io::Result<()> {
unsupported()
}
pub struct SplitPaths<'a>(!, PhantomData<&'a ()>);
pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> {
panic!("unsupported")
}
impl<'a> Iterator for SplitPaths<'a> {
type Item = PathBuf;
fn next(&mut self) -> Option<PathBuf> {
self.0
}
}
#[derive(Debug)]
pub struct JoinPathsError;
pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError>
where
I: Iterator<Item = T>,
T: AsRef<OsStr>,
{
Err(JoinPathsError)
}
impl fmt::Display for JoinPathsError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"not supported on this platform yet".fmt(f)
}
}
impl StdError for JoinPathsError {
#[allow(deprecated)]
fn description(&self) -> &str {
"not supported on this platform yet"
}
}
pub fn current_exe() -> io::Result<PathBuf> {
unsupported()
}
pub struct Env(!);
impl Iterator for Env {
type Item = (OsString, OsString);
fn next(&mut self) -> Option<(OsString, OsString)> {
self.0
}
}
pub fn env() -> Env {
panic!("not supported on this platform")
}
impl Env {
pub fn str_debug(&self) -> impl fmt::Debug + '_ {
let Self(inner) = self;
match *inner {}
}
}
impl fmt::Debug for Env {
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self(inner) = self;
match *inner {}
}
}
pub fn getenv(varname: &OsStr) -> Option<OsString> {
let varname = varname.as_encoded_bytes();
let nbytes =
unsafe { abi::sys_getenv(crate::ptr::null_mut(), 0, varname.as_ptr(), varname.len()) };
if nbytes == usize::MAX {
return None;
}
let nwords = (nbytes + WORD_SIZE - 1) / WORD_SIZE;
let words = unsafe { abi::sys_alloc_words(nwords) };
let nbytes2 = unsafe { abi::sys_getenv(words, nwords, varname.as_ptr(), varname.len()) };
debug_assert_eq!(nbytes, nbytes2);
// Convert to OsString.
//
// FIXME: We can probably get rid of the extra copy here if we
// reimplement "os_str" instead of just using the generic unix
// "os_str".
let u8s: &[u8] = unsafe { crate::slice::from_raw_parts(words.cast() as *const u8, nbytes) };
Some(OsString::from_inner(super::os_str::Buf { inner: u8s.to_vec() }))
}
pub fn setenv(_: &OsStr, _: &OsStr) -> io::Result<()> {
Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot set env vars on this platform"))
}
pub fn unsetenv(_: &OsStr) -> io::Result<()> {
Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot unset env vars on this platform"))
}
pub fn temp_dir() -> PathBuf {
panic!("no filesystem on this platform")
}
pub fn home_dir() -> Option<PathBuf> {
None
}
pub fn exit(_code: i32) -> ! {
crate::intrinsics::abort()
}
pub fn getpid() -> u32 {
panic!("no pids on this platform")
}

View file

@ -0,0 +1,64 @@
use super::{abi, abi::fileno};
use crate::io;
pub struct Stdin;
pub struct Stdout;
pub struct Stderr;
impl Stdin {
pub const fn new() -> Stdin {
Stdin
}
}
impl io::Read for Stdin {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
Ok(unsafe { abi::sys_read(fileno::STDIN, buf.as_mut_ptr(), buf.len()) })
}
}
impl Stdout {
pub const fn new() -> Stdout {
Stdout
}
}
impl io::Write for Stdout {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
unsafe { abi::sys_write(fileno::STDOUT, buf.as_ptr(), buf.len()) }
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl Stderr {
pub const fn new() -> Stderr {
Stderr
}
}
impl io::Write for Stderr {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
unsafe { abi::sys_write(fileno::STDERR, buf.as_ptr(), buf.len()) }
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
pub const STDIN_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE;
pub fn is_ebadf(_err: &io::Error) -> bool {
true
}
pub fn panic_output() -> Option<impl io::Write> {
Some(Stderr::new())
}

View file

@ -0,0 +1,23 @@
use crate::alloc::{alloc, Layout};
pub type Key = usize;
#[inline]
pub unsafe fn create(_dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key {
alloc(Layout::new::<*mut u8>()) as _
}
#[inline]
pub unsafe fn set(key: Key, value: *mut u8) {
let key: *mut *mut u8 = core::ptr::from_exposed_addr_mut(key);
*key = value;
}
#[inline]
pub unsafe fn get(key: Key) -> *mut u8 {
let key: *mut *mut u8 = core::ptr::from_exposed_addr_mut(key);
*key
}
#[inline]
pub unsafe fn destroy(_key: Key) {}
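A hedged sketch of how these primitives compose on this target: `create` leaks one word of heap per key and ignores the destructor, so `destroy` has nothing to clean up.

unsafe {
    let key = create(None);
    set(key, 8 as *mut u8);
    assert_eq!(get(key), 8 as *mut u8);
    destroy(key); // no-op; the slot stays allocated for the process lifetime
}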

View file

@ -323,7 +323,8 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Resu
// Prevent the usage of `Instant` in some cases:
// - It's currently not supported for wasm targets.
// - We disable it for miri because it's not available when isolation is enabled.
let is_instant_supported = !cfg!(target_family = "wasm") && !cfg!(miri);
let is_instant_supported =
!cfg!(target_family = "wasm") && !cfg!(target_os = "zkvm") && !cfg!(miri);
let start_time = is_instant_supported.then(Instant::now);
run_tests(opts, tests, |x| on_test_event(&x, &mut st, &mut *out))?;

View file

@ -540,7 +540,7 @@ pub fn run_test(
// Emscripten can catch panics but other wasm targets cannot
let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No
&& cfg!(target_family = "wasm")
&& (cfg!(target_family = "wasm") || cfg!(target_os = "zkvm"))
&& !cfg!(target_os = "emscripten");
if force_ignore || desc.ignore || ignore_because_no_process_support {

View file

@ -90,6 +90,10 @@ const EXTRA_CHECK_CFGS: &[(Option<Mode>, &str, Option<&[&'static str]>)] = &[
/* Extra values not defined in the built-in targets yet, but used in std */
(Some(Mode::Std), "target_env", Some(&["libnx"])),
// (Some(Mode::Std), "target_os", Some(&[])),
// #[cfg(bootstrap)] zkvm
(Some(Mode::Std), "target_os", Some(&["zkvm"])),
// #[cfg(bootstrap)] risc0
(Some(Mode::Std), "target_vendor", Some(&["risc0"])),
(Some(Mode::Std), "target_arch", Some(&["spirv", "nvptx", "xtensa"])),
/* Extra names used by dependencies */
// FIXME: Used by serde_json, but we should not be triggering on external dependencies.
@ -721,6 +725,11 @@ impl Build {
if self.config.profiler_enabled(target) {
features.push_str(" profiler");
}
// Generate memcpy, etc. FIXME: Remove this once compiler-builtins
// automatically detects this target.
if target.contains("zkvm") {
features.push_str(" compiler-builtins-mem");
}
features
}

View file

@ -47,6 +47,7 @@
- [mipsisa\*r6\*-unknown-linux-gnu\*](platform-support/mips-release-6.md)
- [nvptx64-nvidia-cuda](platform-support/nvptx64-nvidia-cuda.md)
- [powerpc64-ibm-aix](platform-support/aix.md)
- [riscv32im-risc0-zkvm-elf](platform-support/riscv32im-risc0-zkvm-elf.md)
- [riscv32imac-unknown-xous-elf](platform-support/riscv32imac-unknown-xous-elf.md)
- [riscv32*-unknown-none-elf](platform-support/riscv32imac-unknown-none-elf.md)
- [sparc-unknown-none-elf](./platform-support/sparc-unknown-none-elf.md)

View file

@ -338,6 +338,7 @@ target | std | host | notes
[`powerpc64-ibm-aix`](platform-support/aix.md) | ? | | 64-bit AIX (7.2 and newer)
`riscv32gc-unknown-linux-gnu` | | | RISC-V Linux (kernel 5.4, glibc 2.33)
`riscv32gc-unknown-linux-musl` | | | RISC-V Linux (kernel 5.4, musl + RISCV32 support patches)
[`riscv32im-risc0-zkvm-elf`](platform-support/riscv32im-risc0-zkvm-elf.md) | ? | | RISC Zero's zero-knowledge Virtual Machine (RV32IM ISA)
[`riscv32imac-unknown-xous-elf`](platform-support/riscv32imac-unknown-xous-elf.md) | ? | | RISC-V Xous (RV32IMAC ISA)
[`riscv32imc-esp-espidf`](platform-support/esp-idf.md) | ✓ | | RISC-V ESP-IDF
[`riscv32imac-esp-espidf`](platform-support/esp-idf.md) | ✓ | | RISC-V ESP-IDF

View file

@ -0,0 +1,86 @@
# `riscv32im-risc0-zkvm-elf`
**Tier: 3**
RISC Zero's Zero Knowledge Virtual Machine (zkVM) implementing the RV32IM instruction set.
## Target maintainers
- Frank Laub, `frank@risczero.com`, https://github.com/flaub
- Jeremy Bruestle, `jeremy@risczero.com`, https://github.com/jbruestle
- Erik Kaneda, `erik@risczero.com`, https://github.com/SchmErik
## Background
This target is an execution environment to produce a proof of execution of
a RISC-V ELF binary and any output that the developer of the binary wishes to
display publicly. In order to do this, the target will execute the ELF to
generate a receipt containing the output of the computation along with a
cryptographic seal. This receipt can be verified to ensure the integrity of the
computation and its result. This target is implemented as software only; it has
no hardware implementation.
We have a cargo extension called [cargo-risczero] that allows users to generate
project templates, install tools for an improved user experience, build binaries
in a Docker environment, and test programs.
## Requirements
The target only supports cross-compilation; no host tools are provided. The
target supports `alloc` with a default allocator and has experimental support
for `std`. The target expects binaries to be in the ELF format.
The target's execution environment is single threaded, non-preemptive, and does
not support any privileged instructions or unaligned accesses. At the time of
writing, the VM has 192 MB of memory, and the text/data, heap, and stack need to
be within the address range `0x400` - `0x0C000000`. The binaries themselves expect
no operating system and can be thought of as running on bare-metal. The target
does not use `#[target_feature(...)]` or `-C target-feature=` values.
Calling `extern "C"` on the target uses the C calling convention outlined in the
[RISC-V specification].
## Building for the zkVM
Programs for the zkVM can be built by adding it to the `target` list in
`config.toml`. However, we recommend building programs with our starter template,
generated by the [cargo-risczero] utility and the [risc0-build] crate. The
latter calls `rustc` with `-C "link-arg=-Ttext=` so that the text section is
mapped to the appropriate location, and it generates variables that represent
the ELF and a unique ID associated with it. The starter template provides
developers with system calls that are useful for zero-knowledge computing, such
as writing to the public output, hashing with sha256, and multiplying big integers.
## Building Rust programs
Rust does not yet ship pre-compiled artifacts for this target. To compile for
this target, you will need to either build Rust with the target enabled (see
"Building for the zkVM" above) or download the risc0 toolchain by running
`cargo risczero install`. We do not recommend using `build-std`, as we have run
into issues building core on our starter template in the past.
## Testing
Note: the target is implemented as a software emulator called the zkVM and there
is no hardware implementation of the target.
The most practical way to test the target program is to use our starter template
that can be generated by using the `cargo risczero new` command. The template
generates sample "host" and "guest" code. The guest code is compiled to the
target (which is RV32IM), whereas the "host" code is compiled to run on the
programmer's machine running either a Linux distribution or macOS.
program is responsible for running the guest binary on the zkVM and retrieving
its public output.
The target currently does not support running the Rust test suite.
## Cross-compilation toolchains and C code
Compatible C code can be built for this target with any compiler that has an
RV32IM target. With clang and the ld.lld linker, it can be generated using
`-march=rv32im` and `-mabi=ilp32`, together with the LLVM feature flag
`features=+m` and the LLVM target `riscv32-unknown-none`.
[RISC-V specification]: https://riscv.org/wp-content/uploads/2015/01/riscv-calling.pdf
[cargo-risczero]: https://docs.rs/cargo-risczero/latest/cargo_risczero/
[risc0-build]: https://crates.io/crates/risc0-build

View file

@ -108,15 +108,13 @@ impl Res {
Res::Primitive(_) => return Suggestion::Prefix("prim"),
Res::Def(kind, _) => kind,
};
if kind == DefKind::Macro(MacroKind::Bang) {
return Suggestion::Macro;
} else if kind == DefKind::Fn || kind == DefKind::AssocFn {
return Suggestion::Function;
} else if kind == DefKind::Field {
return Suggestion::RemoveDisambiguator;
}
let prefix = match kind {
DefKind::Fn | DefKind::AssocFn => return Suggestion::Function,
DefKind::Field => return Suggestion::RemoveDisambiguator,
DefKind::Macro(MacroKind::Bang) => return Suggestion::Macro,
DefKind::Macro(MacroKind::Derive) => "derive",
DefKind::Struct => "struct",
DefKind::Enum => "enum",
DefKind::Trait => "trait",
@ -126,7 +124,6 @@ impl Res {
"const"
}
DefKind::Static(_) => "static",
DefKind::Macro(MacroKind::Derive) => "derive",
// Now handle things that don't have a specific disambiguator
_ => match kind
.ns()
@ -283,20 +280,15 @@ impl<'a, 'tcx> LinkCollector<'a, 'tcx> {
debug!("looking for enum variant {path_str}");
let mut split = path_str.rsplitn(3, "::");
let variant_field_name = split
.next()
.map(|f| Symbol::intern(f))
.expect("fold_item should ensure link is non-empty");
let variant_name =
// we're not sure this is a variant at all, so use the full string
let variant_field_name = Symbol::intern(split.next().unwrap());
// We're not sure this is a variant at all, so use the full string.
// If there's no second component, the link looks like `[path]`.
// So there's no partial res and we should say the whole link failed to resolve.
split.next().map(|f| Symbol::intern(f)).ok_or_else(no_res)?;
let path = split
.next()
let variant_name = Symbol::intern(split.next().ok_or_else(no_res)?);
// If there's no third component, we saw `[a::b]` before and it failed to resolve.
// So there's no partial res.
.ok_or_else(no_res)?;
let path = split.next().ok_or_else(no_res)?;
let ty_res = self.resolve_path(&path, TypeNS, item_id, module_id).ok_or_else(no_res)?;
match ty_res {
@ -447,41 +439,29 @@ impl<'a, 'tcx> LinkCollector<'a, 'tcx> {
}
// Try looking for methods and associated items.
let mut split = path_str.rsplitn(2, "::");
// NB: `split`'s first element is always defined, even if the delimiter was not present.
// NB: `item_str` could be empty when resolving in the root namespace (e.g. `::std`).
let item_str = split.next().unwrap();
let item_name = Symbol::intern(item_str);
let path_root = split
.next()
// NB: `path_root` could be empty when resolving in the root namespace (e.g. `::std`).
let (path_root, item_str) = path_str.rsplit_once("::").ok_or_else(|| {
// If there's no `::`, it's not an associated item.
// So we can be sure that `rustc_resolve` was accurate when it said it wasn't resolved.
.ok_or_else(|| {
debug!("found no `::`, assuming {item_name} was correctly not in scope");
UnresolvedPath {
item_id,
module_id,
partial_res: None,
unresolved: item_str.into(),
}
debug!("found no `::`, assuming {path_str} was correctly not in scope");
UnresolvedPath { item_id, module_id, partial_res: None, unresolved: path_str.into() }
})?;
let item_name = Symbol::intern(item_str);
// FIXME(#83862): this arbitrarily gives precedence to primitives over modules to support
// links to primitives when `#[rustc_doc_primitive]` is present. It should give an ambiguity
// error instead and special case *only* modules with `#[rustc_doc_primitive]`, not all
// primitives.
match resolve_primitive(&path_root, TypeNS)
.or_else(|| self.resolve_path(&path_root, TypeNS, item_id, module_id))
.and_then(|ty_res| {
let candidates = self
.resolve_associated_item(ty_res, item_name, ns, module_id)
match resolve_primitive(path_root, TypeNS)
.or_else(|| self.resolve_path(path_root, TypeNS, item_id, module_id))
.map(|ty_res| {
self.resolve_associated_item(ty_res, item_name, ns, module_id)
.into_iter()
.map(|(res, def_id)| (res, Some(def_id)))
.collect::<Vec<_>>();
if !candidates.is_empty() { Some(candidates) } else { None }
.collect::<Vec<_>>()
}) {
Some(r) => Ok(r),
None => {
Some(r) if !r.is_empty() => Ok(r),
_ => {
if ns == Namespace::ValueNS {
self.variant_field(path_str, item_id, module_id)
.map(|(res, def_id)| vec![(res, Some(def_id))])
@ -1263,7 +1243,7 @@ impl LinkCollector<'_, '_> {
self.report_rawptr_assoc_feature_gate(diag.dox, &diag.link_range, diag.item);
return None;
} else {
candidates = vec![candidates[0]];
candidates = vec![*candidate];
}
}
@ -1271,8 +1251,10 @@ impl LinkCollector<'_, '_> {
// and after removing duplicated kinds, only one remains, the `ambiguity_error` function
// won't emit an error. So at this point, we can just take the first candidate as it was
// the first retrieved and use it to generate the link.
if candidates.len() > 1 && !ambiguity_error(self.cx, &diag, &key.path_str, &candidates) {
candidates = vec![candidates[0]];
if let [candidate, _candidate2, ..] = *candidates
&& !ambiguity_error(self.cx, &diag, &key.path_str, &candidates)
{
candidates = vec![candidate];
}
if let &[(res, def_id)] = candidates.as_slice() {
@ -1322,12 +1304,11 @@ impl LinkCollector<'_, '_> {
let mut err = ResolutionFailure::NotResolved(err);
for other_ns in [TypeNS, ValueNS, MacroNS] {
if other_ns != expected_ns {
if let Ok(res) =
self.resolve(path_str, other_ns, item_id, module_id)
&& !res.is_empty()
if let Ok(&[res, ..]) =
self.resolve(path_str, other_ns, item_id, module_id).as_deref()
{
err = ResolutionFailure::WrongNamespace {
res: full_res(self.cx.tcx, res[0]),
res: full_res(self.cx.tcx, res),
expected_ns,
};
break;
@ -1748,7 +1729,6 @@ fn report_diagnostic(
lint.note(format!(
"the link appears in this line:\n\n{line}\n\
{indicator: <before$}{indicator:^<found$}",
line = line,
indicator = "",
before = md_range.start - last_new_line_offset,
found = md_range.len(),
@ -1807,18 +1787,13 @@ fn resolution_failure(
let item_id = *item_id;
let module_id = *module_id;
// FIXME(jynelson): this might conflict with my `Self` fix in #76467
// FIXME: maybe use itertools `collect_tuple` instead?
fn split(path: &str) -> Option<(&str, &str)> {
let mut splitter = path.rsplitn(2, "::");
splitter.next().and_then(|right| splitter.next().map(|left| (left, right)))
}
// Check if _any_ parent of the path gets resolved.
// If so, report it and say the first which failed; if not, say the first path segment didn't resolve.
let mut name = path_str;
'outer: loop {
let Some((start, end)) = split(name) else {
// FIXME(jynelson): this might conflict with my `Self` fix in #76467
let Some((start, end)) = name.rsplit_once("::") else {
// avoid bug that marked [Quux::Z] as missing Z, not Quux
if partial_res.is_none() {
*unresolved = name.into();
@ -1829,8 +1804,8 @@ fn resolution_failure(
for ns in [TypeNS, ValueNS, MacroNS] {
if let Ok(v_res) = collector.resolve(start, ns, item_id, module_id) {
debug!("found partial_res={v_res:?}");
if !v_res.is_empty() {
*partial_res = Some(full_res(tcx, v_res[0]));
if let Some(&res) = v_res.first() {
*partial_res = Some(full_res(tcx, res));
*unresolved = end.into();
break 'outer;
}
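Both rustdoc hunks above replace hand-rolled `rsplitn` helpers with `str::rsplit_once` (stable since 1.52), which splits on the last occurrence of the separator:

assert_eq!("a::b::c".rsplit_once("::"), Some(("a::b", "c")));
assert_eq!("leaf".rsplit_once("::"), None);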

View file

@ -121,6 +121,7 @@ static TARGETS: &[&str] = &[
"powerpc64-unknown-linux-gnu",
"powerpc64le-unknown-linux-gnu",
"riscv32i-unknown-none-elf",
"riscv32im-risc0-zkvm-elf",
"riscv32im-unknown-none-elf",
"riscv32imc-unknown-none-elf",
"riscv32imac-unknown-none-elf",

Some files were not shown because too many files have changed in this diff.