// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! This query borrow-checks the MIR to (further) ensure it is not broken.
use borrow_check::nll::region_infer::RegionInferenceContext;
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::hir::map::definitions::DefPathData;
use rustc::infer::InferCtxt;
use rustc::lint::builtin::UNUSED_MUT;
use rustc::middle::borrowck::SignalledError;
use rustc::mir::{AggregateKind, BasicBlock, BorrowCheckResult, BorrowKind};
use rustc::mir::{ClearCrossCrate, Local, Location, Mir, Mutability, Operand, Place};
use rustc::mir::{Field, Projection, ProjectionElem, Rvalue, Statement, StatementKind};
use rustc::mir::{Terminator, TerminatorKind};
use rustc::ty::query::Providers;
use rustc::ty::{self, ParamEnv, TyCtxt, Ty};

use rustc_errors::{Diagnostic, DiagnosticBuilder, Level};
use rustc_data_structures::graph::dominators::Dominators;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::indexed_set::IdxSetBuf;
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::small_vec::SmallVec;

use std::rc::Rc;

use syntax_pos::Span;

use dataflow::indexes::BorrowIndex;
use dataflow::move_paths::{HasMoveData, LookupResult, MoveData, MoveError, MovePathIndex};
use dataflow::Borrows;
use dataflow::DataflowResultsConsumer;
use dataflow::FlowAtLocation;
use dataflow::MoveDataParamEnv;
use dataflow::{do_dataflow, DebugFormatted};
use dataflow::{EverInitializedPlaces, MovingOutStatements};
use dataflow::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
use util::borrowck_errors::{BorrowckErrors, Origin};

use self::borrow_set::{BorrowData, BorrowSet};
use self::flows::Flows;
use self::location::LocationTable;
use self::prefixes::PrefixSet;
use self::MutateMode::{JustWrite, WriteAndRead};
use self::mutability_errors::AccessKind;

use self::path_utils::*;

crate mod borrow_set;
mod error_reporting;
mod flows;
mod location;
mod move_errors;
mod mutability_errors;
mod path_utils;
crate mod place_ext;
mod places_conflict;
mod prefixes;
mod used_muts;

pub(crate) mod nll;

pub fn provide(providers: &mut Providers) {
    *providers = Providers {
        mir_borrowck,
        ..*providers
    };
}

fn mir_borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> BorrowCheckResult<'tcx> {
    let input_mir = tcx.mir_validated(def_id);
    debug!("run query mir_borrowck: {}", tcx.item_path_str(def_id));

    let mut return_early;

    // Return early if we are not supposed to use the MIR borrow checker for this function.
    return_early = !tcx.has_attr(def_id, "rustc_mir") && !tcx.use_mir_borrowck();

    if tcx.is_struct_constructor(def_id) {
        // We are not borrow checking the automatically generated struct constructors
        // because we want to accept structs such as this (taken from the `linked-hash-map`
        // crate):
        // ```rust
        // struct Qey<Q: ?Sized>(Q);
        // ```
        // MIR of this struct constructor looks something like this:
        // ```rust
        // fn Qey(_1: Q) -> Qey<Q>{
        //     let mut _0: Qey<Q>;              // return place
        //
        //     bb0: {
        //         (_0.0: Q) = move _1;         // bb0[0]: scope 0 at src/main.rs:1:1: 1:26
        //         return;                      // bb0[1]: scope 0 at src/main.rs:1:1: 1:26
        //     }
        // }
        // ```
        // The problem here is that `(_0.0: Q) = move _1;` is valid only if `Q` is
        // of statically known size, which is not known to be true because of the
        // `Q: ?Sized` constraint. However, it is true because the constructor can be
        // called only when `Q` is of statically known size.
        return_early = true;
    }

    if return_early {
        return BorrowCheckResult {
            closure_requirements: None,
            used_mut_upvars: SmallVec::new(),
        };
    }

    let opt_closure_req = tcx.infer_ctxt().enter(|infcx| {
        let input_mir: &Mir = &input_mir.borrow();
        do_mir_borrowck(&infcx, input_mir, def_id)
    });
    debug!("mir_borrowck done");

    opt_closure_req
}

fn do_mir_borrowck<'a, 'gcx, 'tcx>(
    infcx: &InferCtxt<'a, 'gcx, 'tcx>,
    input_mir: &Mir<'gcx>,
    def_id: DefId,
) -> BorrowCheckResult<'gcx> {
    debug!("do_mir_borrowck(def_id = {:?})", def_id);

    let tcx = infcx.tcx;
    let attributes = tcx.get_attrs(def_id);
    let param_env = tcx.param_env(def_id);
    let id = tcx
        .hir
        .as_local_node_id(def_id)
        .expect("do_mir_borrowck: non-local DefId");

    // Replace all regions with fresh inference variables. This
    // requires first making our own copy of the MIR. This copy will
    // be modified (in place) to contain non-lexical lifetimes. It
    // will have a lifetime tied to the inference context.
    let mut mir: Mir<'tcx> = input_mir.clone();
    let free_regions = nll::replace_regions_in_mir(infcx, def_id, param_env, &mut mir);
    let mir = &mir; // no further changes
    let location_table = &LocationTable::new(mir);

    let mut errors_buffer = Vec::new();
    let (move_data, move_errors): (MoveData<'tcx>, Option<Vec<(Place<'tcx>, MoveError<'tcx>)>>) =
        match MoveData::gather_moves(mir, tcx) {
            Ok(move_data) => (move_data, None),
            Err((move_data, move_errors)) => (move_data, Some(move_errors)),
        };

    let mdpe = MoveDataParamEnv {
        move_data: move_data,
        param_env: param_env,
    };
    let body_id = match tcx.def_key(def_id).disambiguated_data.data {
        DefPathData::StructCtor | DefPathData::EnumVariant(_) => None,
        _ => Some(tcx.hir.body_owned_by(id)),
    };

    let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
    let mut flow_inits = FlowAtLocation::new(do_dataflow(
        tcx,
        mir,
        id,
        &attributes,
        &dead_unwinds,
        MaybeInitializedPlaces::new(tcx, mir, &mdpe),
        |bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]),
    ));
    let flow_uninits = FlowAtLocation::new(do_dataflow(
        tcx,
        mir,
        id,
        &attributes,
        &dead_unwinds,
        MaybeUninitializedPlaces::new(tcx, mir, &mdpe),
        |bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]),
    ));
    let flow_move_outs = FlowAtLocation::new(do_dataflow(
        tcx,
        mir,
        id,
        &attributes,
        &dead_unwinds,
        MovingOutStatements::new(tcx, mir, &mdpe),
        |bd, i| DebugFormatted::new(&bd.move_data().moves[i]),
    ));
    let flow_ever_inits = FlowAtLocation::new(do_dataflow(
        tcx,
        mir,
        id,
        &attributes,
        &dead_unwinds,
        EverInitializedPlaces::new(tcx, mir, &mdpe),
        |bd, i| DebugFormatted::new(&bd.move_data().inits[i]),
    ));

    let borrow_set = Rc::new(BorrowSet::build(tcx, mir));

    // If we are in non-lexical mode, compute the non-lexical lifetimes.
    let (regioncx, polonius_output, opt_closure_req) = nll::compute_regions(
        infcx,
        def_id,
        free_regions,
        mir,
        location_table,
        param_env,
        &mut flow_inits,
        &mdpe.move_data,
        &borrow_set,
        &mut errors_buffer,
    );
    let regioncx = Rc::new(regioncx);

    let flow_borrows = FlowAtLocation::new(do_dataflow(
        tcx,
        mir,
        id,
        &attributes,
        &dead_unwinds,
        Borrows::new(tcx, mir, regioncx.clone(), def_id, body_id, &borrow_set),
        |rs, i| DebugFormatted::new(&rs.location(i)),
    ));

    let movable_generator = match tcx.hir.get(id) {
        hir::map::Node::NodeExpr(&hir::Expr {
            node: hir::ExprKind::Closure(.., Some(hir::GeneratorMovability::Static)),
            ..
        }) => false,
        _ => true,
    };

    let dominators = mir.dominators();

    let mut mbcx = MirBorrowckCtxt {
        tcx: tcx,
        mir: mir,
        mir_def_id: def_id,
        move_data: &mdpe.move_data,
        param_env: param_env,
        location_table,
        movable_generator,
        locals_are_invalidated_at_exit: match tcx.hir.body_owner_kind(id) {
            hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_) => false,
            hir::BodyOwnerKind::Fn => true,
        },
        access_place_error_reported: FxHashSet(),
        reservation_error_reported: FxHashSet(),
        moved_error_reported: FxHashSet(),
        errors_buffer,
        nonlexical_regioncx: regioncx,
        used_mut: FxHashSet(),
        used_mut_upvars: SmallVec::new(),
        borrow_set,
        dominators,
    };

    let mut state = Flows::new(
        flow_borrows,
        flow_uninits,
        flow_move_outs,
        flow_ever_inits,
        polonius_output,
    );

    if let Some(errors) = move_errors {
        mbcx.report_move_errors(errors);
    }
    mbcx.analyze_results(&mut state); // entry point for DataflowResultsConsumer

    // For each non-user-declared mutable variable that was used, check if it was
    // assigned from a user-declared local. If so, then put that local into the
    // `used_mut` set. Note that this set is expected to be small - only upvars from
    // closures would have a chance of erroneously adding non-user-defined mutable
    // vars to the set.
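    //
    // Illustrative example (added here for exposition, not taken from the original
    // code): the `unused_mut` lint emitted further below fires on code like
    //
    // ```rust
    // fn example() {
    //     let mut x = 0; // warning: variable does not need to be mutable
    //     let _y = x;    // `x` is only ever read, never mutated
    // }
    // ```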
    let temporary_used_locals: FxHashSet<Local> = mbcx
        .used_mut
        .iter()
        .filter(|&local| !mbcx.mir.local_decls[*local].is_user_variable.is_some())
        .cloned()
        .collect();
    mbcx.gather_used_muts(temporary_used_locals);

    debug!("mbcx.used_mut: {:?}", mbcx.used_mut);

    let used_mut = mbcx.used_mut;

    for local in mbcx
        .mir
        .mut_vars_and_args_iter()
        .filter(|local| !used_mut.contains(local))
    {
        if let ClearCrossCrate::Set(ref vsi) = mbcx.mir.source_scope_local_data {
            let local_decl = &mbcx.mir.local_decls[local];

            // Skip implicit `self` argument for closures
            if local.index() == 1 && tcx.is_closure(mbcx.mir_def_id) {
                continue;
            }

            // Skip over locals that begin with an underscore or have no name
            match local_decl.name {
                Some(name) => if name.as_str().starts_with("_") {
                    continue;
                },
                None => continue,
            }

            let span = local_decl.source_info.span;
            let mut_span = tcx.sess.source_map().span_until_non_whitespace(span);

            let mut err = tcx.struct_span_lint_node(
                UNUSED_MUT,
                vsi[local_decl.source_info.scope].lint_root,
                span,
                "variable does not need to be mutable",
            );
            err.span_suggestion_short(mut_span, "remove this `mut`", "".to_owned());

            err.buffer(&mut mbcx.errors_buffer);
        }
    }

    if mbcx.errors_buffer.len() > 0 {
        mbcx.errors_buffer.sort_by_key(|diag| diag.span.primary_span());

        if tcx.migrate_borrowck() {
            match tcx.borrowck(def_id).signalled_any_error {
                SignalledError::NoErrorsSeen => {
                    // if AST-borrowck signalled no errors, then
                    // downgrade all the buffered MIR-borrowck errors
                    // to warnings.
                    for err in &mut mbcx.errors_buffer {
                        if err.is_error() {
                            err.level = Level::Warning;
                            err.warn("This error has been downgraded to a warning \
                                      for backwards compatibility with previous releases.\n\
                                      It represents potential unsoundness in your code.\n\
                                      This warning will become a hard error in the future.");
                        }
                    }
                }
                SignalledError::SawSomeError => {
                    // if AST-borrowck signalled a (cancelled) error,
                    // then we will just emit the buffered
                    // MIR-borrowck errors as normal.
                }
            }
        }

        for diag in mbcx.errors_buffer.drain(..) {
            DiagnosticBuilder::new_diagnostic(mbcx.tcx.sess.diagnostic(), diag).emit();
        }
    }

    let result = BorrowCheckResult {
        closure_requirements: opt_closure_req,
        used_mut_upvars: mbcx.used_mut_upvars,
    };

    debug!("do_mir_borrowck: result = {:#?}", result);

    result
}

pub struct MirBorrowckCtxt<'cx, 'gcx: 'tcx, 'tcx: 'cx> {
    tcx: TyCtxt<'cx, 'gcx, 'tcx>,
    mir: &'cx Mir<'tcx>,
    mir_def_id: DefId,
    move_data: &'cx MoveData<'tcx>,

    /// Map from MIR `Location` to `LocationIndex`; created
    /// when MIR borrowck begins.
    location_table: &'cx LocationTable,

    param_env: ParamEnv<'gcx>,
    movable_generator: bool,
    /// This keeps track of whether local variables are freed when the function
    /// exits even without a `StorageDead`, which appears to be the case for
    /// constants.
    ///
    /// I'm not sure this is the right approach - @eddyb could you try and
    /// figure this out?
    locals_are_invalidated_at_exit: bool,
    /// This field keeps track of when borrow errors are reported in the access_place function
    /// so that there is no duplicate reporting. This field cannot also be used for the conflicting
    /// borrow errors that are handled by the `reservation_error_reported` field, as the inclusion
    /// of the `Span` type (while required to mute some errors) stops the muting of the reservation
    /// errors.
    access_place_error_reported: FxHashSet<(Place<'tcx>, Span)>,
    /// This field keeps track of when borrow conflict errors are reported
    /// for reservations, so that we don't report seemingly duplicate
    /// errors for corresponding activations.
    ///
    /// FIXME: Ideally this would be a set of BorrowIndex, not Places,
    /// but it is currently inconvenient to track down the BorrowIndex
    /// at the time we detect and report a reservation error.
    reservation_error_reported: FxHashSet<Place<'tcx>>,
    /// This field keeps track of errors reported in the checking of moved variables,
    /// so that we don't report seemingly duplicate errors.
    moved_error_reported: FxHashSet<Place<'tcx>>,
    /// Buffer of errors to be reported.
    errors_buffer: Vec<Diagnostic>,
    /// This field keeps track of all the local variables that are declared mut and are mutated.
    /// Used for the warning issued by an unused mutable local variable.
    used_mut: FxHashSet<Local>,
    /// If the function we're checking is a closure, then we'll need to report back the list of
    /// mutable upvars that have been used. This field keeps track of them.
    used_mut_upvars: SmallVec<[Field; 8]>,
    /// Non-lexical region inference context, if NLL is enabled. This
    /// contains the results from region inference and lets us e.g.
    /// find out which CFG points are contained in each borrow region.
    nonlexical_regioncx: Rc<RegionInferenceContext<'tcx>>,

    /// The set of borrows extracted from the MIR
    borrow_set: Rc<BorrowSet<'tcx>>,

    /// Dominators for MIR
    dominators: Dominators<BasicBlock>,
}

// Check that:
// 1. assignments are always made to mutable locations (FIXME: does that still really go here?)
// 2. loans made in overlapping scopes do not conflict
// 3. assignments do not affect things loaned out as immutable
// 4. moves do not affect things loaned out in any way
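//
// As a hedged illustration (added for exposition, not from the original code),
// rule 2 is what rejects code such as:
//
// ```rust
// let mut x = 0;
// let m = &mut x;
// let r = &x;   // error: cannot borrow `x` as immutable because it is
//               // also borrowed as mutable
// *m += 1;      // the mutable loan `m` is still live here
// ```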
impl<'cx, 'gcx, 'tcx> DataflowResultsConsumer<'cx, 'tcx> for MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
    type FlowState = Flows<'cx, 'gcx, 'tcx>;

    fn mir(&self) -> &'cx Mir<'tcx> {
        self.mir
    }

    fn visit_block_entry(&mut self, bb: BasicBlock, flow_state: &Self::FlowState) {
        debug!("MirBorrowckCtxt::process_block({:?}): {}", bb, flow_state);
    }

    fn visit_statement_entry(
        &mut self,
        location: Location,
        stmt: &Statement<'tcx>,
        flow_state: &Self::FlowState,
    ) {
        debug!(
            "MirBorrowckCtxt::process_statement({:?}, {:?}): {}",
            location, stmt, flow_state
        );
        let span = stmt.source_info.span;

        self.check_activations(location, span, flow_state);

        match stmt.kind {
            StatementKind::Assign(ref lhs, ref rhs) => {
                self.consume_rvalue(
                    ContextKind::AssignRhs.new(location),
                    (rhs, span),
                    location,
                    flow_state,
                );

                self.mutate_place(
                    ContextKind::AssignLhs.new(location),
                    (lhs, span),
                    Shallow(None),
                    JustWrite,
                    flow_state,
                );
            }
            StatementKind::ReadForMatch(ref place) => {
                self.access_place(
                    ContextKind::ReadForMatch.new(location),
                    (place, span),
                    (Deep, Read(ReadKind::Borrow(BorrowKind::Shared))),
                    LocalMutationIsAllowed::No,
                    flow_state,
                );
            }
            StatementKind::SetDiscriminant {
                ref place,
                variant_index: _,
            } => {
                self.mutate_place(
                    ContextKind::SetDiscrim.new(location),
                    (place, span),
                    Shallow(Some(ArtificialField::Discriminant)),
                    JustWrite,
                    flow_state,
                );
            }
            StatementKind::InlineAsm {
                ref asm,
                ref outputs,
                ref inputs,
            } => {
                let context = ContextKind::InlineAsm.new(location);
                for (o, output) in asm.outputs.iter().zip(outputs) {
                    if o.is_indirect {
                        // FIXME(eddyb) indirect inline asm outputs should
                        // be encoded through MIR place derefs instead.
                        self.access_place(
                            context,
                            (output, span),
                            (Deep, Read(ReadKind::Copy)),
                            LocalMutationIsAllowed::No,
                            flow_state,
                        );
                        self.check_if_path_or_subpath_is_moved(
                            context,
                            InitializationRequiringAction::Use,
                            (output, span),
                            flow_state,
                        );
                    } else {
                        self.mutate_place(
                            context,
                            (output, span),
                            if o.is_rw { Deep } else { Shallow(None) },
                            if o.is_rw { WriteAndRead } else { JustWrite },
                            flow_state,
                        );
                    }
                }
                for input in inputs {
                    self.consume_operand(context, (input, span), flow_state);
                }
            }
            StatementKind::EndRegion(ref _rgn) => {
                // ignored when consuming results (update to
                // flow_state already handled).
            }
            StatementKind::Nop
            | StatementKind::UserAssertTy(..)
            | StatementKind::Validate(..)
            | StatementKind::StorageLive(..) => {
                // `Nop`, `UserAssertTy`, `Validate`, and `StorageLive` are irrelevant
                // to borrow check.
            }
            StatementKind::StorageDead(local) => {
                self.access_place(
                    ContextKind::StorageDead.new(location),
                    (&Place::Local(local), span),
                    (Shallow(None), Write(WriteKind::StorageDeadOrDrop)),
                    LocalMutationIsAllowed::Yes,
                    flow_state,
                );
            }
        }
    }

    fn visit_terminator_entry(
        &mut self,
        location: Location,
        term: &Terminator<'tcx>,
        flow_state: &Self::FlowState,
    ) {
        let loc = location;
        debug!(
            "MirBorrowckCtxt::process_terminator({:?}, {:?}): {}",
            location, term, flow_state
        );
        let span = term.source_info.span;

        self.check_activations(location, span, flow_state);

        match term.kind {
            TerminatorKind::SwitchInt {
                ref discr,
                switch_ty: _,
                values: _,
                targets: _,
            } => {
                self.consume_operand(ContextKind::SwitchInt.new(loc), (discr, span), flow_state);
            }
            TerminatorKind::Drop {
                location: ref drop_place,
                target: _,
                unwind: _,
            } => {
                let gcx = self.tcx.global_tcx();

                // Compute the type with accurate region information.
                let drop_place_ty = drop_place.ty(self.mir, self.tcx);

                // Erase the regions.
                let drop_place_ty = self.tcx.erase_regions(&drop_place_ty).to_ty(self.tcx);

                // "Lift" into the gcx -- once regions are erased, this type should be in the
                // global arenas; this "lift" operation basically just asserts that is true, but
                // that is useful later.
                let drop_place_ty = gcx.lift(&drop_place_ty).unwrap();
debug!("visit_terminator_drop \
|
|
|
|
loc: {:?} term: {:?} drop_place: {:?} drop_place_ty: {:?} span: {:?}",
|
|
|
|
loc, term, drop_place, drop_place_ty, span);
|
|
|
|
|
|
|
|
self.visit_terminator_drop(
|
|
|
|
loc, term, flow_state, drop_place, drop_place_ty, span, SeenTy(None));
|
2017-07-05 14:52:18 +02:00
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
TerminatorKind::DropAndReplace {
|
|
|
|
location: ref drop_place,
|
|
|
|
value: ref new_value,
|
|
|
|
target: _,
|
|
|
|
unwind: _,
|
|
|
|
} => {
|
|
|
|
self.mutate_place(
|
|
|
|
ContextKind::DropAndReplace.new(loc),
|
|
|
|
(drop_place, span),
|
|
|
|
Deep,
|
|
|
|
JustWrite,
|
|
|
|
flow_state,
|
|
|
|
);
|
|
|
|
self.consume_operand(
|
|
|
|
ContextKind::DropAndReplace.new(loc),
|
|
|
|
(new_value, span),
|
|
|
|
flow_state,
|
|
|
|
);
|
2017-07-05 14:52:18 +02:00
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
TerminatorKind::Call {
|
|
|
|
ref func,
|
|
|
|
ref args,
|
|
|
|
ref destination,
|
|
|
|
cleanup: _,
|
|
|
|
} => {
|
|
|
|
self.consume_operand(ContextKind::CallOperator.new(loc), (func, span), flow_state);
|
2017-07-05 14:52:18 +02:00
|
|
|
for arg in args {
|
2017-11-17 04:47:02 -05:00
|
|
|
self.consume_operand(
|
|
|
|
ContextKind::CallOperand.new(loc),
|
|
|
|
(arg, span),
|
|
|
|
flow_state,
|
|
|
|
);
|
2017-07-05 14:52:18 +02:00
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
if let Some((ref dest, _ /*bb*/)) = *destination {
|
|
|
|
self.mutate_place(
|
|
|
|
ContextKind::CallDest.new(loc),
|
|
|
|
(dest, span),
|
|
|
|
Deep,
|
|
|
|
JustWrite,
|
|
|
|
flow_state,
|
|
|
|
);
|
2017-07-05 14:52:18 +02:00
|
|
|
}
|
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
TerminatorKind::Assert {
|
|
|
|
ref cond,
|
|
|
|
expected: _,
|
|
|
|
ref msg,
|
|
|
|
target: _,
|
|
|
|
cleanup: _,
|
|
|
|
} => {
|
|
|
|
self.consume_operand(ContextKind::Assert.new(loc), (cond, span), flow_state);
|
2018-04-27 15:21:31 +02:00
|
|
|
use rustc::mir::interpret::EvalErrorKind::BoundsCheck;
|
|
|
|
if let BoundsCheck { ref len, ref index } = *msg {
|
|
|
|
self.consume_operand(ContextKind::Assert.new(loc), (len, span), flow_state);
|
2018-06-22 00:10:52 -03:00
|
|
|
self.consume_operand(ContextKind::Assert.new(loc), (index, span), flow_state);
|
2017-07-05 14:52:18 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-17 04:47:02 -05:00
|
|
|
TerminatorKind::Yield {
|
|
|
|
ref value,
|
|
|
|
resume: _,
|
|
|
|
drop: _,
|
|
|
|
} => {
|
|
|
|
self.consume_operand(ContextKind::Yield.new(loc), (value, span), flow_state);
|
2018-01-11 19:50:40 +01:00
|
|
|
|
|
|
|
if self.movable_generator {
|
|
|
|
// Look for any active borrows to locals
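                    //
                    // Illustrative sketch (added for exposition, not from the
                    // original code): for a movable generator, holding a borrow
                    // of local data across a yield point is rejected, e.g.
                    //
                    // ```rust
                    // let gen = || {
                    //     let x = 0;
                    //     let r = &x;
                    //     yield;   // error[E0626]: borrow may still be in use
                    //     drop(r); //               when generator yields
                    // };
                    // ```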
                    let borrow_set = self.borrow_set.clone();
                    flow_state.with_outgoing_borrows(|borrows| {
                        for i in borrows {
                            let borrow = &borrow_set[i];
                            self.check_for_local_borrow(borrow, span);
                        }
                    });
                }
            }

            TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::GeneratorDrop => {
                // Returning from the function implicitly kills storage for all locals and statics.
                // Often, the storage will already have been killed by an explicit
                // StorageDead, but we don't always emit those (notably on unwind paths),
                // so this "extra check" serves as a kind of backup.
                let borrow_set = self.borrow_set.clone();
                flow_state.with_outgoing_borrows(|borrows| {
                    for i in borrows {
                        let borrow = &borrow_set[i];
                        let context = ContextKind::StorageDead.new(loc);
                        self.check_for_invalidation_at_exit(context, borrow, span);
                    }
                });
            }
            TerminatorKind::Goto { target: _ }
            | TerminatorKind::Abort
            | TerminatorKind::Unreachable
            | TerminatorKind::FalseEdges {
                real_target: _,
                imaginary_targets: _,
            }
            | TerminatorKind::FalseUnwind {
                real_target: _,
                unwind: _,
            } => {
                // no data used, thus irrelevant to borrowck
            }
        }
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum MutateMode {
    JustWrite,
    WriteAndRead,
}

use self::ReadOrWrite::{Activation, Read, Reservation, Write};
use self::ShallowOrDeep::{Deep, Shallow};

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ArtificialField {
    Discriminant,
    ArrayLength,
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ShallowOrDeep {
    /// From the RFC: "A *shallow* access means that the immediate
    /// fields reached at P are accessed, but references or pointers
    /// found within are not dereferenced. Right now, the only access
    /// that is shallow is an assignment like `x = ...;`, which would
    /// be a *shallow write* of `x`."
    Shallow(Option<ArtificialField>),

    /// From the RFC: "A *deep* access means that all data reachable
    /// through the given place may be invalidated or accessed by
    /// this action."
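    ///
    /// Illustrative sketch (added for exposition, not part of the RFC quote):
    /// a move is a *deep* write, so in
    ///
    /// ```rust
    /// let boxed = Box::new(1);
    /// let moved = boxed; // deep write of `boxed`: data behind the pointer moves too
    /// ```
    ///
    /// the whole box, including its heap contents, is invalidated, whereas
    /// the assignment `x = ...;` above only overwrites `x` itself and is
    /// therefore shallow.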
    Deep,
}

/// Kind of access to a value: read or write
/// (For informational purposes only)
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ReadOrWrite {
    /// From the RFC: "A *read* means that the existing data may be
    /// read, but will not be changed."
    Read(ReadKind),

    /// From the RFC: "A *write* means that the data may be mutated to
    /// new values or otherwise invalidated (for example, it could be
    /// de-initialized, as in a move operation)."
    Write(WriteKind),

    /// For two-phase borrows, we distinguish a reservation (which is treated
    /// like a Read) from an activation (which is treated like a write), and
    /// each of those is furthermore distinguished from Reads/Writes above.
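    ///
    /// Illustrative sketch (added for exposition, not from the original code):
    ///
    /// ```rust
    /// let mut v = vec![0];
    /// v.push(v.len()); // `&mut v` is *reserved* when the receiver is
    ///                  // evaluated, the shared read for `v.len()` is still
    ///                  // allowed, and the mutable borrow is *activated*
    ///                  // only when `push` is actually called.
    /// ```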
    Reservation(WriteKind),
    Activation(WriteKind, BorrowIndex),
}

/// Kind of read access to a value
/// (For informational purposes only)
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ReadKind {
    Borrow(BorrowKind),
    Copy,
}

/// Kind of write access to a value
/// (For informational purposes only)
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum WriteKind {
    StorageDeadOrDrop,
    MutableBorrow(BorrowKind),
    Mutate,
    Move,
}

/// When checking permissions for a place access, this flag is used to indicate that an immutable
/// local place can be mutated.
///
/// FIXME: @nikomatsakis suggested that this flag could be removed with the following modifications:
/// - Merge `check_access_permissions()` and `check_if_reassignment_to_immutable_state()`
/// - Split `is_mutable()` into `is_assignable()` (can be directly assigned) and
///   `is_declared_mutable()`
/// - Take flow state into consideration in `is_assignable()` for local variables
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum LocalMutationIsAllowed {
    Yes,
    /// We want use of immutable upvars to cause a "write to immutable upvar"
    /// error, not a "reassignment" error.
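    ///
    /// Illustrative sketch (added for exposition, not from the original code):
    /// this is the distinction between
    ///
    /// ```rust
    /// let x = 0;
    /// let c = || x = 1; // write to the immutable upvar `x` inside a closure
    /// ```
    ///
    /// and a plain reassignment of an immutable local such as
    /// `let x = 0; x = 1;`.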
    ExceptUpvars,
    No,
}

MIR: Fix value moved diagnose messages
MIR: adopt borrowck test
Fix trailing whitespace
span_bug! on unexpected action
Make RegionVid use newtype_index!
Closes #45843
Check rvalue aggregates during check_stmt in tycheck, add initial, (not passing) test
Fix failing test
Remove attributes and test comments accidentally left behind, add in span_mirbugs
Normalize LvalueTy for ops and format code to satisfy tidy check
only normalize operand types when in an ADT constructor
avoid early return
handle the active field index in unions
normalize types in ADT constructor
Fixes #45940
Fix borrowck compiler errors for upvars contain "spurious" dereferences
Fixes #46003
added associated function Box::leak
Box::leak - improve documentation
Box::leak - fixed bug in documentation
Box::leak - relaxed constraints wrt. lifetimes
Box::leak - updated documentation
Box::leak - made an oops, fixed now =)
Box::leak: update unstable issue number (46179).
Add test for #44953
Add missing Debug impls to std_unicode
Also adds #![deny(missing_debug_implementations)] so they don't get
missed again.
Amend RELEASES for 1.22.1
and fix the date for 1.22.0
Rename param in `[T]::swap_with_slice` from `src` to `other`.
The idea of ‘source’ and ‘destination’ aren’t very applicable for this
operation since both slices can both be considered sources and
destinations.
Clarify stdin behavior of `Command::output`.
Fixes #44929.
Add hints for the case of confusing enum with its variants
Add failing testcases
Add module population and case of enum in place of expression
Use for_each_child_stable in find_module
Use multiline text for crate conflict diagnostics
Make float::from_bits transmute (and update the documentation to reflect this).
The current implementation/documentation was made to avoid sNaN because of
potential safety issues implied by old/bad LLVM documentation. These issues
aren't real, so we can just make the implementation transmute (as permitted
by the existing documentation of this method).
Also the documentation didn't actually match the behaviour: it said we may
change sNaNs, but in fact we canonicalized *all* NaNs.
Also an example in the documentation was wrong: it said we *always* change
sNaNs, when the documentation was explicitly written to indicate it was
implementation-defined.
This makes to_bits and from_bits perfectly roundtrip cross-platform, except
for one caveat: although the 2008 edition of IEEE-754 specifies how to
interpet the signaling bit, earlier editions didn't. This lead to some platforms
picking the opposite interpretation, so all signaling NaNs on x86/ARM are quiet
on MIPS, and vice-versa.
NaN-boxing is a fairly important optimization, while we don't even guarantee
that float operations properly preserve signalingness. As such, this seems like
the more natural strategy to take (as opposed to trying to mangle the signaling
bit on a per-platform basis).
This implementation is also, of course, faster.
Simplify an Iterator::fold to Iterator::any
This method of once-diagnostics doesn't allow nesting
UI tests extract the regular output from the 'rendered' field in json
Merge cfail and ui tests into ui tests
Add a MIR pass to lower 128-bit operators to lang item calls
Runs only with `-Z lower_128bit_ops` since it's not hooked into targets yet.
Include tuple projections in MIR tests
Add type checking for the lang item
As part of doing so, add more lang items instead of passing u128 to the i128 ones where it doesn't matter in twos-complement.
Handle shifts properly
* The overflow-checking shift items need to take a full 128-bit type, since they need to be able to detect idiocy like `1i128 << (1u128 << 127)`
* The unchecked ones just take u32, like the `*_sh?` methods in core
* Because shift-by-anything is allowed, cast into a new local for every shift
incr.comp.: Make sure we don't lose unused green results from the query cache.
rustbuild: Update LLVM and enable ThinLTO
This commit updates LLVM to fix #45511 (https://reviews.llvm.org/D39981) and
also reenables ThinLTO for libtest now that we shouldn't hit #45768. This also
opportunistically enables ThinLTO for libstd which was previously blocked
(#45661) on test failures related to debuginfo with a presumed cause of #45511.
Closes #45511
std: Flag Windows TLS dtor symbol as #[used]
Turns out ThinLTO was internalizing this symbol and eliminating it. Worse yet, if
you compiled with LTO, it turns out no TLS destructors would run on Windows! The
`#[used]` annotation should be a more bulletproof implementation (in the face of
LTO) of preserving this symbol all the way through in LLVM and ensuring it makes
it all the way to the linker which will take care of it.
Add enum InitializationRequiringAction
Fix tidy tests
2017-11-23 17:06:48 +05:30
|
|
|
#[derive(Copy, Clone)]
|
|
|
|
enum InitializationRequiringAction {
|
|
|
|
Update,
|
|
|
|
Borrow,
|
|
|
|
Use,
|
|
|
|
Assignment,
|
|
|
|
}
|
|
|
|
|
2018-04-21 23:41:44 -07:00
|
|
|
struct RootPlace<'d, 'tcx: 'd> {
|
|
|
|
place: &'d Place<'tcx>,
|
|
|
|
is_local_mutation_allowed: LocalMutationIsAllowed,
|
|
|
|
}
|
|
|
|
|
2017-11-23 17:06:48 +05:30
|
|
|
impl InitializationRequiringAction {
|
|
|
|
fn as_noun(self) -> &'static str {
|
|
|
|
match self {
|
2017-11-17 04:47:02 -05:00
|
|
|
InitializationRequiringAction::Update => "update",
|
|
|
|
InitializationRequiringAction::Borrow => "borrow",
|
|
|
|
InitializationRequiringAction::Use => "use",
|
|
|
|
InitializationRequiringAction::Assignment => "assign",
|
2017-11-23 17:06:48 +05:30
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn as_verb_in_past_tense(self) -> &'static str {
|
|
|
|
match self {
|
2017-11-17 04:47:02 -05:00
|
|
|
InitializationRequiringAction::Update => "updated",
|
|
|
|
InitializationRequiringAction::Borrow => "borrowed",
|
|
|
|
InitializationRequiringAction::Use => "used",
|
|
|
|
InitializationRequiringAction::Assignment => "assigned",
|
2017-11-23 17:06:48 +05:30
|
|
|
}
|
|
|
|
}
|
|
|
|
}
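// A brief usage sketch of the two accessors above (illustrative only, not the
// actual diagnostics code; it assumes the enum is in scope):
//
// ```rust
// let action = InitializationRequiringAction::Borrow;
// assert_eq!(action.as_noun(), "borrow");
// assert_eq!(action.as_verb_in_past_tense(), "borrowed");
// ```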
|
|
|
|
|
2018-07-26 22:29:50 +02:00
|
|
|
/// A simple linked-list threaded up the stack of recursive calls in `visit_terminator_drop`.
|
|
|
|
#[derive(Copy, Clone, Debug)]
|
|
|
|
struct SeenTy<'a, 'gcx: 'a>(Option<(Ty<'gcx>, &'a SeenTy<'a, 'gcx>)>);
|
|
|
|
|
|
|
|
impl<'a, 'gcx> SeenTy<'a, 'gcx> {
|
|
|
|
/// Return a new list with `ty` prepended to the front of `self`.
|
|
|
|
fn cons(&'a self, ty: Ty<'gcx>) -> Self {
|
|
|
|
SeenTy(Some((ty, self)))
|
|
|
|
}
|
|
|
|
|
|
|
|
/// True if and only if `ty` occurs on the linked list `self`.
|
|
|
|
fn have_seen(self, ty: Ty) -> bool {
|
|
|
|
let mut this = self.0;
|
|
|
|
loop {
|
|
|
|
match this {
|
|
|
|
None => return false,
|
|
|
|
Some((seen_ty, recur)) => {
|
|
|
|
if seen_ty == ty {
|
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
this = recur.0;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
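//
// A minimal, self-contained sketch of the same stack-threaded pattern
// (hypothetical names, with `u32` standing in for `Ty<'gcx>`), showing how
// each recursive frame conses itself onto the list it passes down and how a
// cycle is detected by scanning that list:
//
// ```rust
// #[derive(Copy, Clone)]
// struct Seen<'a>(Option<(u32, &'a Seen<'a>)>);
//
// fn children(n: u32) -> Vec<u32> {
//     if n < 3 { vec![n + 1, 0] } else { vec![] }
// }
//
// fn walk(node: u32, prev: Seen<'_>) {
//     let mut cur = prev.0;
//     while let Some((seen, rest)) = cur {
//         if seen == node { return; }        // already on the stack: cycle, bail out
//         cur = rest.0;
//     }
//     let seen = Seen(Some((node, &prev)));  // "cons" onto this stack frame
//     for child in children(node) {
//         walk(child, seen);
//     }
// }
//
// fn main() { walk(0, Seen(None)); }
// ```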
|
|
|
|
|
2017-11-07 04:44:41 -05:00
|
|
|
impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
|
2018-02-05 14:49:27 -05:00
|
|
|
/// Invokes `access_place` as appropriate for dropping the value
|
|
|
|
/// at `drop_place`. Note that the *actual* `Drop` in the MIR is
|
|
|
|
/// always for a variable (e.g., `Drop(x)`) -- but we recursively
|
|
|
|
/// break this variable down into subpaths (e.g., `Drop(x.foo)`)
|
|
|
|
/// to indicate more precisely which fields might actually be
|
|
|
|
/// accessed by a destructor.
|
2018-02-01 12:27:56 +00:00
|
|
|
fn visit_terminator_drop(
|
|
|
|
&mut self,
|
|
|
|
loc: Location,
|
|
|
|
term: &Terminator<'tcx>,
|
|
|
|
flow_state: &Flows<'cx, 'gcx, 'tcx>,
|
|
|
|
drop_place: &Place<'tcx>,
|
2018-02-14 18:14:31 +00:00
|
|
|
erased_drop_place_ty: ty::Ty<'gcx>,
|
2018-02-01 12:27:56 +00:00
|
|
|
span: Span,
|
2018-07-26 22:29:50 +02:00
|
|
|
prev_seen: SeenTy<'_, 'gcx>,
|
2018-02-01 12:27:56 +00:00
|
|
|
) {
|
2018-07-26 22:29:50 +02:00
|
|
|
if prev_seen.have_seen(erased_drop_place_ty) {
|
|
|
|
// if we have directly seen the input ty `T`, then we must
|
|
|
|
// have had some *direct* ownership loop between `T` and
|
|
|
|
// some directly-owned (as in, actually traversed by
|
|
|
|
// recursive calls below) part that is also of type `T`.
|
|
|
|
//
|
|
|
|
// Note: in *all* such cases, the data in question cannot
|
|
|
|
// be constructed (nor destructed) in finite time/space.
|
|
|
|
//
|
|
|
|
// Proper examples, some of which are statically rejected:
|
|
|
|
//
|
|
|
|
// * `struct A { field: A, ... }`:
|
|
|
|
// statically rejected as infinite size
|
|
|
|
//
|
|
|
|
// * `type B = (B, ...);`:
|
|
|
|
// statically rejected as cyclic
|
|
|
|
//
|
|
|
|
// * `struct C { field: Box<C>, ... }`
|
|
|
|
// * `struct D { field: Box<(D, D)>, ... }`:
|
|
|
|
// *accepted*, though impossible to construct
|
|
|
|
//
|
|
|
|
// By contrast, here is a case that is *not* such an example:
|
|
|
|
// * `struct Z { field: Option<Box<Z>>, ... }`:
|
|
|
|
// Here, the type is both representable in finite space (due to the boxed indirection)
|
|
|
|
// and constructable in finite time (since the recursion can bottom out with `None`).
|
|
|
|
// This is an obvious instance of something the compiler must accept.
|
|
|
|
//
|
|
|
|
// Since some of the above impossible cases like `C` and
|
|
|
|
// `D` are accepted by the compiler, we must take care not
|
|
|
|
// to infinite-loop while processing them. But since such
|
|
|
|
// cases cannot actually arise, it is sound for us to just
|
|
|
|
// skip them during drop. If the developer uses unsafe
|
|
|
|
// code to construct them, they should not be surprised by
|
|
|
|
// weird drop behavior in their resulting code.
|
|
|
|
debug!("visit_terminator_drop previously seen \
|
|
|
|
erased_drop_place_ty: {:?} on prev_seen: {:?}; returning early.",
|
|
|
|
erased_drop_place_ty, prev_seen);
|
|
|
|
return;
|
|
|
|
}
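//
// For concreteness, a standalone sketch of the distinction drawn above; only
// `Z` below can actually be built, while `A` and `C` are shown for contrast:
//
// ```rust
// // struct A { field: A }            // rejected: infinite size
// // struct C { field: Box<C> }       // accepted, yet impossible to construct
// struct Z { next: Option<Box<Z>> }   // accepted and constructible
//
// fn main() {
//     let _z = Z { next: Some(Box::new(Z { next: None })) };
// }
// ```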
|
|
|
|
|
2018-04-27 20:41:30 +01:00
|
|
|
let gcx = self.tcx.global_tcx();
|
2018-06-22 00:10:52 -03:00
|
|
|
let drop_field = |mir: &mut MirBorrowckCtxt<'cx, 'gcx, 'tcx>,
|
|
|
|
(index, field): (usize, ty::Ty<'gcx>)| {
|
2018-04-27 20:41:30 +01:00
|
|
|
let field_ty = gcx.normalize_erasing_regions(mir.param_env, field);
|
|
|
|
let place = drop_place.clone().field(Field::new(index), field_ty);
|
|
|
|
|
2018-07-26 22:29:50 +02:00
|
|
|
debug!("visit_terminator_drop drop_field place: {:?} field_ty: {:?}", place, field_ty);
|
|
|
|
let seen = prev_seen.cons(erased_drop_place_ty);
|
|
|
|
mir.visit_terminator_drop(loc, term, flow_state, &place, field_ty, span, seen);
|
2018-04-27 20:41:30 +01:00
|
|
|
};
|
|
|
|
|
2018-02-14 18:14:31 +00:00
|
|
|
match erased_drop_place_ty.sty {
|
2018-02-05 14:49:27 -05:00
|
|
|
// When a struct is being dropped, we need to check
|
|
|
|
// whether it has a destructor, if it does, then we can
|
|
|
|
// call it, if it does not then we need to check the
|
|
|
|
// individual fields instead. This way if `foo` has a
|
|
|
|
// destructor but `bar` does not, we will only check for
|
|
|
|
// borrows of `x.foo` and not `x.bar`. See #47703.
|
2018-02-01 12:27:56 +00:00
|
|
|
ty::TyAdt(def, substs) if def.is_struct() && !def.has_dtor(self.tcx) => {
|
2018-04-27 20:41:30 +01:00
|
|
|
def.all_fields()
|
|
|
|
.map(|field| field.ty(gcx, substs))
|
|
|
|
.enumerate()
|
|
|
|
.for_each(|field| drop_field(self, field));
|
|
|
|
}
|
|
|
|
// Same as above, but for tuples.
|
|
|
|
ty::TyTuple(tys) => {
|
2018-06-22 00:10:52 -03:00
|
|
|
tys.iter()
|
|
|
|
.cloned()
|
|
|
|
.enumerate()
|
2018-04-27 20:41:30 +01:00
|
|
|
.for_each(|field| drop_field(self, field));
|
|
|
|
}
|
2018-05-02 13:14:30 +02:00
|
|
|
// Closures also have disjoint fields, but they are only
|
|
|
|
// directly accessed in the body of the closure.
|
2018-04-27 20:41:30 +01:00
|
|
|
ty::TyClosure(def, substs)
|
2018-06-22 00:10:52 -03:00
|
|
|
if *drop_place == Place::Local(Local::new(1))
|
|
|
|
&& !self.mir.upvar_decls.is_empty() =>
|
|
|
|
{
|
|
|
|
substs
|
|
|
|
.upvar_tys(def, self.tcx)
|
|
|
|
.enumerate()
|
2018-05-02 13:14:30 +02:00
|
|
|
.for_each(|field| drop_field(self, field));
|
|
|
|
}
|
|
|
|
// Generators also have disjoint fields, but they are only
|
|
|
|
// directly accessed in the body of the generator.
|
|
|
|
ty::TyGenerator(def, substs, _)
|
2018-06-22 00:10:52 -03:00
|
|
|
if *drop_place == Place::Local(Local::new(1))
|
|
|
|
&& !self.mir.upvar_decls.is_empty() =>
|
|
|
|
{
|
|
|
|
substs
|
|
|
|
.upvar_tys(def, self.tcx)
|
|
|
|
.enumerate()
|
2018-04-27 20:41:30 +01:00
|
|
|
.for_each(|field| drop_field(self, field));
|
2018-03-06 02:29:03 -03:00
|
|
|
}
|
2018-07-26 22:29:50 +02:00
|
|
|
|
|
|
|
// #45696: special-case Box<T> by treating its dtor as
|
|
|
|
// only deep *across owned content*. Namely, we know
|
|
|
|
// dropping a box does not touch data behind any
|
|
|
|
// references it holds; if we were to instead fall into
|
|
|
|
// the base case below, we would have a Deep Write due to
|
|
|
|
// the box being `needs_drop`, and that Deep Write would
|
|
|
|
// touch `&mut` data in the box.
|
|
|
|
ty::TyAdt(def, _) if def.is_box() => {
|
|
|
|
// When/if we add a `&own T` type, this action would
|
|
|
|
// be like running the destructor of the `&own T`.
|
|
|
|
// (And the owner of backing storage referenced by the
|
|
|
|
// `&own T` would be responsible for deallocating that
|
|
|
|
// backing storage.)
|
|
|
|
|
|
|
|
// We model dropping any content owned by the box by
|
|
|
|
// recursing on the box contents. This catches cases like
|
|
|
|
// `Box<Box<ScribbleWhenDropped<&mut T>>>`, while
|
|
|
|
// still restricting Write to *owned* content.
|
|
|
|
let ty = erased_drop_place_ty.boxed_ty();
|
|
|
|
let deref_place = drop_place.clone().deref();
|
|
|
|
debug!("visit_terminator_drop drop-box-content deref_place: {:?} ty: {:?}",
|
|
|
|
deref_place, ty);
|
|
|
|
let seen = prev_seen.cons(erased_drop_place_ty);
|
|
|
|
self.visit_terminator_drop(
|
|
|
|
loc, term, flow_state, &deref_place, ty, span, seen);
|
|
|
|
}
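//
// A hedged illustration of the "scribbling" destructor named in the comment
// above (the type and code are hypothetical, for exposition only): the
// recursion must treat dropping the outer box as a write to `n`, because the
// inner value it owns writes through its `&mut` on drop.
//
// ```rust
// struct ScribbleWhenDropped<'a>(&'a mut u32);
//
// impl<'a> Drop for ScribbleWhenDropped<'a> {
//     fn drop(&mut self) { *self.0 = 0; }   // writes through the owned &mut
// }
//
// fn main() {
//     let mut n = 1;
//     {
//         let b = Box::new(Box::new(ScribbleWhenDropped(&mut n)));
//         drop(b);   // counts as a write to `n`, reached via the boxes' owned contents
//     }
//     assert_eq!(n, 0);
// }
// ```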
|
|
|
|
|
2018-02-01 12:27:56 +00:00
|
|
|
_ => {
|
2018-02-05 14:49:27 -05:00
|
|
|
// We have now refined the type of the value being
|
|
|
|
// dropped (potentially) to just the type of a
|
|
|
|
// subfield; so check whether that field's type still
|
2018-07-26 22:29:50 +02:00
|
|
|
// "needs drop".
|
2018-02-14 18:14:31 +00:00
|
|
|
if erased_drop_place_ty.needs_drop(gcx, self.param_env) {
|
2018-07-26 22:29:50 +02:00
|
|
|
// If so, we assume that the destructor may access
|
|
|
|
// any data it likes (i.e., a Deep Write).
|
2018-02-05 14:49:27 -05:00
|
|
|
self.access_place(
|
|
|
|
ContextKind::Drop.new(loc),
|
|
|
|
(drop_place, span),
|
|
|
|
(Deep, Write(WriteKind::StorageDeadOrDrop)),
|
|
|
|
LocalMutationIsAllowed::Yes,
|
|
|
|
flow_state,
|
|
|
|
);
|
2018-07-26 22:29:50 +02:00
|
|
|
} else {
|
|
|
|
// If there is no destructor, we still include a
|
|
|
|
// *shallow* write. This essentially ensures that
|
|
|
|
// borrows of the memory directly at `drop_place`
|
|
|
|
// cannot continue to be borrowed across the drop.
|
|
|
|
//
|
|
|
|
// If we were to use a Deep Write here, then any
|
|
|
|
// `&mut T` that is reachable from `drop_place`
|
|
|
|
// would get invalidated; fixing that is the
|
|
|
|
// essence of resolving issue #45696.
|
|
|
|
//
|
|
|
|
// * Note: In the compiler today, doing a Deep
|
|
|
|
// Write here would not actually break
|
|
|
|
// anything beyond #45696; for example it does not
|
|
|
|
// break this example:
|
|
|
|
//
|
|
|
|
// ```rust
|
|
|
|
// fn reborrow(x: &mut i32) -> &mut i32 { &mut *x }
|
|
|
|
// ```
|
|
|
|
//
|
|
|
|
// Why? Because we do not schedule/emit
|
|
|
|
// `Drop(x)` in the MIR unless `x` needs drop in
|
|
|
|
// the first place.
|
|
|
|
//
|
|
|
|
// FIXME: It's possible this logic actually should
|
|
|
|
// be attached to the `StorageDead` statement
|
|
|
|
// rather than the `Drop`. See discussion on PR
|
|
|
|
// #52782.
|
|
|
|
self.access_place(
|
|
|
|
ContextKind::Drop.new(loc),
|
|
|
|
(drop_place, span),
|
|
|
|
(Shallow(None), Write(WriteKind::StorageDeadOrDrop)),
|
|
|
|
LocalMutationIsAllowed::Yes,
|
|
|
|
flow_state,
|
|
|
|
);
|
2018-02-05 14:49:27 -05:00
|
|
|
}
|
2018-03-06 02:29:03 -03:00
|
|
|
}
|
2018-02-01 12:27:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-12-01 14:39:51 +02:00
|
|
|
/// Checks an access to the given place to see if it is allowed. Examines the set of borrows
|
2017-11-17 00:09:18 +00:00
|
|
|
/// that are in scope, as well as which paths have been initialized, to ensure that (a) the
|
2017-12-01 14:39:51 +02:00
|
|
|
/// place is initialized and (b) it is not borrowed in some way that would prevent this
|
2017-11-17 00:09:18 +00:00
|
|
|
/// access.
|
|
|
|
///
|
|
|
|
/// Any error reported here is recorded so that the same access is not reported again.
|
2017-11-17 04:47:02 -05:00
|
|
|
fn access_place(
|
|
|
|
&mut self,
|
|
|
|
context: Context,
|
|
|
|
place_span: (&Place<'tcx>, Span),
|
|
|
|
kind: (ShallowOrDeep, ReadOrWrite),
|
|
|
|
is_local_mutation_allowed: LocalMutationIsAllowed,
|
2017-12-07 17:00:26 +02:00
|
|
|
flow_state: &Flows<'cx, 'gcx, 'tcx>,
|
2018-08-10 17:34:56 -04:00
|
|
|
) {
|
2017-08-21 12:48:33 +02:00
|
|
|
let (sd, rw) = kind;
|
2017-10-21 21:15:04 +02:00
|
|
|
|
2017-12-07 17:45:13 +01:00
|
|
|
if let Activation(_, borrow_index) = rw {
|
|
|
|
if self.reservation_error_reported.contains(&place_span.0) {
|
2018-03-06 02:29:03 -03:00
|
|
|
debug!(
|
|
|
|
"skipping access_place for activation of invalid reservation \
|
|
|
|
place: {:?} borrow_index: {:?}",
|
|
|
|
place_span.0, borrow_index
|
|
|
|
);
|
2018-08-10 17:34:56 -04:00
|
|
|
return;
|
2017-12-07 17:45:13 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-21 18:06:55 +10:00
|
|
|
// Check is_empty() first because it's the common case, and doing it that
|
|
|
|
// way we avoid the clone() call.
|
|
|
|
if !self.access_place_error_reported.is_empty() &&
|
|
|
|
self
|
2018-06-22 00:10:52 -03:00
|
|
|
.access_place_error_reported
|
2018-03-06 02:29:03 -03:00
|
|
|
.contains(&(place_span.0.clone(), place_span.1))
|
|
|
|
{
|
|
|
|
debug!(
|
|
|
|
"access_place: suppressing error place_span=`{:?}` kind=`{:?}`",
|
|
|
|
place_span, kind
|
|
|
|
);
|
2018-08-10 17:34:56 -04:00
|
|
|
return;
|
2018-01-19 22:11:59 +00:00
|
|
|
}
|
|
|
|
|
2017-12-07 19:12:01 +02:00
|
|
|
let mutability_error =
|
2018-07-15 15:11:29 +01:00
|
|
|
self.check_access_permissions(
|
|
|
|
place_span,
|
|
|
|
rw,
|
|
|
|
is_local_mutation_allowed,
|
|
|
|
flow_state,
|
|
|
|
context.loc,
|
|
|
|
);
|
2017-12-07 19:12:01 +02:00
|
|
|
let conflict_error =
|
|
|
|
self.check_access_for_conflict(context, place_span, sd, rw, flow_state);
|
|
|
|
|
2018-01-20 02:15:57 +00:00
|
|
|
if conflict_error || mutability_error {
|
2018-03-06 02:29:03 -03:00
|
|
|
debug!(
|
|
|
|
"access_place: logging error place_span=`{:?}` kind=`{:?}`",
|
|
|
|
place_span, kind
|
|
|
|
);
|
|
|
|
self.access_place_error_reported
|
|
|
|
.insert((place_span.0.clone(), place_span.1));
|
2018-01-19 22:11:59 +00:00
|
|
|
}
|
2017-12-07 19:12:01 +02:00
|
|
|
}
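//
// A hedged, self-contained illustration of the two failure modes the doc
// comment on `access_place` describes; the commented-out lines are the ones
// that would be rejected:
//
// ```rust
// fn main() {
//     let x: i32;
//     // let _y = x;    // (a) use of possibly-uninitialized `x`
//
//     let mut v = vec![1];
//     let r = &v[0];
//     // v.push(2);     // (b) cannot borrow `v` mutably while `r` is live
//     println!("{}", r);
// }
// ```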
|
|
|
|
|
|
|
|
fn check_access_for_conflict(
|
|
|
|
&mut self,
|
|
|
|
context: Context,
|
|
|
|
place_span: (&Place<'tcx>, Span),
|
|
|
|
sd: ShallowOrDeep,
|
|
|
|
rw: ReadOrWrite,
|
|
|
|
flow_state: &Flows<'cx, 'gcx, 'tcx>,
|
|
|
|
) -> bool {
|
2018-04-06 18:03:23 -04:00
|
|
|
debug!(
|
|
|
|
"check_access_for_conflict(context={:?}, place_span={:?}, sd={:?}, rw={:?})",
|
2018-06-22 00:10:52 -03:00
|
|
|
context, place_span, sd, rw,
|
2018-04-06 18:03:23 -04:00
|
|
|
);
|
|
|
|
|
2017-12-07 19:12:01 +02:00
|
|
|
let mut error_reported = false;
|
2018-05-18 23:47:48 -07:00
|
|
|
let tcx = self.tcx;
|
|
|
|
let mir = self.mir;
|
2018-06-26 07:12:51 -04:00
|
|
|
let location = self.location_table.start_index(context.loc);
|
2018-05-18 23:47:48 -07:00
|
|
|
let borrow_set = self.borrow_set.clone();
|
|
|
|
each_borrow_involving_path(
|
|
|
|
self,
|
|
|
|
tcx,
|
|
|
|
mir,
|
2017-11-17 04:47:02 -05:00
|
|
|
context,
|
|
|
|
(sd, place_span.0),
|
2018-05-18 23:47:48 -07:00
|
|
|
&borrow_set,
|
2018-05-29 08:54:15 -03:00
|
|
|
flow_state.borrows_in_scope(location),
|
2018-06-22 00:10:52 -03:00
|
|
|
|this, borrow_index, borrow| match (rw, borrow.kind) {
|
2017-12-14 17:34:16 -06:00
|
|
|
// Obviously an activation is compatible with its own
|
|
|
|
// reservation (or even prior activating uses of the same
|
|
|
|
// borrow); so don't check if they interfere.
|
|
|
|
//
|
|
|
|
// NOTE: *reservations* do conflict with themselves;
|
|
|
|
// so we aren't injecting unsoundness with this check.
|
2018-04-06 17:03:14 -04:00
|
|
|
(Activation(_, activating), _) if activating == borrow_index => {
|
2017-12-14 07:33:29 -05:00
|
|
|
debug!(
|
|
|
|
"check_access_for_conflict place_span: {:?} sd: {:?} rw: {:?} \
|
2018-04-06 17:03:14 -04:00
|
|
|
skipping {:?} b/c activation of same borrow_index",
|
2017-12-14 07:33:29 -05:00
|
|
|
place_span,
|
|
|
|
sd,
|
|
|
|
rw,
|
2018-04-06 17:03:14 -04:00
|
|
|
(borrow_index, borrow),
|
2017-12-14 07:33:29 -05:00
|
|
|
);
|
2017-12-14 17:34:16 -06:00
|
|
|
Control::Continue
|
|
|
|
}
|
2017-12-01 16:02:15 +01:00
|
|
|
|
2017-12-14 07:33:29 -05:00
|
|
|
(Read(_), BorrowKind::Shared) | (Reservation(..), BorrowKind::Shared) => {
|
|
|
|
Control::Continue
|
|
|
|
}
|
2017-12-07 17:45:13 +01:00
|
|
|
|
2018-01-15 12:47:26 +01:00
|
|
|
(Read(kind), BorrowKind::Unique) | (Read(kind), BorrowKind::Mut { .. }) => {
|
2017-12-01 16:02:15 +01:00
|
|
|
// Reading from mere reservations of mutable-borrows is OK.
|
2018-05-18 23:47:48 -07:00
|
|
|
if !is_active(&this.dominators, borrow, context.loc) {
|
|
|
|
assert!(allow_two_phase_borrow(&this.tcx, borrow.kind));
|
2017-12-01 16:02:15 +01:00
|
|
|
return Control::Continue;
|
|
|
|
}
|
|
|
|
|
2017-11-17 04:47:02 -05:00
|
|
|
match kind {
|
|
|
|
ReadKind::Copy => {
|
|
|
|
error_reported = true;
|
|
|
|
this.report_use_while_mutably_borrowed(context, place_span, borrow)
|
|
|
|
}
|
|
|
|
ReadKind::Borrow(bk) => {
|
|
|
|
error_reported = true;
|
2018-06-22 00:10:52 -03:00
|
|
|
this.report_conflicting_borrow(context, place_span, bk, &borrow)
|
2017-08-21 12:48:33 +02:00
|
|
|
}
|
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
Control::Break
|
|
|
|
}
|
2017-12-07 17:45:13 +01:00
|
|
|
|
2017-12-14 07:33:29 -05:00
|
|
|
(Reservation(kind), BorrowKind::Unique)
|
2018-01-15 12:47:26 +01:00
|
|
|
| (Reservation(kind), BorrowKind::Mut { .. })
|
2017-12-14 07:33:29 -05:00
|
|
|
| (Activation(kind, _), _)
|
|
|
|
| (Write(kind), _) => {
|
2017-12-07 17:45:13 +01:00
|
|
|
match rw {
|
|
|
|
Reservation(_) => {
|
2017-12-14 07:33:29 -05:00
|
|
|
debug!(
|
|
|
|
"recording invalid reservation of \
|
|
|
|
place: {:?}",
|
|
|
|
place_span.0
|
|
|
|
);
|
2017-12-07 17:45:13 +01:00
|
|
|
this.reservation_error_reported.insert(place_span.0.clone());
|
2018-03-06 02:29:03 -03:00
|
|
|
}
|
2017-12-07 17:45:13 +01:00
|
|
|
Activation(_, activating) => {
|
2017-12-14 07:33:29 -05:00
|
|
|
debug!(
|
|
|
|
"observing check_place for activation of \
|
|
|
|
borrow_index: {:?}",
|
|
|
|
activating
|
|
|
|
);
|
2018-03-06 02:29:03 -03:00
|
|
|
}
|
|
|
|
Read(..) | Write(..) => {}
|
2017-12-07 17:45:13 +01:00
|
|
|
}
|
|
|
|
|
2017-11-17 04:47:02 -05:00
|
|
|
match kind {
|
|
|
|
WriteKind::MutableBorrow(bk) => {
|
|
|
|
error_reported = true;
|
2018-06-22 00:10:52 -03:00
|
|
|
this.report_conflicting_borrow(context, place_span, bk, &borrow)
|
2017-11-17 04:47:02 -05:00
|
|
|
}
|
|
|
|
WriteKind::StorageDeadOrDrop => {
|
|
|
|
error_reported = true;
|
|
|
|
this.report_borrowed_value_does_not_live_long_enough(
|
2017-12-14 07:33:29 -05:00
|
|
|
context,
|
|
|
|
borrow,
|
2018-06-13 14:51:53 -03:00
|
|
|
place_span,
|
|
|
|
Some(kind),
|
2017-12-14 07:33:29 -05:00
|
|
|
);
|
2017-11-17 04:47:02 -05:00
|
|
|
}
|
|
|
|
WriteKind::Mutate => {
|
|
|
|
error_reported = true;
|
|
|
|
this.report_illegal_mutation_of_borrowed(context, place_span, borrow)
|
|
|
|
}
|
|
|
|
WriteKind::Move => {
|
|
|
|
error_reported = true;
|
|
|
|
this.report_move_out_while_borrowed(context, place_span, &borrow)
|
2017-08-21 12:48:33 +02:00
|
|
|
}
|
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
Control::Break
|
2017-08-21 12:48:33 +02:00
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
},
|
|
|
|
);
|
2017-11-21 01:50:04 +02:00
|
|
|
|
2017-12-07 19:12:01 +02:00
|
|
|
error_reported
|
2017-08-21 12:48:33 +02:00
|
|
|
}
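//
// The reservation/activation handling above is what makes two-phase borrows
// work. A minimal sketch, assuming the two-phase-borrow rules are enabled (as
// they are under MIR borrowck):
//
// ```rust
// fn main() {
//     let mut v: Vec<usize> = vec![1, 2, 3];
//     // `push` only *reserves* its mutable borrow of `v`; the call to `len`
//     // reads `v` during the reservation, and the borrow is activated only
//     // afterwards, so the read does not conflict with the reservation.
//     v.push(v.len());
// }
// ```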
|
|
|
|
|
2017-11-17 04:47:02 -05:00
|
|
|
fn mutate_place(
|
|
|
|
&mut self,
|
|
|
|
context: Context,
|
|
|
|
place_span: (&Place<'tcx>, Span),
|
|
|
|
kind: ShallowOrDeep,
|
|
|
|
mode: MutateMode,
|
2017-12-07 17:00:26 +02:00
|
|
|
flow_state: &Flows<'cx, 'gcx, 'tcx>,
|
2017-11-17 04:47:02 -05:00
|
|
|
) {
|
2017-07-05 14:52:18 +02:00
|
|
|
// Write of P[i] or *P, or WriteAndRead of any P, requires P init'd.
|
|
|
|
match mode {
|
|
|
|
MutateMode::WriteAndRead => {
|
2018-03-26 22:00:14 +02:00
|
|
|
self.check_if_path_or_subpath_is_moved(
|
2017-11-17 04:47:02 -05:00
|
|
|
context,
|
|
|
|
InitializationRequiringAction::Update,
|
|
|
|
place_span,
|
|
|
|
flow_state,
|
|
|
|
);
|
2017-07-05 14:52:18 +02:00
|
|
|
}
|
|
|
|
MutateMode::JustWrite => {
|
2017-12-01 14:39:51 +02:00
|
|
|
self.check_if_assigned_path_is_moved(context, place_span, flow_state);
|
2017-07-05 14:52:18 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-10 17:34:56 -04:00
|
|
|
// Special case: you can assign an immutable local variable
|
|
|
|
// (e.g., `x = ...`) so long as it has never been initialized
|
|
|
|
// before (at this point in the flow).
|
|
|
|
if let &Place::Local(local) = place_span.0 {
|
|
|
|
if let Mutability::Not = self.mir.local_decls[local].mutability {
|
|
|
|
// check for reassignments to immutable local variables
|
2018-08-10 18:05:01 -04:00
|
|
|
self.check_if_reassignment_to_immutable_state(
|
|
|
|
context,
|
|
|
|
local,
|
|
|
|
place_span,
|
|
|
|
flow_state,
|
|
|
|
);
|
2018-08-10 17:34:56 -04:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, use the normal access permission rules.
|
|
|
|
self.access_place(
|
2017-11-17 04:47:02 -05:00
|
|
|
context,
|
|
|
|
place_span,
|
|
|
|
(kind, Write(WriteKind::Mutate)),
|
2018-08-10 17:34:56 -04:00
|
|
|
LocalMutationIsAllowed::No,
|
2017-11-17 04:47:02 -05:00
|
|
|
flow_state,
|
|
|
|
);
|
2017-07-05 14:52:18 +02:00
|
|
|
}
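//
// A short illustration of the special case above (deferred initialization of
// an immutable local); the commented-out line is the one that would be
// rejected as a reassignment:
//
// ```rust
// fn main() {
//     let x;            // immutable, not yet initialized
//     x = 5;            // first assignment doubles as initialization: allowed
//     // x = 6;         // second assignment to immutable `x`: rejected
//     assert_eq!(x, 5);
// }
// ```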
|
|
|
|
|
2017-11-17 04:47:02 -05:00
|
|
|
fn consume_rvalue(
|
|
|
|
&mut self,
|
|
|
|
context: Context,
|
|
|
|
(rvalue, span): (&Rvalue<'tcx>, Span),
|
|
|
|
_location: Location,
|
2017-12-07 17:00:26 +02:00
|
|
|
flow_state: &Flows<'cx, 'gcx, 'tcx>,
|
2017-11-17 04:47:02 -05:00
|
|
|
) {
|
2017-07-05 14:52:18 +02:00
|
|
|
match *rvalue {
|
2017-11-17 04:47:02 -05:00
|
|
|
Rvalue::Ref(_ /*rgn*/, bk, ref place) => {
|
2017-08-21 12:48:33 +02:00
|
|
|
let access_kind = match bk {
|
|
|
|
BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))),
|
2018-01-15 12:47:26 +01:00
|
|
|
BorrowKind::Unique | BorrowKind::Mut { .. } => {
|
2017-12-07 17:45:13 +01:00
|
|
|
let wk = WriteKind::MutableBorrow(bk);
|
2018-05-18 23:47:48 -07:00
|
|
|
if allow_two_phase_borrow(&self.tcx, bk) {
|
2017-12-07 17:45:13 +01:00
|
|
|
(Deep, Reservation(wk))
|
|
|
|
} else {
|
|
|
|
(Deep, Write(wk))
|
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
}
|
2017-08-21 12:48:33 +02:00
|
|
|
};
|
2017-12-07 17:45:13 +01:00
|
|
|
|
2017-11-17 04:47:02 -05:00
|
|
|
self.access_place(
|
|
|
|
context,
|
|
|
|
(place, span),
|
|
|
|
access_kind,
|
|
|
|
LocalMutationIsAllowed::No,
|
|
|
|
flow_state,
|
|
|
|
);
|
2017-12-07 17:45:13 +01:00
|
|
|
|
2018-03-26 22:00:14 +02:00
|
|
|
self.check_if_path_or_subpath_is_moved(
|
2017-11-17 04:47:02 -05:00
|
|
|
context,
|
|
|
|
InitializationRequiringAction::Borrow,
|
|
|
|
(place, span),
|
|
|
|
flow_state,
|
|
|
|
);
|
2017-07-05 14:52:18 +02:00
|
|
|
}

            Rvalue::Use(ref operand)
            | Rvalue::Repeat(ref operand, _)
            | Rvalue::UnaryOp(_ /*un_op*/, ref operand)
            | Rvalue::Cast(_ /*cast_kind*/, ref operand, _ /*ty*/) => {
                self.consume_operand(context, (operand, span), flow_state)
            }

            Rvalue::Len(ref place) | Rvalue::Discriminant(ref place) => {
                let af = match *rvalue {
                    Rvalue::Len(..) => ArtificialField::ArrayLength,
                    Rvalue::Discriminant(..) => ArtificialField::Discriminant,
                    _ => unreachable!(),
                };
                self.access_place(
                    context,
                    (place, span),
                    (Shallow(Some(af)), Read(ReadKind::Copy)),
                    LocalMutationIsAllowed::No,
                    flow_state,
                );
                self.check_if_path_or_subpath_is_moved(
                    context,
                    InitializationRequiringAction::Use,
                    (place, span),
                    flow_state,
                );
            }

            Rvalue::BinaryOp(_bin_op, ref operand1, ref operand2)
            | Rvalue::CheckedBinaryOp(_bin_op, ref operand1, ref operand2) => {
                self.consume_operand(context, (operand1, span), flow_state);
                self.consume_operand(context, (operand2, span), flow_state);
            }

            Rvalue::NullaryOp(_op, _ty) => {
                // nullary ops take no dynamic input; no borrowck effect.
                //
                // FIXME: is the above actually true? Do we want to track
                // the fact that uninitialized data can be created via
                // `NullOp::Box`?
            }

            Rvalue::Aggregate(ref aggregate_kind, ref operands) => {
                // We need to report back the list of mutable upvars that were
                // moved into the closure and subsequently used by the closure,
                // in order to populate our used_mut set.
                match **aggregate_kind {
                    AggregateKind::Closure(def_id, _)
                    | AggregateKind::Generator(def_id, _, _) => {
                        let BorrowCheckResult {
                            used_mut_upvars, ..
                        } = self.tcx.mir_borrowck(def_id);
                        debug!("{:?} used_mut_upvars={:?}", def_id, used_mut_upvars);
                        for field in used_mut_upvars {
                            // This relies on the current way that by-value
                            // captures of a closure are copied/moved directly
                            // when generating MIR.
                            match operands[field.index()] {
                                Operand::Move(Place::Local(local))
                                | Operand::Copy(Place::Local(local)) => {
                                    self.used_mut.insert(local);
                                }
                                Operand::Move(ref place @ Place::Projection(_))
                                | Operand::Copy(ref place @ Place::Projection(_)) => {
                                    if let Some(field) = place.is_upvar_field_projection(
                                        self.mir, &self.tcx) {
                                        self.used_mut_upvars.push(field);
                                    }
                                }
                                Operand::Move(Place::Static(..))
                                | Operand::Copy(Place::Static(..))
                                | Operand::Move(Place::Promoted(..))
                                | Operand::Copy(Place::Promoted(..))
                                | Operand::Constant(..) => {}
                            }
                        }
                    }
                    AggregateKind::Adt(..)
                    | AggregateKind::Array(..)
                    | AggregateKind::Tuple { .. } => (),
                }
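
                // For illustration (assumed user code): in
                //
                //     let mut x = 0;
                //     let mut c = move || x += 1;
                //     c();
                //
                // the closure aggregate moves `x` in by value; borrow-checking the
                // closure's own MIR reports that upvar back in `used_mut_upvars`,
                // which is (roughly) what keeps the outer `let mut x` from being
                // flagged by the `unused_mut` lint.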

                for operand in operands {
                    self.consume_operand(context, (operand, span), flow_state);
                }
            }
        }
    }

    fn consume_operand(
        &mut self,
        context: Context,
        (operand, span): (&Operand<'tcx>, Span),
        flow_state: &Flows<'cx, 'gcx, 'tcx>,
    ) {
        match *operand {
            Operand::Copy(ref place) => {
                // copy of place: check if this is "copy of frozen path"
                // (FIXME: see check_loans.rs)
                self.access_place(
                    context,
                    (place, span),
                    (Deep, Read(ReadKind::Copy)),
                    LocalMutationIsAllowed::No,
                    flow_state,
                );

                // Finally, check if path was already moved.
                self.check_if_path_or_subpath_is_moved(
                    context,
                    InitializationRequiringAction::Use,
                    (place, span),
                    flow_state,
                );
            }
            Operand::Move(ref place) => {
                // move of place: check if this is a move of an already borrowed path
                self.access_place(
                    context,
                    (place, span),
                    (Deep, Write(WriteKind::Move)),
                    LocalMutationIsAllowed::Yes,
                    flow_state,
                );

                // Finally, check if path was already moved.
                self.check_if_path_or_subpath_is_moved(
                    context,
                    InitializationRequiringAction::Use,
                    (place, span),
                    flow_state,
                );
            }
            Operand::Constant(_) => {}
        }
    }
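
    // For illustration (assumed user code): passing a `String` local to a function
    // is a `Move` operand, i.e. a deep write of the place, so any later use of it
    // is a "use of moved value" error; passing an `i32` local is a `Copy` operand
    // and only counts as a deep read.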

    /// Checks whether a borrow of this place is invalidated when the function
    /// exits, and reports an error if so.
    fn check_for_invalidation_at_exit(
        &mut self,
        context: Context,
        borrow: &BorrowData<'tcx>,
        span: Span,
    ) {
        debug!("check_for_invalidation_at_exit({:?})", borrow);
        let place = &borrow.borrowed_place;
        let root_place = self.prefixes(place, PrefixSet::All).last().unwrap();

        // FIXME(nll-rfc#40): do more precise destructor tracking here. For now
        // we just know that all locals are dropped at function exit (otherwise
        // we'll have a memory leak) and assume that all statics have a destructor.
        //
        // FIXME: allow thread-locals to borrow other thread-locals?
        let (might_be_alive, will_be_dropped) = match root_place {
            Place::Promoted(_) => (true, false),
            Place::Static(_) => {
                // Thread-locals might be dropped after the function exits, but
                // "true" statics will never be.
                let is_thread_local = self.is_place_thread_local(&root_place);
                (true, is_thread_local)
            }
            Place::Local(_) => {
                // Locals are always dropped at function exit, and if they
                // have a destructor it would've been called already.
                (false, self.locals_are_invalidated_at_exit)
            }
            Place::Projection(..) => {
                bug!("root of {:?} is a projection ({:?})?", place, root_place)
            }
        };

        if !will_be_dropped {
            debug!(
                "place_is_invalidated_at_exit({:?}) - won't be dropped",
                place
            );
            return;
        }

        // FIXME: replace this with a proper borrow_conflicts_with_place when
        // that is merged.
        let sd = if might_be_alive { Deep } else { Shallow(None) };

        if places_conflict::places_conflict(self.tcx, self.mir, place, root_place, sd) {
            debug!("check_for_invalidation_at_exit({:?}): INVALID", place);
            // FIXME: should be talking about the region lifetime instead
            // of just a span here.
            let span = self.tcx.sess.source_map().end_point(span);
            self.report_borrowed_value_does_not_live_long_enough(
                context,
                borrow,
                (place, span),
                None,
            )
        }
    }
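
    // For illustration: roughly speaking, this catches borrows of a local whose
    // region extends past the end of the function (for example a loan that is
    // returned or otherwise escapes), which conflict with the implicit drop of
    // that local at exit and are reported as not living long enough.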

    /// Reports an error if this is a borrow of local data.
    /// This is called for all Yield statements on movable generators.
    fn check_for_local_borrow(&mut self, borrow: &BorrowData<'tcx>, yield_span: Span) {
        debug!("check_for_local_borrow({:?})", borrow);

        if borrow_of_local_data(&borrow.borrowed_place) {
            let err = self.tcx
                .cannot_borrow_across_generator_yield(
                    self.retrieve_borrow_spans(borrow).var_or_use(),
                    yield_span,
                    Origin::Mir,
                );

            err.buffer(&mut self.errors_buffer);
        }
    }
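
    // For illustration (assumed user code): a movable generator that keeps a
    // borrow of its own local alive across a `yield`, roughly
    //
    //     || {
    //         let x = 0;
    //         let r = &x;
    //         yield;
    //         drop(r);
    //     }
    //
    // is rejected here, since the generator may be moved between resumptions and
    // that move would invalidate `r`.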

    fn check_activations(
        &mut self,
        location: Location,
        span: Span,
        flow_state: &Flows<'cx, 'gcx, 'tcx>,
    ) {
        if !self.tcx.two_phase_borrows() {
            return;
        }

        // Two-phase borrow support: For each activation that is newly
        // generated at this statement, check if it interferes with
        // another borrow.
        let borrow_set = self.borrow_set.clone();
        for &borrow_index in borrow_set.activations_at_location(location) {
            let borrow = &borrow_set[borrow_index];

            // only mutable borrows should be 2-phase
            assert!(match borrow.kind {
                BorrowKind::Shared => false,
                BorrowKind::Unique | BorrowKind::Mut { .. } => true,
            });

            self.access_place(
                ContextKind::Activation.new(location),
                (&borrow.borrowed_place, span),
                (
                    Deep,
                    Activation(WriteKind::MutableBorrow(borrow.kind), borrow_index),
                ),
                LocalMutationIsAllowed::No,
                flow_state,
            );
            // We do not need to call `check_if_path_or_subpath_is_moved`
            // again, as we already called it when we made the
            // initial reservation.
        }
    }
}

impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
    fn check_if_reassignment_to_immutable_state(
        &mut self,
        context: Context,
        local: Local,
        place_span: (&Place<'tcx>, Span),
        flow_state: &Flows<'cx, 'gcx, 'tcx>,
    ) {
        debug!("check_if_reassignment_to_immutable_state({:?})", local);

        // Check if any of the initializations of `local` have happened yet:
        let mpi = self.move_data.rev_lookup.find_local(local);
        let init_indices = &self.move_data.init_path_map[mpi];
        let first_init_index = init_indices.iter().find(|ii| flow_state.ever_inits.contains(ii));
        if let Some(&init_index) = first_init_index {
            // And, if so, report an error.
            let init = &self.move_data.inits[init_index];
            self.report_illegal_reassignment(context, place_span, init.span, place_span.0);
        }
    }
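
    // Note that the `ever_inits` dataflow above is what makes delayed
    // initialization legal: `let x; x = 1;` sees no earlier initialization and is
    // accepted, while a later `x = 2;` finds one and is reported as an illegal
    // reassignment.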

    fn check_if_full_path_is_moved(
        &mut self,
        context: Context,
        desired_action: InitializationRequiringAction,
        place_span: (&Place<'tcx>, Span),
        flow_state: &Flows<'cx, 'gcx, 'tcx>,
    ) {
        // FIXME: analogous code in check_loans first maps `place` to
        // its base_path ... but is that what we want here?
        let place = self.base_path(place_span.0);

        let maybe_uninits = &flow_state.uninits;
        let curr_move_outs = &flow_state.move_outs;

        // Bad scenarios:
        //
        // 1. Move of `a.b.c`, use of `a.b.c`
        // 2. Move of `a.b.c`, use of `a.b.c.d` (without first reinitializing `a.b.c.d`)
        // 3. Uninitialized `(a.b.c: &_)`, use of `*a.b.c`; note that with
        //    partial initialization support, one might have `a.x`
        //    initialized but not `a.b`.
        //
        // OK scenarios:
        //
        // 4. Move of `a.b.c`, use of `a.b.d`
        // 5. Uninitialized `a.x`, initialized `a.b`, use of `a.b`
        // 6. Copied `(a.b: &_)`, use of `*(a.b).c`; note that `a.b`
        //    must have been initialized for the use to be sound.
        // 7. Move of `a.b.c` then reinit of `a.b.c.d`, use of `a.b.c.d`

        // The dataflow tracks shallow prefixes distinctly (that is,
        // field-accesses on P distinctly from P itself), in order to
        // track substructure initialization separately from the whole
        // structure.
        //
        // E.g., when looking at (*a.b.c).d, if the closest prefix for
        // which we have a MovePath is `a.b`, then that means that the
        // initialization state of `a.b` is all we need to inspect to
        // know if `a.b.c` is valid (and from that we infer that the
        // dereference and `.d` access is also valid, since we assume
        // `a.b.c` is assigned a reference to an initialized and
        // well-formed record structure.)

        // Therefore, if we seek out the *closest* prefix for which we
        // have a MovePath, that should capture the initialization
        // state for the place scenario.
        //
        // This code covers scenarios 1, 2, and 3.
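
        // A concrete sketch of scenarios 1 and 2 (assumed user code):
        //
        //     struct C { d: String }
        //     struct B { c: C }
        //     struct A { b: B }
        //
        //     let a = A { b: B { c: C { d: String::new() } } };
        //     drop(a.b.c);      // move of `a.b.c`
        //     drop(a.b.c.d);    // scenario 2: use of a subpath without reinit
        //
        // Using `a.b.c` itself again on the last line would be scenario 1 instead.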

        debug!("check_if_full_path_is_moved place: {:?}", place);
        match self.move_path_closest_to(place) {
            Ok(mpi) => {
                if maybe_uninits.contains(&mpi) {
                    self.report_use_of_moved_or_uninitialized(
                        context,
                        desired_action,
                        place_span,
                        mpi,
                        curr_move_outs,
                    );
                    return; // don't bother finding other problems.
                }
            }
            Err(NoMovePathFound::ReachedStatic) => {
                // Okay: we do not build MoveData for static variables
            } // Only query longest prefix with a MovePath, not further
              // ancestors; dataflow recurs on children when parents
              // move (to support partial (re)inits).
              //
              // (I.e. querying parents breaks scenario 7; but may want
              // to do such a query based on partial-init feature-gate.)
        }
    }

    fn check_if_path_or_subpath_is_moved(
        &mut self,
        context: Context,
        desired_action: InitializationRequiringAction,
        place_span: (&Place<'tcx>, Span),
        flow_state: &Flows<'cx, 'gcx, 'tcx>,
    ) {
        // FIXME: analogous code in check_loans first maps `place` to
        // its base_path ... but is that what we want here?
        let place = self.base_path(place_span.0);

        let maybe_uninits = &flow_state.uninits;
        let curr_move_outs = &flow_state.move_outs;

        // Bad scenarios:
        //
        // 1. Move of `a.b.c`, use of `a` or `a.b`
        // 2. All bad scenarios from `check_if_full_path_is_moved`
        //
        // OK scenarios:
        //
        // 3. Move of `a.b.c`, use of `a.b.d`
        // 4. Uninitialized `a.x`, initialized `a.b`, use of `a.b`
        // 5. Copied `(a.b: &_)`, use of `*(a.b).c`; note that `a.b`
        //    must have been initialized for the use to be sound.
        // 6. Move of `a.b.c` then reinit of `a.b.c.d`, use of `a.b.c.d`

        self.check_if_full_path_is_moved(context, desired_action, place_span, flow_state);

        // A move of any shallow suffix of `place` also interferes
        // with an attempt to use `place`. This is scenario 1 above.
        //
        // (Distinct from the prefix-based check above because
        // `place` does not interfere with suffixes of its prefixes,
        // e.g. `a.b.c` does not interfere with `a.b.d`.)
        //
        // This code covers scenario 1.

        debug!("check_if_path_or_subpath_is_moved place: {:?}", place);
        if let Some(mpi) = self.move_path_for_place(place) {
            if let Some(child_mpi) = maybe_uninits.has_any_child_of(mpi) {
                self.report_use_of_moved_or_uninitialized(
                    context,
                    desired_action,
                    place_span,
                    child_mpi,
                    curr_move_outs,
                );
                return; // don't bother finding other problems.
            }
        }
    }
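
    // For illustration (assumed user code): after `let s = a.b.c;`, a later use of
    // the whole value, such as `drop(a)` or `drop(a.b)`, is caught by the
    // child-path check above, even though `a` itself still has a (partially
    // initialized) MovePath.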

    /// Currently MoveData does not store entries for all places in
    /// the input MIR. For example it will currently filter out
    /// places that are Copy; thus we do not track places of shared
    /// reference type. This routine will walk up a place along its
    /// prefixes, searching for a foundational place that *is*
    /// tracked in the MoveData.
    ///
    /// An Err result includes a tag indicating why the search failed.
    /// Currently this can only occur if the place is built off of a
    /// static variable, as we do not track those in the MoveData.
    fn move_path_closest_to(
        &mut self,
        place: &Place<'tcx>,
    ) -> Result<MovePathIndex, NoMovePathFound> {
        let mut last_prefix = place;
        for prefix in self.prefixes(place, PrefixSet::All) {
            if let Some(mpi) = self.move_path_for_place(prefix) {
                return Ok(mpi);
            }
            last_prefix = prefix;
        }
        match *last_prefix {
            Place::Local(_) => panic!("should have move path for every Local"),
            Place::Projection(_) => panic!("PrefixSet::All meant don't stop for Projection"),
            Place::Promoted(_) |
            Place::Static(_) => return Err(NoMovePathFound::ReachedStatic),
        }
    }
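
    // For example, for a place like `a.b.c` the loop above looks at `a.b.c`, then
    // `a.b`, then the local `a`, and returns the first prefix that has a MovePath;
    // only a place rooted in a static (or promoted) can fail to find one.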

    fn move_path_for_place(&mut self, place: &Place<'tcx>) -> Option<MovePathIndex> {
        // If this returns None, then there is no move path corresponding
        // to a direct owner of `place` (which means there is nothing
        // that borrowck tracks for its analysis).

        match self.move_data.rev_lookup.find(place) {
            LookupResult::Parent(_) => None,
            LookupResult::Exact(mpi) => Some(mpi),
        }
    }

    fn check_if_assigned_path_is_moved(
        &mut self,
        context: Context,
        (place, span): (&Place<'tcx>, Span),
        flow_state: &Flows<'cx, 'gcx, 'tcx>,
    ) {
        debug!("check_if_assigned_path_is_moved place: {:?}", place);
        // recur down place; dispatch to external checks when necessary
        let mut place = place;
        loop {
            match *place {
                Place::Promoted(_) |
                Place::Local(_) | Place::Static(_) => {
                    // assigning to `x` does not require `x` be initialized.
                    break;
                }
                Place::Projection(ref proj) => {
                    let Projection { ref base, ref elem } = **proj;
                    match *elem {
                        // assigning to P[i] requires P to be valid.
                        ProjectionElem::Index(_/*operand*/) |
                        ProjectionElem::ConstantIndex { .. } |
                        // assigning to (P->variant) is okay if assigning to `P` is okay
                        //
                        // FIXME: is this true even if P is an adt with a dtor?
                        ProjectionElem::Downcast(_/*adt_def*/, _/*variant_idx*/) => {}

                        // assigning to (*P) requires P to be initialized
                        ProjectionElem::Deref => {
                            self.check_if_full_path_is_moved(
                                context, InitializationRequiringAction::Use,
                                (base, span), flow_state);
                            // (base initialized; no need to
                            // recur further)
                            break;
                        }

                        ProjectionElem::Subslice { .. } => {
                            panic!("we don't allow assignments to subslices, context: {:?}",
                                   context);
                        }

                        ProjectionElem::Field(..) => {
                            // if the type of `P` has a dtor, then
                            // assigning to `P.f` requires `P` itself
                            // to be already initialized
                            let tcx = self.tcx;
                            match base.ty(self.mir, tcx).to_ty(tcx).sty {
                                ty::TyAdt(def, _) if def.has_dtor(tcx) => {
                                    // FIXME: analogous code in
                                    // check_loans.rs first maps
                                    // `base` to its base_path.

                                    self.check_if_path_or_subpath_is_moved(
                                        context, InitializationRequiringAction::Assignment,
                                        (base, span), flow_state);

                                    // (base initialized; no need to
                                    // recur further)
                                    break;
                                }
                                _ => {}
                            }
                        }
                    }

                    place = base;
                    continue;
                }
            }
        }
    }
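
    // For illustration (assumed user code): `*p = 3;` requires `p` itself to be
    // initialized (the assignment has to know where the write lands), while
    // `s.f = 3;` is allowed even if `s` is only partially initialized, unless the
    // type of `s` has a destructor, in which case `s` must already be fully
    // initialized before any of its fields is overwritten.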
|
2017-10-21 21:15:04 +02:00
|
|
|
|
2018-07-15 15:11:29 +01:00
|
|
|
|
2017-12-01 14:39:51 +02:00
|
|
|
/// Check the permissions for the given place and read or write kind
|
2017-11-16 17:44:24 +01:00
|
|
|
///
|
|
|
|
/// Returns true if an error is reported, false otherwise.
|
2017-11-17 04:47:02 -05:00
|
|
|
fn check_access_permissions(
|
2018-02-28 01:09:08 -08:00
|
|
|
&mut self,
|
2017-11-17 04:47:02 -05:00
|
|
|
(place, span): (&Place<'tcx>, Span),
|
|
|
|
kind: ReadOrWrite,
|
|
|
|
is_local_mutation_allowed: LocalMutationIsAllowed,
|
2018-03-12 08:47:44 -07:00
|
|
|
flow_state: &Flows<'cx, 'gcx, 'tcx>,
|
2018-07-15 15:11:29 +01:00
|
|
|
location: Location,
|
2017-11-17 04:47:02 -05:00
|
|
|
) -> bool {
|
|
|
|
debug!(
|
|
|
|
"check_access_permissions({:?}, {:?}, {:?})",
|
2018-03-06 02:29:03 -03:00
|
|
|
place, kind, is_local_mutation_allowed
|
2017-11-17 04:47:02 -05:00
|
|
|
);
|
2018-06-11 19:11:48 +02:00
|
|
|
|
2018-06-12 18:00:27 +02:00
|
|
|
let error_access;
|
|
|
|
let the_place_err;
|
|
|
|
|
2017-10-21 21:15:04 +02:00
|
|
|
match kind {
|
2018-05-07 15:58:09 +02:00
|
|
|
Reservation(WriteKind::MutableBorrow(borrow_kind @ BorrowKind::Unique))
|
2018-05-22 15:03:40 +02:00
|
|
|
| Reservation(WriteKind::MutableBorrow(borrow_kind @ BorrowKind::Mut { .. }))
|
|
|
|
| Write(WriteKind::MutableBorrow(borrow_kind @ BorrowKind::Unique))
|
2018-06-22 00:10:52 -03:00
|
|
|
| Write(WriteKind::MutableBorrow(borrow_kind @ BorrowKind::Mut { .. })) => {
|
2018-05-07 15:58:09 +02:00
|
|
|
let is_local_mutation_allowed = match borrow_kind {
|
|
|
|
BorrowKind::Unique => LocalMutationIsAllowed::Yes,
|
|
|
|
BorrowKind::Mut { .. } => is_local_mutation_allowed,
|
|
|
|
BorrowKind::Shared => unreachable!(),
|
|
|
|
};
|
2018-03-16 02:52:07 -07:00
|
|
|
match self.is_mutable(place, is_local_mutation_allowed) {
|
2018-06-11 19:11:48 +02:00
|
|
|
Ok(root_place) => {
|
|
|
|
self.add_used_mut(root_place, flow_state);
|
|
|
|
return false;
|
|
|
|
}
|
2018-03-16 02:52:07 -07:00
|
|
|
Err(place_err) => {
|
2018-06-12 18:00:27 +02:00
|
|
|
error_access = AccessKind::MutableBorrow;
|
|
|
|
the_place_err = place_err;
|
2017-10-21 21:15:04 +02:00
|
|
|
}
|
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
}
|
2018-03-16 02:52:07 -07:00
|
|
|
Reservation(WriteKind::Mutate) | Write(WriteKind::Mutate) => {
|
|
|
|
match self.is_mutable(place, is_local_mutation_allowed) {
|
2018-06-11 19:11:48 +02:00
|
|
|
Ok(root_place) => {
|
|
|
|
self.add_used_mut(root_place, flow_state);
|
|
|
|
return false;
|
|
|
|
}
|
2018-03-16 02:52:07 -07:00
|
|
|
Err(place_err) => {
|
2018-06-12 18:00:27 +02:00
|
|
|
error_access = AccessKind::Mutate;
|
|
|
|
the_place_err = place_err;
|
2018-03-16 02:52:07 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-06-12 18:00:27 +02:00
|
|
|
|
2018-07-25 01:34:17 +02:00
|
|
|
Reservation(wk @ WriteKind::Move)
|
|
|
|
| Write(wk @ WriteKind::Move)
|
|
|
|
| Reservation(wk @ WriteKind::StorageDeadOrDrop)
|
|
|
|
| Reservation(wk @ WriteKind::MutableBorrow(BorrowKind::Shared))
|
|
|
|
| Write(wk @ WriteKind::StorageDeadOrDrop)
|
|
|
|
| Write(wk @ WriteKind::MutableBorrow(BorrowKind::Shared)) => {
|
2017-12-01 14:39:51 +02:00
|
|
|
if let Err(_place_err) = self.is_mutable(place, is_local_mutation_allowed) {
|
2018-07-25 01:34:17 +02:00
|
|
|
if self.tcx.migrate_borrowck() {
|
|
|
|
// rust-lang/rust#46908: In pure NLL mode this
|
|
|
|
// code path should be unreachable (and thus
|
|
|
|
// we signal an ICE in the else branch
|
|
|
|
// here). But we can legitimately get here
|
|
|
|
// under borrowck=migrate mode, so instead of
|
|
|
|
// ICE'ing we instead report a legitimate
|
|
|
|
// error (which will then be downgraded to a
|
|
|
|
// warning by the migrate machinery).
|
|
|
|
error_access = match wk {
|
|
|
|
WriteKind::MutableBorrow(_) => AccessKind::MutableBorrow,
|
|
|
|
WriteKind::Move => AccessKind::Move,
|
|
|
|
WriteKind::StorageDeadOrDrop |
|
|
|
|
WriteKind::Mutate => AccessKind::Mutate,
|
|
|
|
};
|
|
|
|
self.report_mutability_error(
|
|
|
|
place,
|
|
|
|
span,
|
|
|
|
_place_err,
|
|
|
|
error_access,
|
|
|
|
location,
|
|
|
|
);
|
|
|
|
} else {
|
|
|
|
self.tcx.sess.delay_span_bug(
|
|
|
|
span,
|
|
|
|
&format!(
|
|
|
|
"Accessing `{:?}` with the kind `{:?}` shouldn't be possible",
|
|
|
|
place, kind
|
|
|
|
),
|
|
|
|
);
|
|
|
|
}
|
2017-11-16 17:44:24 +01:00
|
|
|
}
|
2018-06-11 19:11:48 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
Activation(..) => {
|
|
|
|
// permission checks are done at Reservation point.
|
|
|
|
return false;
|
2017-11-17 04:47:02 -05:00
|
|
|
}
|
2017-12-14 07:33:29 -05:00
|
|
|
Read(ReadKind::Borrow(BorrowKind::Unique))
|
2018-01-15 12:47:26 +01:00
|
|
|
| Read(ReadKind::Borrow(BorrowKind::Mut { .. }))
|
2017-12-14 07:33:29 -05:00
|
|
|
| Read(ReadKind::Borrow(BorrowKind::Shared))
|
2018-06-11 19:11:48 +02:00
|
|
|
| Read(ReadKind::Copy) => {
|
|
|
|
// Access authorized
|
|
|
|
return false;
|
|
|
|
}
|
2017-10-21 21:15:04 +02:00
|
|
|
}
|
2018-06-12 18:00:27 +02:00
|
|
|
|
|
|
|
// At this point, we have set up the error-reporting state.
|
2018-07-15 15:11:29 +01:00
|
|
|
self.report_mutability_error(
|
|
|
|
place,
|
|
|
|
span,
|
|
|
|
the_place_err,
|
|
|
|
error_access,
|
|
|
|
location,
|
|
|
|
);
|
2018-06-12 18:00:27 +02:00
|
|
|
return true;
|
2017-10-21 21:15:04 +02:00
|
|
|
}
|
|
|
|
|
2018-04-21 23:41:44 -07:00
|
|
|
/// Adds the place to the set of used mutable variables.
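/// Places recorded here count as genuinely mutated, so their `mut`
/// declarations are not flagged by the `unused_mut` lint.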
|
|
|
|
fn add_used_mut<'d>(
|
|
|
|
&mut self,
|
|
|
|
root_place: RootPlace<'d, 'tcx>,
|
2018-06-22 00:10:52 -03:00
|
|
|
flow_state: &Flows<'cx, 'gcx, 'tcx>,
|
2018-04-21 23:41:44 -07:00
|
|
|
) {
|
|
|
|
match root_place {
|
|
|
|
RootPlace {
|
|
|
|
place: Place::Local(local),
|
|
|
|
is_local_mutation_allowed,
|
|
|
|
} => {
|
|
|
|
if is_local_mutation_allowed != LocalMutationIsAllowed::Yes {
|
|
|
|
// If the local may be initialized, and it is currently being
|
|
|
|
// mutated, then it is justified to be annotated with the `mut`
|
|
|
|
// keyword, since the mutation may be a reassignment.
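// For instance, in `let mut x = 1; x = 2;` the second statement
// mutates an already-initialized `x`, so its `mut` is recorded as
// used; in `let mut x; x = 1;` the lone assignment is the
// initialization itself, so the `mut` can still be flagged as
// unused.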
|
|
|
|
let mpi = self.move_data.rev_lookup.find_local(*local);
|
2018-05-12 14:32:11 -07:00
|
|
|
let ii = &self.move_data.init_path_map[mpi];
|
|
|
|
for index in ii {
|
|
|
|
if flow_state.ever_inits.contains(index) {
|
|
|
|
self.used_mut.insert(*local);
|
2018-05-21 21:45:15 -07:00
|
|
|
break;
|
2018-05-12 14:32:11 -07:00
|
|
|
}
|
2018-04-21 23:41:44 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-07-03 20:12:09 +01:00
|
|
|
RootPlace {
|
|
|
|
place: _,
|
|
|
|
is_local_mutation_allowed: LocalMutationIsAllowed::Yes,
|
|
|
|
} => {}
|
2018-04-21 23:41:44 -07:00
|
|
|
RootPlace {
|
|
|
|
place: place @ Place::Projection(_),
|
|
|
|
is_local_mutation_allowed: _,
|
|
|
|
} => {
|
2018-07-21 14:16:25 +01:00
|
|
|
if let Some(field) = place.is_upvar_field_projection(self.mir, &self.tcx) {
|
2018-04-21 23:41:44 -07:00
|
|
|
self.used_mut_upvars.push(field);
|
|
|
|
}
|
|
|
|
}
|
2018-07-22 01:01:07 +02:00
|
|
|
RootPlace {
|
|
|
|
place: Place::Promoted(..),
|
|
|
|
is_local_mutation_allowed: _,
|
|
|
|
} => {}
|
2018-04-21 23:41:44 -07:00
|
|
|
RootPlace {
|
|
|
|
place: Place::Static(..),
|
|
|
|
is_local_mutation_allowed: _,
|
|
|
|
} => {}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Whether this value can be written or borrowed mutably.
|
|
|
|
/// Returns the root place if the place passed in is a projection.
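/// For example, if `x` is bound without `mut` to a `&mut` reference,
/// a place like `(*x).f` is still considered mutable: the check
/// follows the projections down to the local `x`, which is what is
/// returned as the root place.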
|
2017-11-17 04:47:02 -05:00
|
|
|
fn is_mutable<'d>(
|
|
|
|
&self,
|
|
|
|
place: &'d Place<'tcx>,
|
|
|
|
is_local_mutation_allowed: LocalMutationIsAllowed,
|
2018-04-21 23:41:44 -07:00
|
|
|
) -> Result<RootPlace<'d, 'tcx>, &'d Place<'tcx>> {
|
2017-12-01 14:39:51 +02:00
|
|
|
match *place {
|
2017-12-01 14:31:47 +02:00
|
|
|
Place::Local(local) => {
|
2017-10-21 21:15:04 +02:00
|
|
|
let local = &self.mir.local_decls[local];
|
|
|
|
match local.mutability {
|
2017-11-17 04:47:02 -05:00
|
|
|
Mutability::Not => match is_local_mutation_allowed {
|
2018-06-22 00:10:52 -03:00
|
|
|
LocalMutationIsAllowed::Yes => Ok(RootPlace {
|
|
|
|
place,
|
|
|
|
is_local_mutation_allowed: LocalMutationIsAllowed::Yes,
|
|
|
|
}),
|
|
|
|
LocalMutationIsAllowed::ExceptUpvars => Ok(RootPlace {
|
|
|
|
place,
|
|
|
|
is_local_mutation_allowed: LocalMutationIsAllowed::ExceptUpvars,
|
|
|
|
}),
|
2017-11-17 04:47:02 -05:00
|
|
|
LocalMutationIsAllowed::No => Err(place),
|
|
|
|
},
|
2018-06-22 00:10:52 -03:00
|
|
|
Mutability::Mut => Ok(RootPlace {
|
|
|
|
place,
|
|
|
|
is_local_mutation_allowed,
|
|
|
|
}),
|
2017-10-21 21:15:04 +02:00
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
}
|
2018-07-24 20:47:53 +02:00
|
|
|
// The rules for promotion are made by `qualify_consts`; there wouldn't even be a
|
|
|
|
// `Place::Promoted` if the promotion weren't 100% legal. So we just forward this.
|
|
|
|
Place::Promoted(_) => Ok(RootPlace {
|
|
|
|
place,
|
|
|
|
is_local_mutation_allowed,
|
|
|
|
}),
|
2018-06-22 00:10:52 -03:00
|
|
|
Place::Static(ref static_) => {
|
2018-01-16 09:31:48 +01:00
|
|
|
if self.tcx.is_static(static_.def_id) != Some(hir::Mutability::MutMutable) {
|
|
|
|
Err(place)
|
|
|
|
} else {
|
2018-06-22 00:10:52 -03:00
|
|
|
Ok(RootPlace {
|
|
|
|
place,
|
|
|
|
is_local_mutation_allowed,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2017-12-01 14:31:47 +02:00
|
|
|
Place::Projection(ref proj) => {
|
2017-10-21 21:15:04 +02:00
|
|
|
match proj.elem {
|
|
|
|
ProjectionElem::Deref => {
|
|
|
|
let base_ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx);
|
|
|
|
|
2017-11-30 23:18:38 +00:00
|
|
|
// Check the kind of deref to decide whether the target is mutable.
|
2017-10-21 21:15:04 +02:00
|
|
|
match base_ty.sty {
|
2018-05-02 15:21:05 +02:00
|
|
|
ty::TyRef(_, _, mutbl) => {
|
|
|
|
match mutbl {
|
2017-10-21 21:15:04 +02:00
|
|
|
// Shared borrowed data is never mutable
|
2017-12-01 14:39:51 +02:00
|
|
|
hir::MutImmutable => Err(place),
|
2017-10-21 21:15:04 +02:00
|
|
|
// Mutably borrowed data is mutable, but only if we have a
|
|
|
|
// unique path to the `&mut`
|
2017-11-30 23:18:38 +00:00
|
|
|
hir::MutMutable => {
|
2018-07-21 14:16:25 +01:00
|
|
|
let mode = match place.is_upvar_field_projection(
|
|
|
|
self.mir, &self.tcx)
|
2017-12-10 17:11:02 +02:00
|
|
|
{
|
2017-12-14 07:33:29 -05:00
|
|
|
Some(field)
|
|
|
|
if {
|
|
|
|
self.mir.upvar_decls[field.index()].by_ref
|
|
|
|
} =>
|
|
|
|
{
|
|
|
|
is_local_mutation_allowed
|
|
|
|
}
|
|
|
|
_ => LocalMutationIsAllowed::Yes,
|
2017-12-07 21:11:10 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
self.is_mutable(&proj.base, mode)
|
2017-11-17 04:47:02 -05:00
|
|
|
}
|
2017-10-21 21:15:04 +02:00
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
}
|
2017-10-21 21:15:04 +02:00
|
|
|
ty::TyRawPtr(tnm) => {
|
|
|
|
match tnm.mutbl {
|
|
|
|
// `*const` raw pointers are not mutable
|
2017-12-07 21:11:10 +02:00
|
|
|
hir::MutImmutable => return Err(place),
|
2018-03-23 01:59:56 -07:00
|
|
|
// `*mut` raw pointers are always mutable, regardless of
|
|
|
|
// context. Users must check validity themselves.
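// For example, given `p: *mut i32`, `unsafe { *p = 5 }` is
// accepted here even if `p` itself is an immutable binding;
// ensuring the write is sound is left to the unsafe code.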
|
|
|
|
hir::MutMutable => {
|
2018-06-22 00:10:52 -03:00
|
|
|
return Ok(RootPlace {
|
|
|
|
place,
|
|
|
|
is_local_mutation_allowed,
|
|
|
|
});
|
2018-03-23 01:59:56 -07:00
|
|
|
}
|
2017-10-21 21:15:04 +02:00
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
}
|
2017-11-30 23:18:38 +00:00
|
|
|
// `Box<T>` owns its content, so it is mutable if its location is mutable
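// For example, `*b = 1` for `b: Box<i32>` is allowed only when `b`
// itself is declared `mut`, since the check recurses into the
// mutability of `b`'s own location.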
|
2017-11-17 04:47:02 -05:00
|
|
|
_ if base_ty.is_box() => {
|
2017-12-04 13:01:50 +02:00
|
|
|
self.is_mutable(&proj.base, is_local_mutation_allowed)
|
2017-11-17 04:47:02 -05:00
|
|
|
}
|
2017-10-21 21:15:04 +02:00
|
|
|
// Deref should only be used for references, pointers or boxes
|
2017-11-30 23:18:38 +00:00
|
|
|
_ => bug!("Deref of unexpected type: {:?}", base_ty),
|
2017-10-21 21:15:04 +02:00
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
}
|
2017-10-21 21:15:04 +02:00
|
|
|
// All other projections are owned by their base path, so they are mutable if
|
|
|
|
// the base path is mutable
|
2017-12-14 07:33:29 -05:00
|
|
|
ProjectionElem::Field(..)
|
|
|
|
| ProjectionElem::Index(..)
|
|
|
|
| ProjectionElem::ConstantIndex { .. }
|
|
|
|
| ProjectionElem::Subslice { .. }
|
|
|
|
| ProjectionElem::Downcast(..) => {
|
2018-07-20 17:30:31 +01:00
|
|
|
let upvar_field_projection = place.is_upvar_field_projection(
|
2018-07-21 14:16:25 +01:00
|
|
|
self.mir, &self.tcx);
|
2018-07-20 17:30:31 +01:00
|
|
|
if let Some(field) = upvar_field_projection {
|
2017-11-30 23:18:38 +00:00
|
|
|
let decl = &self.mir.upvar_decls[field.index()];
|
2017-12-14 07:33:29 -05:00
|
|
|
debug!(
|
|
|
|
"decl.mutability={:?} local_mutation_is_allowed={:?} place={:?}",
|
2018-03-06 02:29:03 -03:00
|
|
|
decl, is_local_mutation_allowed, place
|
2017-12-14 07:33:29 -05:00
|
|
|
);
|
2017-12-07 21:11:10 +02:00
|
|
|
match (decl.mutability, is_local_mutation_allowed) {
|
2017-12-14 07:33:29 -05:00
|
|
|
(Mutability::Not, LocalMutationIsAllowed::No)
|
|
|
|
| (Mutability::Not, LocalMutationIsAllowed::ExceptUpvars) => {
|
|
|
|
Err(place)
|
|
|
|
}
|
|
|
|
(Mutability::Not, LocalMutationIsAllowed::Yes)
|
|
|
|
| (Mutability::Mut, _) => {
|
2018-04-06 12:35:50 -04:00
|
|
|
// Subtle: this is an upvar
|
|
|
|
// reference, so it looks like
|
|
|
|
// `self.foo` -- we want to double
|
|
|
|
// check that the context `*self`
|
|
|
|
// is mutable (i.e., this is not a
|
|
|
|
// `Fn` closure). But if that
|
|
|
|
// check succeeds, we want to
|
|
|
|
// *blame* the mutability on
|
|
|
|
// `place` (that is,
|
|
|
|
// `self.foo`). This is used to
|
|
|
|
// propagate the info about
|
|
|
|
// whether mutability declarations
|
|
|
|
// are used outwards, so that we register
|
|
|
|
// the outer variable as mutable. Otherwise a
|
|
|
|
// test like this fails to record the `mut`
|
|
|
|
// as needed:
|
|
|
|
//
|
|
|
|
// ```
|
|
|
|
// fn foo<F: FnOnce()>(_f: F) { }
|
|
|
|
// fn main() {
|
|
|
|
// let var = Vec::new();
|
|
|
|
// foo(move || {
|
|
|
|
// var.push(1);
|
|
|
|
// });
|
|
|
|
// }
|
|
|
|
// ```
|
|
|
|
let _ = self.is_mutable(&proj.base, is_local_mutation_allowed)?;
|
2018-06-22 00:10:52 -03:00
|
|
|
Ok(RootPlace {
|
|
|
|
place,
|
|
|
|
is_local_mutation_allowed,
|
|
|
|
})
|
2017-12-14 07:33:29 -05:00
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
}
|
2017-12-07 21:11:10 +02:00
|
|
|
} else {
|
|
|
|
self.is_mutable(&proj.base, is_local_mutation_allowed)
|
2017-10-21 21:15:04 +02:00
|
|
|
}
|
2017-11-17 04:47:02 -05:00
|
|
|
}
|
2017-10-21 21:15:04 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-09-22 15:37:43 +02:00
|
|
|
}
|
2017-07-05 14:52:18 +02:00
|
|
|
|
MIR-borrowck: Big fix to `fn check_if_path_is_moved`.
Fix #44833 (a very specific instance of a very broad bug).
In `check_if_path_is_moved(L)`, check nearest prefix of L with
MovePath, and suffixes of L with MovePaths.
Over the course of review, ariel pointed out a number of issues that
led to this version of the commit:
1. Looking solely at supporting prefixes does not suffice: it
overlooks checking if the path was ever actually initialized in the
first place. So you need to be willing to consider non-supporting
prefixes. Once you are looking at all prefixes, you *could* just
look at the local that forms the base of the projection, but to
handle partial initialization (which still needs to be formally
specified), this code instead looks at the nearest prefix of L that
has an associated MovePath (which, in the limit, will end up being
a local).
2. You also need to consider the suffixes of the given Lvalue, due to
how dataflow is representing partial moves of individual fields out
of struct values.
3. (There was originally a third search, but ariel pointed out that
the first and third could be folded into one.)
Also includes some drive-by refactorings to simplify some method
signatures and prefer `for _ in _` over `loop { }` (at least when it
comes semi-naturally).
2017-10-04 17:46:46 +02:00
|
|
|
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
|
|
|
|
enum NoMovePathFound {
|
|
|
|
ReachedStatic,
|
|
|
|
}
|
|
|
|
|
2017-12-05 15:08:10 +02:00
|
|
|
/// The degree of overlap between 2 places for borrow-checking.
|
|
|
|
enum Overlap {
|
|
|
|
/// The places might partially overlap - in this case, we give
|
|
|
|
/// up and say that they might conflict. This occurs when
|
|
|
|
/// different fields of a union are borrowed. For example,
|
|
|
|
/// if `u` is a union, we have no way of telling how disjoint
|
|
|
|
/// `u.a.x` and `u.b.y` are.
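/// For example, with `union U { a: (u8, u8), b: u16 }`, borrows of
/// `u.a.0` and `u.b` may refer to overlapping storage, so the
/// checker conservatively treats them as conflicting.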
|
|
|
|
Arbitrary,
|
2017-12-06 00:51:47 +02:00
|
|
|
/// The places have the same type, and are either completely disjoint
|
|
|
|
/// or equal - i.e. they can't "partially" overlap as can occur with
|
|
|
|
/// unions. This is the "base case" on which we recur for extensions
|
|
|
|
/// of the place.
|
2017-12-05 15:08:10 +02:00
|
|
|
EqualOrDisjoint,
|
|
|
|
/// The places are disjoint, so we know all extensions of them
|
|
|
|
/// will also be disjoint.
|
|
|
|
Disjoint,
|
|
|
|
}
|
|
|
|
|
2017-11-07 04:44:41 -05:00
|
|
|
impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
|
2017-07-05 14:52:18 +02:00
|
|
|
// FIXME (#16118): function intended to allow the borrow checker
|
|
|
|
// to be less precise in its handling of Box while still allowing
|
|
|
|
// moves out of a Box. It should be removed when/if we stop
|
|
|
|
// treating Box specially (e.g. when/if DerefMove is added...)
|
|
|
|
|
2017-12-01 14:39:51 +02:00
|
|
|
fn base_path<'d>(&self, place: &'d Place<'tcx>) -> &'d Place<'tcx> {
|
2017-07-05 14:52:18 +02:00
|
|
|
//! Returns the base of the leftmost (deepest) dereference of a
|
2017-12-01 14:39:51 +02:00
|
|
|
//! Box in `place`. If there is no dereference of a Box
|
|
|
|
//! in `place`, then it just returns `place` itself.
|
2017-07-05 14:52:18 +02:00
|
|
|
|
2017-12-01 14:39:51 +02:00
|
|
|
let mut cursor = place;
|
|
|
|
let mut deepest = place;
|
2017-07-05 14:52:18 +02:00
|
|
|
loop {
|
|
|
|
let proj = match *cursor {
|
2018-07-22 01:01:07 +02:00
|
|
|
Place::Promoted(_) |
|
2017-12-01 14:31:47 +02:00
|
|
|
Place::Local(..) | Place::Static(..) => return deepest,
|
|
|
|
Place::Projection(ref proj) => proj,
|
2017-07-05 14:52:18 +02:00
|
|
|
};
|
2017-11-17 04:47:02 -05:00
|
|
|
if proj.elem == ProjectionElem::Deref
|
|
|
|
&& place.ty(self.mir, self.tcx).to_ty(self.tcx).is_box()
|
2017-07-05 14:52:18 +02:00
|
|
|
{
|
|
|
|
deepest = &proj.base;
|
|
|
|
}
|
|
|
|
cursor = &proj.base;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
|
|
|
|
struct Context {
|
|
|
|
kind: ContextKind,
|
|
|
|
loc: Location,
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
|
|
|
|
enum ContextKind {
|
2017-12-07 17:45:13 +01:00
|
|
|
Activation,
|
2017-07-05 14:52:18 +02:00
|
|
|
AssignLhs,
|
|
|
|
AssignRhs,
|
|
|
|
SetDiscrim,
|
|
|
|
InlineAsm,
|
|
|
|
SwitchInt,
|
|
|
|
Drop,
|
|
|
|
DropAndReplace,
|
|
|
|
CallOperator,
|
|
|
|
CallOperand,
|
|
|
|
CallDest,
|
|
|
|
Assert,
|
2017-08-16 13:05:48 -07:00
|
|
|
Yield,
|
2018-05-04 12:04:33 +02:00
|
|
|
ReadForMatch,
|
2017-08-21 12:48:33 +02:00
|
|
|
StorageDead,
|
2017-07-05 14:52:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
impl ContextKind {
|
2017-11-17 04:47:02 -05:00
|
|
|
fn new(self, loc: Location) -> Context {
|
|
|
|
Context {
|
|
|
|
kind: self,
|
|
|
|
loc,
|
|
|
|
}
|
|
|
|
}
|
2017-07-05 14:52:18 +02:00
|
|
|
}
|