Use delayed error handling for `Encodable` and make `Encoder` infallible.
There are two impls of the `Encoder` trait: `opaque::Encoder` and `opaque::FileEncoder`. The former encodes into memory and is infallible; the latter writes to a file and is fallible. Currently, standard `Result`/`?`/`unwrap` error handling is used, but this is a bit verbose and has non-trivial cost, which is annoying given how rare failures are (especially in the infallible `opaque::Encoder` case).

This commit changes how `Encoder` fallibility is handled. All the `emit_*` methods are now infallible. `opaque::Encoder` requires no great changes for this. `opaque::FileEncoder` now implements a delayed error handling strategy: if a failure occurs, it is recorded in the `res` field, and all subsequent encoding operations are skipped while `res` indicates an error. Once encoding is complete, the new `finish` method is called, which returns a `Result`. In other words, there is now a single `Result`-producing method instead of many of them (a rough sketch of this pattern appears after the list below). This has very little effect on how file errors are reported when `opaque::FileEncoder` does fail.

Much of this commit is boring mechanical changes, removing `Result` return values and `?` or `unwrap` from expressions. The more interesting parts are as follows.

- serialize.rs: The `Encoder` trait gains an `Ok` associated type. The `into_inner` method is changed into `finish`, which returns `Result<Vec<u8>, !>`.
- opaque.rs: `FileEncoder` adopts the delayed error handling strategy. Its `Ok` type is `usize`, returning the number of bytes written, replacing previous uses of `FileEncoder::position`.
- Various methods that take an encoder now consume it, rather than being passed a mutable reference, e.g. `serialize_query_result_cache`.
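As a rough illustration of the delayed error handling strategy described above (not the actual rustc_serialize code), here is a minimal, self-contained sketch. The `SketchFileEncoder` name, the specific `emit_*` methods, and the direct unbuffered writes are assumptions made for brevity; only the overall shape mirrors the commit message: infallible `emit_*` methods, a latched `res` field, and a single fallible `finish` that returns the byte count.

```rust
use std::fs::File;
use std::io::{self, Write};
use std::path::Path;

/// A simplified file encoder using delayed error handling.
/// All `emit_*` methods are infallible from the caller's point of view;
/// the first write error is latched in `res` and every later operation
/// becomes a no-op.
struct SketchFileEncoder {
    file: File,
    written: usize,
    res: Result<(), io::Error>,
}

impl SketchFileEncoder {
    fn new(path: &Path) -> io::Result<Self> {
        Ok(SketchFileEncoder { file: File::create(path)?, written: 0, res: Ok(()) })
    }

    /// Record errors instead of returning them.
    fn emit_raw_bytes(&mut self, bytes: &[u8]) {
        if self.res.is_ok() {
            match self.file.write_all(bytes) {
                Ok(()) => self.written += bytes.len(),
                Err(e) => self.res = Err(e),
            }
        }
    }

    fn emit_u32(&mut self, v: u32) {
        self.emit_raw_bytes(&v.to_le_bytes());
    }

    /// The single `Result`-producing method: returns the number of bytes
    /// written, or the first error that occurred during encoding.
    fn finish(self) -> io::Result<usize> {
        self.res?;
        Ok(self.written)
    }
}

fn main() -> io::Result<()> {
    let mut enc = SketchFileEncoder::new(Path::new("sketch.bin"))?;
    enc.emit_raw_bytes(b"MAGIC");
    enc.emit_u32(42);
    // Errors, if any, surface only here.
    let bytes_written = enc.finish()?;
    println!("wrote {bytes_written} bytes");
    Ok(())
}
```

The real `FileEncoder` also buffers writes internally and flushes them before reporting a result; that detail is omitted from the sketch.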
Commit 1acbe7573d (parent 582b9cbc45)
45 changed files with 611 additions and 682 deletions
```diff
@@ -30,22 +30,20 @@ const HEADER_FORMAT_VERSION: u16 = 0;
 /// the Git commit hash.
 const RUSTC_VERSION: Option<&str> = option_env!("CFG_VERSION");
 
-pub(crate) fn write_file_header(stream: &mut FileEncoder, nightly_build: bool) -> FileEncodeResult {
-    stream.emit_raw_bytes(FILE_MAGIC)?;
-    stream.emit_raw_bytes(&[
-        (HEADER_FORMAT_VERSION >> 0) as u8,
-        (HEADER_FORMAT_VERSION >> 8) as u8,
-    ])?;
+pub(crate) fn write_file_header(stream: &mut FileEncoder, nightly_build: bool) {
+    stream.emit_raw_bytes(FILE_MAGIC);
+    stream
+        .emit_raw_bytes(&[(HEADER_FORMAT_VERSION >> 0) as u8, (HEADER_FORMAT_VERSION >> 8) as u8]);
 
     let rustc_version = rustc_version(nightly_build);
     assert_eq!(rustc_version.len(), (rustc_version.len() as u8) as usize);
-    stream.emit_raw_bytes(&[rustc_version.len() as u8])?;
-    stream.emit_raw_bytes(rustc_version.as_bytes())
+    stream.emit_raw_bytes(&[rustc_version.len() as u8]);
+    stream.emit_raw_bytes(rustc_version.as_bytes());
 }
 
 pub(crate) fn save_in<F>(sess: &Session, path_buf: PathBuf, name: &str, encode: F)
 where
-    F: FnOnce(&mut FileEncoder) -> FileEncodeResult,
+    F: FnOnce(FileEncoder) -> FileEncodeResult,
 {
     debug!("save: storing data in {}", path_buf.display());
 
```
```diff
@@ -80,28 +78,21 @@ where
         }
     };
 
-    if let Err(err) = write_file_header(&mut encoder, sess.is_nightly_build()) {
-        sess.err(&format!("failed to write {} header to `{}`: {}", name, path_buf.display(), err));
-        return;
-    }
-
-    if let Err(err) = encode(&mut encoder) {
-        sess.err(&format!("failed to write {} to `{}`: {}", name, path_buf.display(), err));
-        return;
-    }
-
-    if let Err(err) = encoder.flush() {
-        sess.err(&format!("failed to flush {} to `{}`: {}", name, path_buf.display(), err));
-        return;
-    }
-
-    sess.prof.artifact_size(
-        &name.replace(' ', "_"),
-        path_buf.file_name().unwrap().to_string_lossy(),
-        encoder.position() as u64,
-    );
-
-    debug!("save: data written to disk successfully");
+    write_file_header(&mut encoder, sess.is_nightly_build());
+
+    match encode(encoder) {
+        Ok(position) => {
+            sess.prof.artifact_size(
+                &name.replace(' ', "_"),
+                path_buf.file_name().unwrap().to_string_lossy(),
+                position as u64,
+            );
+            debug!("save: data written to disk successfully");
+        }
+        Err(err) => {
+            sess.err(&format!("failed to write {} to `{}`: {}", name, path_buf.display(), err));
+        }
+    }
 }
 
 /// Reads the contents of a file with a file header as defined in this module.
```
```diff
@@ -3,7 +3,7 @@ use rustc_data_structures::sync::join;
 use rustc_middle::dep_graph::{DepGraph, SerializedDepGraph, WorkProduct, WorkProductId};
 use rustc_middle::ty::TyCtxt;
 use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
-use rustc_serialize::Encodable as RustcEncodable;
+use rustc_serialize::{Encodable as RustcEncodable, Encoder};
 use rustc_session::Session;
 use std::fs;
 
```
```diff
@@ -96,8 +96,9 @@ pub fn save_work_product_index(
     debug!("save_work_product_index()");
     dep_graph.assert_ignored();
     let path = work_products_path(sess);
-    file_format::save_in(sess, path, "work product index", |e| {
-        encode_work_product_index(&new_work_products, e)
+    file_format::save_in(sess, path, "work product index", |mut e| {
+        encode_work_product_index(&new_work_products, &mut e);
+        e.finish()
     });
 
     // We also need to clean out old work-products, as not all of them are
```
```diff
@@ -123,7 +124,7 @@ pub fn save_work_product_index(
 fn encode_work_product_index(
     work_products: &FxHashMap<WorkProductId, WorkProduct>,
     encoder: &mut FileEncoder,
-) -> FileEncodeResult {
+) {
     let serialized_products: Vec<_> = work_products
         .iter()
         .map(|(id, work_product)| SerializedWorkProduct {
```
```diff
@@ -135,7 +136,7 @@ fn encode_work_product_index(
     serialized_products.encode(encoder)
 }
 
-fn encode_query_cache(tcx: TyCtxt<'_>, encoder: &mut FileEncoder) -> FileEncodeResult {
+fn encode_query_cache(tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult {
     tcx.sess.time("incr_comp_serialize_result_cache", || tcx.serialize_query_result_cache(encoder))
 }
 
```
```diff
@@ -170,24 +171,10 @@ pub fn build_dep_graph(
         }
     };
 
-    if let Err(err) = file_format::write_file_header(&mut encoder, sess.is_nightly_build()) {
-        sess.err(&format!(
-            "failed to write dependency graph header to `{}`: {}",
-            path_buf.display(),
-            err
-        ));
-        return None;
-    }
+    file_format::write_file_header(&mut encoder, sess.is_nightly_build());
 
     // First encode the commandline arguments hash
-    if let Err(err) = sess.opts.dep_tracking_hash(false).encode(&mut encoder) {
-        sess.err(&format!(
-            "failed to write dependency graph hash `{}`: {}",
-            path_buf.display(),
-            err
-        ));
-        return None;
-    }
+    sess.opts.dep_tracking_hash(false).encode(&mut encoder);
 
     Some(DepGraph::new(
         &sess.prof,
```
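For context on the serialize.rs bullet in the commit message, the following is a hypothetical, heavily simplified sketch of what an `Encoder` trait with an `Ok` associated type and a single fallible `finish` method can look like. It is not the real `rustc_serialize::Encoder` definition; the `Err` associated type, the tiny method list, and the `MemEncoder` type are assumptions made only to keep the example self-contained.

```rust
// Hypothetical trait shape; the real rustc_serialize::Encoder has many more
// emit_* methods and different details.
trait Encoder {
    /// What a successful encoding run produces, e.g. Vec<u8> for an
    /// in-memory encoder or usize (bytes written) for a file encoder.
    type Ok;
    /// Assumed here for self-containment; the commit message only names `Ok`.
    type Err;

    // Encoding methods are infallible; errors are deferred.
    fn emit_u32(&mut self, v: u32);
    fn emit_str(&mut self, s: &str);

    /// The single Result-producing operation, called once encoding is done.
    fn finish(self) -> Result<Self::Ok, Self::Err>;
}

/// An infallible in-memory encoder, mirroring the `Result<Vec<u8>, !>` shape
/// mentioned for opaque.rs (using `Infallible` as a stand-in for the unstable
/// never type `!`).
struct MemEncoder {
    data: Vec<u8>,
}

impl Encoder for MemEncoder {
    type Ok = Vec<u8>;
    type Err = std::convert::Infallible;

    fn emit_u32(&mut self, v: u32) {
        self.data.extend_from_slice(&v.to_le_bytes());
    }

    fn emit_str(&mut self, s: &str) {
        self.emit_u32(s.len() as u32);
        self.data.extend_from_slice(s.as_bytes());
    }

    fn finish(self) -> Result<Vec<u8>, Self::Err> {
        Ok(self.data)
    }
}

fn main() {
    let mut enc = MemEncoder { data: Vec::new() };
    enc.emit_str("hello");
    let bytes = enc.finish().unwrap();
    // 4 bytes of length prefix plus 5 bytes of string data.
    assert_eq!(bytes.len(), 4 + 5);
}
```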