
Auto merge of #58085 - wesleywiser:profiler_2, r=wesleywiser

Implement more detailed self profiling

Timing data and cache hits/misses are now recorded at the query level.
This allows us to show detailed per-query information, such as the total
time spent in each query.
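
Conceptually, the new recording path is an append-only per-thread event log; aggregation happens only when results are printed. A minimal sketch of the idea, trimmed from the `ProfilerEvent` and `SelfProfiler` definitions in the diff below (the category fields are omitted here):

```rust
use std::collections::HashMap;
use std::thread::ThreadId;
use std::time::Instant;

// Trimmed version of the ProfilerEvent enum from this PR: each query
// records a start/end pair, and cache hits are single events.
#[derive(Clone, Copy, Debug)]
enum ProfilerEvent {
    QueryStart { query_name: &'static str, time: Instant },
    QueryEnd { query_name: &'static str, time: Instant },
    QueryCacheHit { query_name: &'static str },
}

struct SelfProfiler {
    // Events are appended per thread; aggregation happens later, so the
    // hot recording path is a single Vec push.
    events: HashMap<ThreadId, Vec<ProfilerEvent>>,
}

impl SelfProfiler {
    fn record(&mut self, event: ProfilerEvent) {
        let thread_id = std::thread::current().id();
        self.events.entry(thread_id).or_default().push(event);
    }
}

fn main() {
    let mut profiler = SelfProfiler { events: HashMap::new() };
    profiler.record(ProfilerEvent::QueryStart { query_name: "type_of", time: Instant::now() });
    profiler.record(ProfilerEvent::QueryEnd { query_name: "type_of", time: Instant::now() });
    profiler.record(ProfilerEvent::QueryCacheHit { query_name: "type_of" });
}
```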

To see detailed query information in the summary, pass the `-Z verbose`
flag. For example:

```
rustc -Z self-profile -Z verbose hello_world.rs
```

results in something like:

```md
Self profiling results:

| Phase                                     | Time (ms)      | Time (%) | Queries        | Hits (%)
| ----------------------------------------- | -------------- | -------- | -------------- | --------
| Other                                     |            177 |    54.97 |           8094 |    45.47
| - {time spent not running queries}        |            113 |    35.09 |              0 |     0.00
| - const_eval                              |             16 |     4.97 |             26 |    11.54
| - type_of                                 |              9 |     2.80 |            627 |    27.75
| - const_eval_raw                          |              8 |     2.48 |             22 |     0.00
| - adt_def                                 |              7 |     2.17 |            381 |    11.55
| - visible_parent_map                      |              7 |     2.17 |             99 |    98.99
| - item_attrs                              |              6 |     1.86 |            698 |    50.14
| - item_children                           |              5 |     1.55 |           2815 |     0.00
| - adt_dtorck_constraint                   |              4 |     1.24 |              2 |     0.00
| - adt_destructor                          |              2 |     0.62 |             15 |    86.67
| TypeChecking                              |             53 |    16.46 |           2834 |    79.89
| - trait_impls_of                          |              9 |     2.80 |             65 |    86.15
| - evaluate_obligation                     |              7 |     2.17 |             80 |     2.50
| - const_is_rvalue_promotable_to_static    |              6 |     1.86 |              1 |     0.00
| - is_copy_raw                             |              6 |     1.86 |             29 |    58.62
| - rvalue_promotable_map                   |              6 |     1.86 |              2 |    50.00
| - {time spent not running queries}        |              6 |     1.86 |              0 |     0.00
| - typeck_item_bodies                      |              5 |     1.55 |              1 |     0.00
| - typeck_tables_of                        |              5 |     1.55 |             19 |    94.74
| - dropck_outlives                         |              2 |     0.62 |              1 |     0.00
| - layout_raw                              |              1 |     0.31 |            668 |    87.87
| Linking                                   |             48 |    14.91 |             43 |    46.51
| - {time spent not running queries}        |             48 |    14.91 |              0 |     0.00
| Codegen                                   |             29 |     9.01 |            420 |    61.90
| - {time spent not running queries}        |             16 |     4.97 |              0 |     0.00
| - collect_and_partition_mono_items        |             11 |     3.42 |             13 |    92.31
| - mir_const                               |              1 |     0.31 |              1 |     0.00
| - mir_validated                           |              1 |     0.31 |              3 |    66.67
| Expansion                                 |             14 |     4.35 |              0 |     0.00
| - {time spent not running queries}        |             14 |     4.35 |              0 |     0.00
| BorrowChecking                            |              1 |     0.31 |             12 |    41.67
| - borrowck                                |              1 |     0.31 |              2 |    50.00
| Parsing                                   |              0 |     0.00 |              0 |     0.00

Optimization level: No
Incremental: off
```
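
The per-query rows report self-time: when one query runs inside another, the inner query's elapsed time is subtracted from its parent, and a phase's time outside of any query shows up as `{time spent not running queries}`. A minimal sketch of the stack walk that computes self-times (simplified from `calculate_thread_results` in the diff; the real code also matches events by category and tracks generic activities, whose self-time becomes the `{time spent not running queries}` rows):

```rust
use std::collections::BTreeMap;

// Simplified event stream: (query name, timestamp in ns, is_start).
fn self_times(events: &[(&'static str, u64, bool)]) -> BTreeMap<&'static str, u64> {
    let mut results: BTreeMap<&'static str, u64> = BTreeMap::new();
    // Each stack entry pairs a start event with the time its children consumed.
    let mut stack: Vec<(&'static str, u64, u64)> = Vec::new();
    for &(name, time, is_start) in events {
        if is_start {
            stack.push((name, time, 0));
        } else {
            let (start_name, start_time, child_time) =
                stack.pop().expect("end event without matching start");
            assert_eq!(start_name, name, "mismatched start/end events");
            let total = time - start_time;
            // Self-time excludes time spent in nested queries.
            *results.entry(name).or_default() += total - child_time;
            // Credit this query's full duration to its parent as child time.
            if let Some((_, _, parent_child_time)) = stack.last_mut() {
                *parent_child_time += total;
            }
        }
    }
    results
}

fn main() {
    // type_of (10ns) runs nested inside typeck_tables_of (28ns total).
    let events = [
        ("typeck_tables_of", 2, true),
        ("type_of", 5, true),
        ("type_of", 15, false),
        ("typeck_tables_of", 30, false),
    ];
    let times = self_times(&events);
    assert_eq!(times["type_of"], 10);
    assert_eq!(times["typeck_tables_of"], 18); // 28ns total minus 10ns in type_of
    println!("{:?}", times);
}
```

Note that with `-Z self-profile` alone only the per-phase totals print; the per-query rows above are gated on `-Z verbose`.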

cc @nikomatsakis @michaelwoerister @Zoxc

Fixes #54141
bors 2019-02-10 11:18:50 +00:00
commit 68650cacf2
2 changed files with 381 additions and 200 deletions


@@ -113,7 +113,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
let mut lock = cache.borrow_mut();
if let Some(value) = lock.results.get(key) {
profq_msg!(tcx, ProfileQueriesMsg::CacheHit);
tcx.sess.profiler(|p| p.record_query_hit(Q::CATEGORY));
tcx.sess.profiler(|p| p.record_query_hit(Q::NAME, Q::CATEGORY));
let result = Ok((value.value.clone(), value.index));
#[cfg(debug_assertions)]
{
@@ -375,7 +375,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
if dep_node.kind.is_anon() {
profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
self.sess.profiler(|p| p.start_activity(Q::CATEGORY));
self.sess.profiler(|p| p.start_query(Q::NAME, Q::CATEGORY));
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
job.start(self, diagnostics, |tcx| {
@@ -385,7 +385,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
})
});
self.sess.profiler(|p| p.end_activity(Q::CATEGORY));
self.sess.profiler(|p| p.end_query(Q::NAME, Q::CATEGORY));
profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
self.dep_graph.read_index(dep_node_index);
@@ -452,14 +452,14 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
let result = if let Some(result) = result {
profq_msg!(self, ProfileQueriesMsg::CacheHit);
self.sess.profiler(|p| p.record_query_hit(Q::CATEGORY));
self.sess.profiler(|p| p.record_query_hit(Q::NAME, Q::CATEGORY));
result
} else {
// We could not load a result from the on-disk cache, so
// recompute.
self.sess.profiler(|p| p.start_activity(Q::CATEGORY));
self.sess.profiler(|p| p.start_query(Q::NAME, Q::CATEGORY));
// The diagnostics for this query have already been
// promoted to the current session during
@@ -472,7 +472,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
})
});
self.sess.profiler(|p| p.end_activity(Q::CATEGORY));
self.sess.profiler(|p| p.end_query(Q::NAME, Q::CATEGORY));
result
};
@@ -537,7 +537,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
key, dep_node);
profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
self.sess.profiler(|p| p.start_activity(Q::CATEGORY));
self.sess.profiler(|p| p.start_query(Q::NAME, Q::CATEGORY));
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
job.start(self, diagnostics, |tcx| {
@@ -557,7 +557,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
})
});
self.sess.profiler(|p| p.end_activity(Q::CATEGORY));
self.sess.profiler(|p| p.end_query(Q::NAME, Q::CATEGORY));
profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) {
@@ -600,7 +600,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
let _ = self.get_query::<Q>(DUMMY_SP, key);
} else {
profq_msg!(self, ProfileQueriesMsg::CacheHit);
self.sess.profiler(|p| p.record_query_hit(Q::CATEGORY));
self.sess.profiler(|p| p.record_query_hit(Q::NAME, Q::CATEGORY));
}
}
@@ -739,6 +739,7 @@ macro_rules! define_queries_inner {
sess.profiler(|p| {
$(
p.record_computed_queries(
<queries::$name<'_> as QueryConfig<'_>>::NAME,
<queries::$name<'_> as QueryConfig<'_>>::CATEGORY,
self.$name.lock().results.len()
);

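The hunks above are mechanical: every profiler hook now receives the query's `NAME` in addition to its `CATEGORY`, and the generic `start_activity`/`end_activity` calls around query execution become `start_query`/`end_query`. A minimal sketch of the new hook shape, with hypothetical simplified types standing in for the real `Session`/`SelfProfiler` interface:

```rust
#[derive(Clone, Copy, Debug)]
enum ProfileCategory {
    TypeChecking,
}

struct SelfProfiler;

impl SelfProfiler {
    // Previously: fn record_query_hit(&mut self, category: ProfileCategory).
    // Now every hook also receives the query name, so events can be
    // attributed to individual queries rather than whole phases.
    fn record_query_hit(&mut self, query_name: &'static str, category: ProfileCategory) {
        println!("cache hit: {} ({:?})", query_name, category);
    }
}

fn main() {
    let mut p = SelfProfiler;
    // Mirrors tcx.sess.profiler(|p| p.record_query_hit(Q::NAME, Q::CATEGORY)).
    p.record_query_hit("typeck_tables_of", ProfileCategory::TypeChecking);
}
```

The second file, below, rewrites the `SelfProfiler` itself around the per-thread event log.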

@@ -1,125 +1,13 @@
use crate::session::config::Options;
use std::collections::{BTreeMap, HashMap};
use std::fs;
use std::io::{self, StderrLock, Write};
use std::io::{self, Write};
use std::thread::ThreadId;
use std::time::Instant;
macro_rules! define_categories {
($($name:ident,)*) => {
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
use crate::session::config::{Options, OptLevel};
#[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)]
pub enum ProfileCategory {
$($name),*
}
#[allow(nonstandard_style)]
struct Categories<T> {
$($name: T),*
}
impl<T: Default> Categories<T> {
fn new() -> Categories<T> {
Categories {
$($name: T::default()),*
}
}
}
impl<T> Categories<T> {
fn get(&self, category: ProfileCategory) -> &T {
match category {
$(ProfileCategory::$name => &self.$name),*
}
}
fn set(&mut self, category: ProfileCategory, value: T) {
match category {
$(ProfileCategory::$name => self.$name = value),*
}
}
}
struct CategoryData {
times: Categories<u64>,
query_counts: Categories<(u64, u64)>,
}
impl CategoryData {
fn new() -> CategoryData {
CategoryData {
times: Categories::new(),
query_counts: Categories::new(),
}
}
fn print(&self, lock: &mut StderrLock<'_>) {
writeln!(lock, "| Phase | Time (ms) \
| Time (%) | Queries | Hits (%)")
.unwrap();
writeln!(lock, "| ---------------- | -------------- \
| -------- | -------------- | --------")
.unwrap();
let total_time = ($(self.times.$name + )* 0) as f32;
$(
let (hits, computed) = self.query_counts.$name;
let total = hits + computed;
let (hits, total) = if total > 0 {
(format!("{:.2}",
(((hits as f32) / (total as f32)) * 100.0)), total.to_string())
} else {
(String::new(), String::new())
};
writeln!(
lock,
"| {0: <16} | {1: <14} | {2: <8.2} | {3: <14} | {4: <8}",
stringify!($name),
self.times.$name / 1_000_000,
((self.times.$name as f32) / total_time) * 100.0,
total,
hits,
).unwrap();
)*
}
fn json(&self) -> String {
let mut json = String::from("[");
$(
let (hits, computed) = self.query_counts.$name;
let total = hits + computed;
//normalize hits to 0%
let hit_percent =
if total > 0 {
((hits as f32) / (total as f32)) * 100.0
} else {
0.0
};
json.push_str(&format!(
"{{ \"category\": \"{}\", \"time_ms\": {},\
\"query_count\": {}, \"query_hits\": {} }},",
stringify!($name),
self.times.$name / 1_000_000,
total,
format!("{:.2}", hit_percent)
));
)*
//remove the trailing ',' character
json.pop();
json.push(']');
json
}
}
}
}
define_categories! {
Parsing,
Expansion,
TypeChecking,
@@ -129,18 +17,151 @@ define_categories! {
Other,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ProfilerEvent {
QueryStart { query_name: &'static str, category: ProfileCategory, time: Instant },
QueryEnd { query_name: &'static str, category: ProfileCategory, time: Instant },
GenericActivityStart { category: ProfileCategory, time: Instant },
GenericActivityEnd { category: ProfileCategory, time: Instant },
QueryCacheHit { query_name: &'static str, category: ProfileCategory },
QueryCount { query_name: &'static str, category: ProfileCategory, count: usize },
}
impl ProfilerEvent {
fn is_start_event(&self) -> bool {
use self::ProfilerEvent::*;
match self {
QueryStart { .. } | GenericActivityStart { .. } => true,
QueryEnd { .. } | GenericActivityEnd { .. } |
QueryCacheHit { .. } | QueryCount { .. } => false,
}
}
}
pub struct SelfProfiler {
timer_stack: Vec<ProfileCategory>,
data: CategoryData,
current_timer: Instant,
events: HashMap<ThreadId, Vec<ProfilerEvent>>,
}
struct CategoryResultData {
query_times: BTreeMap<&'static str, u64>,
query_cache_stats: BTreeMap<&'static str, (u64, u64)>, //(hits, total)
}
impl CategoryResultData {
fn new() -> CategoryResultData {
CategoryResultData {
query_times: BTreeMap::new(),
query_cache_stats: BTreeMap::new(),
}
}
fn total_time(&self) -> u64 {
let mut total = 0;
for (_, time) in &self.query_times {
total += time;
}
total
}
fn total_cache_data(&self) -> (u64, u64) {
let (mut hits, mut total) = (0, 0);
for (_, (h, t)) in &self.query_cache_stats {
hits += h;
total += t;
}
(hits, total)
}
}
impl Default for CategoryResultData {
fn default() -> CategoryResultData {
CategoryResultData::new()
}
}
struct CalculatedResults {
categories: BTreeMap<ProfileCategory, CategoryResultData>,
crate_name: Option<String>,
optimization_level: OptLevel,
incremental: bool,
verbose: bool,
}
impl CalculatedResults {
fn new() -> CalculatedResults {
CalculatedResults {
categories: BTreeMap::new(),
crate_name: None,
optimization_level: OptLevel::No,
incremental: false,
verbose: false,
}
}
fn consolidate(mut cr1: CalculatedResults, cr2: CalculatedResults) -> CalculatedResults {
for (category, data) in cr2.categories {
let cr1_data = cr1.categories.entry(category).or_default();
for (query, time) in data.query_times {
*cr1_data.query_times.entry(query).or_default() += time;
}
for (query, (hits, total)) in data.query_cache_stats {
let (h, t) = cr1_data.query_cache_stats.entry(query).or_insert((0, 0));
*h += hits;
*t += total;
}
}
cr1
}
fn total_time(&self) -> u64 {
let mut total = 0;
for (_, data) in &self.categories {
total += data.total_time();
}
total
}
fn with_options(mut self, opts: &Options) -> CalculatedResults {
self.crate_name = opts.crate_name.clone();
self.optimization_level = opts.optimize;
self.incremental = opts.incremental.is_some();
self.verbose = opts.debugging_opts.verbose;
self
}
}
fn time_between_ns(start: Instant, end: Instant) -> u64 {
if start < end {
let time = end - start;
(time.as_secs() * 1_000_000_000) + (time.subsec_nanos() as u64)
} else {
debug!("time_between_ns: ignorning instance of end < start");
0
}
}
fn calculate_percent(numerator: u64, denominator: u64) -> f32 {
if denominator > 0 {
((numerator as f32) / (denominator as f32)) * 100.0
} else {
0.0
}
}
impl SelfProfiler {
pub fn new() -> SelfProfiler {
let mut profiler = SelfProfiler {
timer_stack: Vec::new(),
data: CategoryData::new(),
current_timer: Instant::now(),
events: HashMap::new(),
};
profiler.start_activity(ProfileCategory::Other);
@@ -148,104 +169,263 @@ impl SelfProfiler {
profiler
}
#[inline]
pub fn start_activity(&mut self, category: ProfileCategory) {
match self.timer_stack.last().cloned() {
None => {
self.current_timer = Instant::now();
},
Some(current_category) if current_category == category => {
//since the current category is the same as the new activity's category,
//we don't need to do anything with the timer, we just need to push it on the stack
}
Some(current_category) => {
let elapsed = self.stop_timer();
//record the current category's time
let new_time = self.data.times.get(current_category) + elapsed;
self.data.times.set(current_category, new_time);
}
}
//push the new category
self.timer_stack.push(category);
}
pub fn record_computed_queries(&mut self, category: ProfileCategory, count: usize) {
let (hits, computed) = *self.data.query_counts.get(category);
self.data.query_counts.set(category, (hits, computed + count as u64));
}
pub fn record_query_hit(&mut self, category: ProfileCategory) {
let (hits, computed) = *self.data.query_counts.get(category);
self.data.query_counts.set(category, (hits + 1, computed));
self.record(ProfilerEvent::GenericActivityStart {
category,
time: Instant::now(),
})
}
#[inline]
pub fn end_activity(&mut self, category: ProfileCategory) {
match self.timer_stack.pop() {
None => bug!("end_activity() was called but there was no running activity"),
Some(c) =>
self.record(ProfilerEvent::GenericActivityEnd {
category,
time: Instant::now(),
})
}
#[inline]
pub fn record_computed_queries(
&mut self,
query_name: &'static str,
category: ProfileCategory,
count: usize)
{
self.record(ProfilerEvent::QueryCount {
query_name,
category,
count,
})
}
#[inline]
pub fn record_query_hit(&mut self, query_name: &'static str, category: ProfileCategory) {
self.record(ProfilerEvent::QueryCacheHit {
query_name,
category,
})
}
#[inline]
pub fn start_query(&mut self, query_name: &'static str, category: ProfileCategory) {
self.record(ProfilerEvent::QueryStart {
query_name,
category,
time: Instant::now(),
});
}
#[inline]
pub fn end_query(&mut self, query_name: &'static str, category: ProfileCategory) {
self.record(ProfilerEvent::QueryEnd {
query_name,
category,
time: Instant::now(),
})
}
#[inline]
fn record(&mut self, event: ProfilerEvent) {
let thread_id = std::thread::current().id();
let events = self.events.entry(thread_id).or_default();
events.push(event);
}
fn calculate_thread_results(events: &Vec<ProfilerEvent>) -> CalculatedResults {
use self::ProfilerEvent::*;
assert!(
c == category,
"end_activity() was called but a different activity was running"),
}
events.last().map(|e| !e.is_start_event()).unwrap_or(true),
"there was an event running when calculate_reslts() was called"
);
//check if the new running timer is in the same category as this one
//if it is, we don't need to do anything
if let Some(c) = self.timer_stack.last() {
if *c == category {
return;
let mut results = CalculatedResults::new();
//(event, child time to subtract)
let mut query_stack = Vec::new();
for event in events {
match event {
QueryStart { .. } | GenericActivityStart { .. } => {
query_stack.push((event, 0));
},
QueryEnd { query_name, category, time: end_time } => {
let previous_query = query_stack.pop();
if let Some((QueryStart {
query_name: p_query_name,
time: start_time,
category: _ }, child_time_to_subtract)) = previous_query {
assert_eq!(
p_query_name,
query_name,
"Saw a query end but the previous query wasn't the corresponding start"
);
let time_ns = time_between_ns(*start_time, *end_time);
let self_time_ns = time_ns - child_time_to_subtract;
let result_data = results.categories.entry(*category).or_default();
*result_data.query_times.entry(query_name).or_default() += self_time_ns;
if let Some((_, child_time_to_subtract)) = query_stack.last_mut() {
*child_time_to_subtract += time_ns;
}
} else {
bug!("Saw a query end but the previous event wasn't a query start");
}
}
GenericActivityEnd { category, time: end_time } => {
let previous_event = query_stack.pop();
if let Some((GenericActivityStart {
category: previous_category,
time: start_time }, child_time_to_subtract)) = previous_event {
assert_eq!(
previous_category,
category,
"Saw an end but the previous event wasn't the corresponding start"
);
let time_ns = time_between_ns(*start_time, *end_time);
let self_time_ns = time_ns - child_time_to_subtract;
let result_data = results.categories.entry(*category).or_default();
*result_data.query_times
.entry("{time spent not running queries}")
.or_default() += self_time_ns;
if let Some((_, child_time_to_subtract)) = query_stack.last_mut() {
*child_time_to_subtract += time_ns;
}
} else {
bug!("Saw an activity end but the previous event wasn't an activity start");
}
},
QueryCacheHit { category, query_name } => {
let result_data = results.categories.entry(*category).or_default();
let (hits, total) =
result_data.query_cache_stats.entry(query_name).or_insert((0, 0));
*hits += 1;
*total += 1;
},
QueryCount { category, query_name, count } => {
let result_data = results.categories.entry(*category).or_default();
let (_, totals) =
result_data.query_cache_stats.entry(query_name).or_insert((0, 0));
*totals += *count as u64;
},
}
}
//the new timer is different than the previous,
//so record the elapsed time and start a new timer
let elapsed = self.stop_timer();
let new_time = self.data.times.get(category) + elapsed;
self.data.times.set(category, new_time);
//normalize the times to ms
for (_, data) in &mut results.categories {
for (_, time) in &mut data.query_times {
*time = *time / 1_000_000;
}
}
fn stop_timer(&mut self) -> u64 {
let elapsed = self.current_timer.elapsed();
results
}
self.current_timer = Instant::now();
(elapsed.as_secs() * 1_000_000_000) + (elapsed.subsec_nanos() as u64)
fn get_results(&self, opts: &Options) -> CalculatedResults {
self.events
.iter()
.map(|(_, r)| SelfProfiler::calculate_thread_results(r))
.fold(CalculatedResults::new(), CalculatedResults::consolidate)
.with_options(opts)
}
pub fn print_results(&mut self, opts: &Options) {
self.end_activity(ProfileCategory::Other);
assert!(
self.timer_stack.is_empty(),
"there were timers running when print_results() was called");
let results = self.get_results(opts);
let total_time = results.total_time() as f32;
let out = io::stderr();
let mut lock = out.lock();
let crate_name =
opts.crate_name
.as_ref()
.map(|n| format!(" for {}", n))
.unwrap_or_default();
let crate_name = results.crate_name.map(|n| format!(" for {}", n)).unwrap_or_default();
writeln!(lock, "Self profiling results{}:", crate_name).unwrap();
writeln!(lock).unwrap();
self.data.print(&mut lock);
writeln!(lock, "| Phase | Time (ms) \
| Time (%) | Queries | Hits (%)")
.unwrap();
writeln!(lock, "| ----------------------------------------- | -------------- \
| -------- | -------------- | --------")
.unwrap();
let mut categories: Vec<_> = results.categories.iter().collect();
categories.sort_by(|(_, data1), (_, data2)| data2.total_time().cmp(&data1.total_time()));
for (category, data) in categories {
let (category_hits, category_total) = data.total_cache_data();
let category_hit_percent = calculate_percent(category_hits, category_total);
writeln!(
lock,
"| {0: <41} | {1: >14} | {2: >8.2} | {3: >14} | {4: >8}",
format!("{:?}", category),
data.total_time(),
((data.total_time() as f32) / total_time) * 100.0,
category_total,
format!("{:.2}", category_hit_percent),
).unwrap();
//in verbose mode, show individual query data
if results.verbose {
//don't show queries that took less than 1ms
let mut times: Vec<_> = data.query_times.iter().filter(|(_, t)| **t > 0).collect();
times.sort_by(|(_, time1), (_, time2)| time2.cmp(time1));
for (query, time) in times {
let (hits, total) = data.query_cache_stats.get(query).unwrap_or(&(0, 0));
let hit_percent = calculate_percent(*hits, *total);
writeln!(
lock,
"| - {0: <39} | {1: >14} | {2: >8.2} | {3: >14} | {4: >8}",
query,
time,
((*time as f32) / total_time) * 100.0,
total,
format!("{:.2}", hit_percent),
).unwrap();
}
}
}
writeln!(lock).unwrap();
writeln!(lock, "Optimization level: {:?}", opts.optimize).unwrap();
let incremental = if opts.incremental.is_some() { "on" } else { "off" };
writeln!(lock, "Incremental: {}", incremental).unwrap();
writeln!(lock, "Incremental: {}", if results.incremental { "on" } else { "off" }).unwrap();
}
pub fn save_results(&self, opts: &Options) {
let category_data = self.data.json();
let results = self.get_results(opts);
let compilation_options =
format!("{{ \"optimization_level\": \"{:?}\", \"incremental\": {} }}",
opts.optimize,
if opts.incremental.is_some() { "true" } else { "false" });
results.optimization_level,
if results.incremental { "true" } else { "false" });
let mut category_data = String::new();
for (category, data) in &results.categories {
let (hits, total) = data.total_cache_data();
let hit_percent = calculate_percent(hits, total);
category_data.push_str(&format!("{{ \"category\": \"{:?}\", \"time_ms\": {}, \
\"query_count\": {}, \"query_hits\": {} }}",
category,
data.total_time(),
total,
format!("{:.2}", hit_percent)));
}
let json = format!("{{ \"category_data\": {}, \"compilation_options\": {} }}",
category_data,