Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Experiment: Create timing report. #7311

Merged
merged 15 commits into from
Sep 17, 2019
Merged
1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ git2-curl = "0.11.0"
glob = "0.3.0"
hex = "0.3"
home = "0.5"
humantime = "1.2.0"
ignore = "0.4.7"
lazy_static = "1.2.0"
jobserver = "0.1.13"
Expand Down
1 change: 1 addition & 0 deletions src/bin/cargo/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ Available unstable (nightly-only) flags:
-Z config-profile -- Read profiles from .cargo/config files
-Z install-upgrade -- `cargo install` will upgrade instead of failing
-Z cache-messages -- Cache compiler messages
-Z timings -- Display concurrency information

Run with 'cargo -Z [FLAG] [SUBCOMMAND]'"
);
Expand Down
2 changes: 1 addition & 1 deletion src/cargo/core/compiler/context/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
export_dir: Option<PathBuf>,
exec: &Arc<dyn Executor>,
) -> CargoResult<Compilation<'cfg>> {
let mut queue = JobQueue::new(self.bcx);
let mut queue = JobQueue::new(self.bcx, units);
let mut plan = BuildPlan::new();
let build_plan = self.bcx.build_config.build_plan;
self.prepare_units(export_dir, units)?;
Expand Down
50 changes: 40 additions & 10 deletions src/cargo/core/compiler/job_queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ use super::job::{
Freshness::{self, Dirty, Fresh},
Job,
};
use super::timings::Timings;
use super::{BuildContext, BuildPlan, CompileMode, Context, Unit};
use crate::core::{PackageId, TargetKind};
use crate::handle_error;
Expand All @@ -39,6 +40,7 @@ pub struct JobQueue<'a, 'cfg> {
is_release: bool,
progress: Progress<'cfg>,
next_id: u32,
timings: Option<Timings<'a, 'cfg>>,
}

pub struct JobState<'a> {
Expand Down Expand Up @@ -80,7 +82,7 @@ enum Artifact {
}

enum Message {
Run(String),
Run(u32, String),
BuildPlanMsg(String, ProcessBuilder, Arc<Vec<OutputFile>>),
Stdout(String),
Stderr(String),
Expand All @@ -91,7 +93,7 @@ enum Message {

impl<'a> JobState<'a> {
pub fn running(&self, cmd: &ProcessBuilder) {
let _ = self.tx.send(Message::Run(cmd.to_string()));
let _ = self.tx.send(Message::Run(self.id, cmd.to_string()));
}

pub fn build_plan(
Expand Down Expand Up @@ -119,7 +121,6 @@ impl<'a> JobState<'a> {
/// This should only be called once because a metadata file can only be
/// produced once!
pub fn rmeta_produced(&self) {
assert!(self.rmeta_required.get());
self.rmeta_required.set(false);
let _ = self
.tx
Expand All @@ -128,9 +129,13 @@ impl<'a> JobState<'a> {
}

impl<'a, 'cfg> JobQueue<'a, 'cfg> {
pub fn new(bcx: &BuildContext<'a, 'cfg>) -> JobQueue<'a, 'cfg> {
pub fn new(bcx: &BuildContext<'a, 'cfg>, root_units: &[Unit<'a>]) -> JobQueue<'a, 'cfg> {
let (tx, rx) = channel();
let progress = Progress::with_style("Building", ProgressStyle::Ratio, bcx.config);
let timings = match bcx.config.cli_unstable().timings {
Some(..) => Some(Timings::new(bcx, root_units)),
None => None,
};
JobQueue {
queue: DependencyQueue::new(),
tx,
Expand All @@ -142,6 +147,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {
is_release: bcx.build_config.release,
progress,
next_id: 0,
timings,
}
}

Expand Down Expand Up @@ -316,6 +322,10 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {
// to the jobserver itself.
tokens.truncate(self.active.len() - 1);

if let Some(t) = &mut self.timings {
t.mark_concurrency(self.active.len(), queue.len(), self.queue.len());
}

// Drain all events at once to avoid displaying the progress bar
// unnecessarily.
let events: Vec<_> = self.rx.try_iter().collect();
Expand All @@ -328,18 +338,20 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {

for event in events {
match event {
Message::Run(cmd) => {
Message::Run(id, cmd) => {
cx.bcx
.config
.shell()
.verbose(|c| c.status("Running", &cmd))?;
if let Some(t) = &mut self.timings {
t.unit_start(id, self.active[&id]);
}
}
Message::BuildPlanMsg(module_name, cmd, filenames) => {
plan.update(&module_name, &cmd, &filenames)?;
}
Message::Stdout(out) => {
self.progress.clear();
println!("{}", out);
cx.bcx.config.shell().stdout_println(out);
}
Message::Stderr(err) => {
let mut shell = cx.bcx.config.shell();
Expand Down Expand Up @@ -367,7 +379,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {
};
info!("end ({:?}): {:?}", unit, result);
match result {
Ok(()) => self.finish(&unit, artifact, cx)?,
Ok(()) => self.finish(id, &unit, artifact, cx)?,
Err(e) => {
let msg = "The following warnings were emitted during compilation:";
self.emit_warnings(Some(msg), &unit, cx)?;
Expand Down Expand Up @@ -425,6 +437,9 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {
if !cx.bcx.build_config.build_plan {
cx.bcx.config.shell().status("Finished", message)?;
}
if let Some(t) = &mut self.timings {
t.finished()?;
}
Ok(())
} else {
debug!("queue: {:#?}", self.queue);
Expand Down Expand Up @@ -519,8 +534,16 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {
}

match fresh {
Freshness::Fresh => doit(),
Freshness::Fresh => {
if let Some(t) = &mut self.timings {
t.add_fresh();
}
doit()
}
Freshness::Dirty => {
if let Some(t) = &mut self.timings {
t.add_dirty();
}
scope.spawn(move |_| doit());
}
}
Expand Down Expand Up @@ -558,14 +581,21 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {

fn finish(
&mut self,
id: u32,
unit: &Unit<'a>,
artifact: Artifact,
cx: &mut Context<'_, '_>,
) -> CargoResult<()> {
if unit.mode.is_run_custom_build() && cx.bcx.show_warnings(unit.pkg.package_id()) {
self.emit_warnings(None, unit, cx)?;
}
self.queue.finish(unit, &artifact);
let unlocked = self.queue.finish(unit, &artifact);
if let Some(t) = &mut self.timings {
alexcrichton marked this conversation as resolved.
Show resolved Hide resolved
match artifact {
Artifact::All => t.unit_finished(id, unlocked),
Artifact::Metadata => t.unit_rmeta_finished(id, unlocked),
}
}
Ok(())
}

Expand Down
64 changes: 9 additions & 55 deletions src/cargo/core/compiler/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,20 +11,20 @@ mod layout;
mod links;
mod output_depinfo;
pub mod standard_lib;
mod timings;
mod unit;
pub mod unit_dependencies;

use std::env;
use std::ffi::{OsStr, OsString};
use std::fs::{self, File};
use std::io::Write;
use std::path::{Path, PathBuf};
use std::path::PathBuf;
use std::sync::Arc;

use failure::Error;
use lazycell::LazyCell;
use log::debug;
use same_file::is_same_file;
use serde::Serialize;

pub use self::build_config::{BuildConfig, CompileMode, MessageFormat};
Expand Down Expand Up @@ -442,12 +442,12 @@ fn link_targets<'a, 'cfg>(
}
};
destinations.push(dst.clone());
hardlink_or_copy(src, dst)?;
paths::link_or_copy(src, dst)?;
if let Some(ref path) = output.export_path {
let export_dir = export_dir.as_ref().unwrap();
paths::create_dir_all(export_dir)?;

hardlink_or_copy(src, path)?;
paths::link_or_copy(src, path)?;
}
}

Expand Down Expand Up @@ -476,54 +476,6 @@ fn link_targets<'a, 'cfg>(
}))
}

/// Hardlink (file) or symlink (dir) src to dst if possible, otherwise copy it.
///
/// Linking is much cheaper than copying, so the copy is only a fallback for
/// when a link cannot be created (the exact failure reasons are
/// platform/filesystem dependent; any link error triggers the fallback).
fn hardlink_or_copy(src: &Path, dst: &Path) -> CargoResult<()> {
    debug!("linking {} to {}", src.display(), dst.display());
    // If `dst` already resolves to the very same file, there is nothing to
    // do — and removing it below would be wrong, since that could destroy
    // the only remaining path to the data.
    if is_same_file(src, dst).unwrap_or(false) {
        return Ok(());
    }

    // NB: we can't use dst.exists(), as if dst is a broken symlink,
    // dst.exists() will return false. This is problematic, as we still need to
    // unlink dst in this case. symlink_metadata(dst).is_ok() will tell us
    // whether dst exists *without* following symlinks, which is what we want.
    if fs::symlink_metadata(dst).is_ok() {
        paths::remove_file(&dst)?;
    }

    let link_result = if src.is_dir() {
        // Directories take the symlink path (hard links are for files);
        // pick the platform's symlink primitive.
        #[cfg(target_os = "redox")]
        use std::os::redox::fs::symlink;
        #[cfg(unix)]
        use std::os::unix::fs::symlink;
        #[cfg(windows)]
        use std::os::windows::fs::symlink_dir as symlink;

        // When `src` sits inside `dst`'s parent directory, symlink to the
        // relative path instead of the absolute one — presumably so the link
        // stays valid if the enclosing tree is relocated (NOTE(review):
        // intent inferred, confirm).
        let dst_dir = dst.parent().unwrap();
        let src = if src.starts_with(dst_dir) {
            src.strip_prefix(dst_dir).unwrap()
        } else {
            src
        };
        symlink(src, dst)
    } else {
        // Regular files are hard-linked.
        fs::hard_link(src, dst)
    };
    // Best-effort: any link failure falls back to a full copy; only if the
    // copy also fails do we surface an error, with both paths for context.
    link_result
        .or_else(|err| {
            debug!("link failed {}. falling back to fs::copy", err);
            fs::copy(src, dst).map(|_| ())
        })
        .chain_err(|| {
            format!(
                "failed to link or copy `{}` to `{}`",
                src.display(),
                dst.display()
            )
        })?;
    Ok(())
}

// For all plugin dependencies, add their -L paths (now calculated and present
// in `build_script_outputs`) to the dynamic library load path for the command
// to execute.
Expand Down Expand Up @@ -746,10 +698,11 @@ fn add_error_format_and_color(
// prettily, and then when parsing JSON messages from rustc we need to
// internally understand that we should extract the `rendered` field and
// present it if we can.
if cx.bcx.build_config.cache_messages() || pipelined {
let wants_artifacts = pipelined || cx.bcx.config.cli_unstable().timings.is_some();
if cx.bcx.build_config.cache_messages() || wants_artifacts {
cmd.arg("--error-format=json");
let mut json = String::from("--json=diagnostic-rendered-ansi");
if pipelined {
if wants_artifacts {
json.push_str(",artifacts");
}
match cx.bcx.build_config.message_format {
Expand Down Expand Up @@ -1123,7 +1076,8 @@ struct OutputOptions {

impl OutputOptions {
fn new<'a>(cx: &Context<'a, '_>, unit: &Unit<'a>) -> OutputOptions {
let look_for_metadata_directive = cx.rmeta_required(unit);
let look_for_metadata_directive =
cx.rmeta_required(unit) || cx.bcx.config.cli_unstable().timings.is_some();
alexcrichton marked this conversation as resolved.
Show resolved Hide resolved
let color = cx.bcx.config.shell().supports_color();
let cache_cell = if cx.bcx.build_config.cache_messages() {
let path = cx.files().message_cache_path(unit);
Expand Down
Loading