Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Experiment: Create timing report. #7311

Merged
merged 15 commits into from
Sep 17, 2019
Merged
1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ git2-curl = "0.11.0"
glob = "0.3.0"
hex = "0.3"
home = "0.5"
humantime = "1.2.0"
ignore = "0.4.7"
lazy_static = "1.2.0"
jobserver = "0.1.13"
Expand Down
1 change: 1 addition & 0 deletions src/bin/cargo/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ Available unstable (nightly-only) flags:
-Z config-profile -- Read profiles from .cargo/config files
-Z install-upgrade -- `cargo install` will upgrade instead of failing
-Z cache-messages -- Cache compiler messages
-Z timings -- Display concurrency information

Run with 'cargo -Z [FLAG] [SUBCOMMAND]'"
);
Expand Down
4 changes: 2 additions & 2 deletions src/cargo/core/compiler/context/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
export_dir: Option<PathBuf>,
exec: &Arc<dyn Executor>,
) -> CargoResult<Compilation<'cfg>> {
let mut queue = JobQueue::new(self.bcx);
let mut queue = JobQueue::new(self.bcx, units);
let mut plan = BuildPlan::new();
let build_plan = self.bcx.build_config.build_plan;
self.prepare_units(export_dir, units)?;
Expand Down Expand Up @@ -513,6 +513,6 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
/// Returns whether, when `unit` is built, it should also emit metadata,
/// because some compilations rely on that.
pub fn rmeta_required(&self, unit: &Unit<'a>) -> bool {
self.rmeta_required.contains(unit)
self.rmeta_required.contains(unit) || self.bcx.config.cli_unstable().timings.is_some()
}
}
36 changes: 26 additions & 10 deletions src/cargo/core/compiler/job_queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ use super::job::{
Freshness::{self, Dirty, Fresh},
Job,
};
use super::timings::Timings;
use super::{BuildContext, BuildPlan, CompileMode, Context, Unit};
use crate::core::{PackageId, TargetKind};
use crate::handle_error;
Expand All @@ -39,6 +40,7 @@ pub struct JobQueue<'a, 'cfg> {
is_release: bool,
progress: Progress<'cfg>,
next_id: u32,
timings: Timings<'a, 'cfg>,
}

pub struct JobState<'a> {
Expand Down Expand Up @@ -80,7 +82,7 @@ enum Artifact {
}

enum Message {
Run(String),
Run(u32, String),
BuildPlanMsg(String, ProcessBuilder, Arc<Vec<OutputFile>>),
Stdout(String),
Stderr(String),
Expand All @@ -91,7 +93,7 @@ enum Message {

impl<'a> JobState<'a> {
pub fn running(&self, cmd: &ProcessBuilder) {
let _ = self.tx.send(Message::Run(cmd.to_string()));
let _ = self.tx.send(Message::Run(self.id, cmd.to_string()));
}

pub fn build_plan(
Expand Down Expand Up @@ -119,7 +121,6 @@ impl<'a> JobState<'a> {
/// This should only be called once because a metadata file can only be
/// produced once!
pub fn rmeta_produced(&self) {
assert!(self.rmeta_required.get());
self.rmeta_required.set(false);
let _ = self
.tx
Expand All @@ -128,9 +129,10 @@ impl<'a> JobState<'a> {
}

impl<'a, 'cfg> JobQueue<'a, 'cfg> {
pub fn new(bcx: &BuildContext<'a, 'cfg>) -> JobQueue<'a, 'cfg> {
pub fn new(bcx: &BuildContext<'a, 'cfg>, root_units: &[Unit<'a>]) -> JobQueue<'a, 'cfg> {
let (tx, rx) = channel();
let progress = Progress::with_style("Building", ProgressStyle::Ratio, bcx.config);
let timings = Timings::new(bcx, root_units);
JobQueue {
queue: DependencyQueue::new(),
tx,
Expand All @@ -142,6 +144,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {
is_release: bcx.build_config.release,
progress,
next_id: 0,
timings,
}
}

Expand Down Expand Up @@ -316,6 +319,9 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {
// to the jobserver itself.
tokens.truncate(self.active.len() - 1);

self.timings
.mark_concurrency(self.active.len(), queue.len(), self.queue.len());

// Drain all events at once to avoid displaying the progress bar
// unnecessarily.
let events: Vec<_> = self.rx.try_iter().collect();
Expand All @@ -328,18 +334,18 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {

for event in events {
match event {
Message::Run(cmd) => {
Message::Run(id, cmd) => {
cx.bcx
.config
.shell()
.verbose(|c| c.status("Running", &cmd))?;
self.timings.unit_start(id, self.active[&id]);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Technically for the highest fidelity of timing information we should push this unit_start to just before the process is actually spawned on the worker thread itself (using internal synchronization in self.timings). It's probably not worth doing that just yet though since we're already getting what appears to be pretty accurate timing information.

}
Message::BuildPlanMsg(module_name, cmd, filenames) => {
plan.update(&module_name, &cmd, &filenames)?;
}
Message::Stdout(out) => {
self.progress.clear();
println!("{}", out);
cx.bcx.config.shell().stdout_println(out);
}
Message::Stderr(err) => {
let mut shell = cx.bcx.config.shell();
Expand Down Expand Up @@ -367,7 +373,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {
};
info!("end ({:?}): {:?}", unit, result);
match result {
Ok(()) => self.finish(&unit, artifact, cx)?,
Ok(()) => self.finish(id, &unit, artifact, cx)?,
Err(e) => {
let msg = "The following warnings were emitted during compilation:";
self.emit_warnings(Some(msg), &unit, cx)?;
Expand Down Expand Up @@ -425,6 +431,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {
if !cx.bcx.build_config.build_plan {
cx.bcx.config.shell().status("Finished", message)?;
}
self.timings.finished()?;
Ok(())
} else {
debug!("queue: {:#?}", self.queue);
Expand Down Expand Up @@ -519,8 +526,12 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {
}

match fresh {
Freshness::Fresh => doit(),
Freshness::Fresh => {
self.timings.add_fresh();
doit()
}
Freshness::Dirty => {
self.timings.add_dirty();
scope.spawn(move |_| doit());
}
}
Expand Down Expand Up @@ -558,14 +569,19 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {

fn finish(
&mut self,
id: u32,
unit: &Unit<'a>,
artifact: Artifact,
cx: &mut Context<'_, '_>,
) -> CargoResult<()> {
if unit.mode.is_run_custom_build() && cx.bcx.show_warnings(unit.pkg.package_id()) {
self.emit_warnings(None, unit, cx)?;
}
self.queue.finish(unit, &artifact);
let unlocked = self.queue.finish(unit, &artifact);
match artifact {
Artifact::All => self.timings.unit_finished(id, unlocked),
Artifact::Metadata => self.timings.unit_rmeta_finished(id, unlocked),
}
Ok(())
}

Expand Down
56 changes: 4 additions & 52 deletions src/cargo/core/compiler/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,20 +11,20 @@ mod layout;
mod links;
mod output_depinfo;
pub mod standard_lib;
mod timings;
mod unit;
pub mod unit_dependencies;

use std::env;
use std::ffi::{OsStr, OsString};
use std::fs::{self, File};
use std::io::Write;
use std::path::{Path, PathBuf};
use std::path::PathBuf;
use std::sync::Arc;

use failure::Error;
use lazycell::LazyCell;
use log::debug;
use same_file::is_same_file;
use serde::Serialize;

pub use self::build_config::{BuildConfig, CompileMode, MessageFormat};
Expand Down Expand Up @@ -442,12 +442,12 @@ fn link_targets<'a, 'cfg>(
}
};
destinations.push(dst.clone());
hardlink_or_copy(src, dst)?;
paths::link_or_copy(src, dst)?;
if let Some(ref path) = output.export_path {
let export_dir = export_dir.as_ref().unwrap();
paths::create_dir_all(export_dir)?;

hardlink_or_copy(src, path)?;
paths::link_or_copy(src, path)?;
}
}

Expand Down Expand Up @@ -476,54 +476,6 @@ fn link_targets<'a, 'cfg>(
}))
}

/// Links `src` to `dst` — a hardlink for files, a symlink for
/// directories — and falls back to a full copy when linking fails.
fn hardlink_or_copy(src: &Path, dst: &Path) -> CargoResult<()> {
    debug!("linking {} to {}", src.display(), dst.display());
    if is_same_file(src, dst).unwrap_or(false) {
        // Source and destination are already the same file; nothing to do.
        return Ok(());
    }

    // Remove any pre-existing destination first. We deliberately avoid
    // dst.exists() here: for a broken symlink it reports false even though
    // the link itself still needs to be unlinked. symlink_metadata(dst)
    // succeeds exactly when dst exists *without* following symlinks,
    // which is the check we want.
    if fs::symlink_metadata(dst).is_ok() {
        paths::remove_file(&dst)?;
    }

    let attempt = if !src.is_dir() {
        fs::hard_link(src, dst)
    } else {
        #[cfg(target_os = "redox")]
        use std::os::redox::fs::symlink;
        #[cfg(unix)]
        use std::os::unix::fs::symlink;
        #[cfg(windows)]
        use std::os::windows::fs::symlink_dir as symlink;

        // Prefer a relative symlink target when src lives inside dst's
        // parent directory, so the link survives the tree being moved.
        let parent = dst.parent().unwrap();
        let target = match src.strip_prefix(parent) {
            Ok(relative) => relative,
            Err(_) => src,
        };
        symlink(target, dst)
    };

    attempt
        .or_else(|link_err| {
            debug!("link failed {}. falling back to fs::copy", link_err);
            fs::copy(src, dst).map(|_| ())
        })
        .chain_err(|| {
            format!(
                "failed to link or copy `{}` to `{}`",
                src.display(),
                dst.display()
            )
        })?;
    Ok(())
}

// For all plugin dependencies, add their -L paths (now calculated and present
// in `build_script_outputs`) to the dynamic library load path for the command
// to execute.
Expand Down
Loading