diff --git a/.cargo/config.toml b/.cargo/config.toml
index 91be03414e9..d29df3626dc 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -2,5 +2,6 @@
 lint = "clippy --workspace --all-targets --verbose -- --deny warnings"
 format = "fmt --all --verbose"
 codegen = "run -p xtask_codegen --"
-benchmark = "run -p xtask_bench --release --"
+bench_parser = "run -p xtask_bench --release -- --feature parser"
+bench_formatter = "run -p xtask_bench --release -- --feature formatter"
 coverage = "run -p xtask_coverage --release --"
diff --git a/.github/workflows/bench.yml b/.github/workflows/bench_formatter.yml
similarity index 91%
rename from .github/workflows/bench.yml
rename to .github/workflows/bench_formatter.yml
index 27afa454948..0ffcbd9c49e 100644
--- a/.github/workflows/bench.yml
+++ b/.github/workflows/bench_formatter.yml
@@ -1,7 +1,7 @@
-# Parser benchmark, compares main and PR branch with Criterion.
-# Comment with text containing `!bench`, a new result will be commented at the bottom of this PR.
+# Formatter benchmark, compares main and PR branch with Criterion.
+# Comment with text containing `!bench_formatter`, a new result will be commented at the bottom of this PR.
 
-name: Parser Benchmark
+name: Formatter Benchmark
 
 on:
   issue_comment:
@@ -14,13 +14,13 @@ env:
 jobs:
   bench:
     name: Bench
-    if: github.event.issue.pull_request && contains(github.event.comment.body, '!bench')
+    if: github.event.issue.pull_request && contains(github.event.comment.body, '!bench_formatter')
     runs-on: ${{ matrix.os }}
 
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest]
+        os: [ubuntu-latest]
 
     steps:
     - name: Checkout PR Branch
@@ -30,7 +30,7 @@
 
     - name: Support longpaths
       run: git config core.longpaths true
-
+
     - name: Checkout PR Branch
       uses: actions/checkout@v2
@@ -50,13 +50,13 @@
       run: cargo build --release --locked -p xtask_bench
 
     - name: Run Bench on PR Branch
-      run: cargo benchmark --save-baseline pr
+      run: cargo bench_formatter --save-baseline pr
 
     - name: Checkout Main Branch
       run: git checkout main
 
     - name: Run Bench on Main Branch
-      run: cargo benchmark --save-baseline main
+      run: cargo bench_formatter --save-baseline main
 
     - name: Compare Bench Results on ${{ matrix.os }}
       id: bench_comparison
diff --git a/.github/workflows/bench_parser.yml b/.github/workflows/bench_parser.yml
new file mode 100644
index 00000000000..7e566bd6433
--- /dev/null
+++ b/.github/workflows/bench_parser.yml
@@ -0,0 +1,85 @@
+# Parser benchmark, compares main and PR branch with Criterion.
+# Comment with text containing `!bench_parser`, a new result will be commented at the bottom of this PR.
+
+name: Parser Benchmark
+
+on:
+  issue_comment:
+    types: [created]
+
+env:
+  RUST_LOG: info
+  RUST_BACKTRACE: 1
+
+jobs:
+  bench:
+    name: Bench
+    if: github.event.issue.pull_request && contains(github.event.comment.body, '!bench_parser')
+    runs-on: ${{ matrix.os }}
+
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest]
+
+    steps:
+    - name: Checkout PR Branch
+      uses: actions/checkout@v2
+      with:
+        submodules: false
+
+    - name: Support longpaths
+      run: git config core.longpaths true
+
+    - name: Checkout PR Branch
+      uses: actions/checkout@v2
+
+    - name: Fetch Main Branch
+      run: git fetch --no-tags --prune --no-recurse-submodules --depth=1 origin main
+
+    - name: Install toolchain
+      run: rustup show
+
+    - name: Cache
+      uses: Swatinem/rust-cache@v1
+
+    - name: Install critcmp
+      run: cargo install critcmp
+
+    - name: Compile
+      run: cargo build --release --locked -p xtask_bench
+
+    - name: Run Bench on PR Branch
+      run: cargo bench_parser --save-baseline pr
+
+    - name: Checkout Main Branch
+      run: git checkout main
+
+    - name: Run Bench on Main Branch
+      run: cargo bench_parser --save-baseline main
+
+    - name: Compare Bench Results on ${{ matrix.os }}
+      id: bench_comparison
+      shell: bash
+      run: |
+        echo "### Bench results on ${{ matrix.os }} of the parser" > output
+        echo "\`\`\`" >> output
+        critcmp main pr >> output
+        echo "\`\`\`" >> output
+        cat output
+        comment="$(cat output)"
+        comment="${comment//'%'/'%25'}"
+        comment="${comment//$'\n'/'%0A'}"
+        comment="${comment//$'\r'/'%0D'}"
+        echo "::set-output name=comment::$comment"
+
+    - name: Write a new comment
+      uses: peter-evans/create-or-update-comment@v1.4.5
+      continue-on-error: true
+      with:
+        issue-number: ${{ github.event.issue.number }}
+        body: |
+          ${{ steps.bench_comparison.outputs.comment }}
+
+    - name: Remove Criterion Artifact
+      run: rm -rf ./target/criterion
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index cfa5ca60bf0..c6e36d11fee 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -99,7 +99,8 @@ Here are some other scripts that you might find useful.
 If you are a core contributor, and you have access to create new branches from the main repository (not a fork), use these comments to run specific workflows:
 
-- `!bench` benchmarks the parser's runtime performance and writes a comment with the results
+- `!bench_parser` benchmarks the parser's runtime performance and writes a comment with the results;
+- `!bench_formatter` benchmarks the formatter's runtime performance and writes a comment with the results;
 
 #### Naming patterns
diff --git a/Cargo.lock b/Cargo.lock
index 78d14d9d400..71aedd8f927 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2025,6 +2025,7 @@ dependencies = [
  "itertools",
  "pico-args",
  "regex",
+ "rome_formatter",
  "rslint_errors",
  "rslint_parser",
  "timing",
diff --git a/crates/rome_formatter/src/ts/expressions/literal_expression.rs b/crates/rome_formatter/src/ts/expressions/literal_expression.rs
index 884aaa24613..40d78ce71fd 100644
--- a/crates/rome_formatter/src/ts/expressions/literal_expression.rs
+++ b/crates/rome_formatter/src/ts/expressions/literal_expression.rs
@@ -64,7 +64,9 @@ impl ToFormatElement for JsAnyLiteralExpression {
             JsAnyLiteralExpression::JsNullLiteralExpression(null_literal) => {
                 null_literal.to_format_element(formatter)
             }
-            JsAnyLiteralExpression::JsRegexLiteralExpression(_) => todo!(),
+            JsAnyLiteralExpression::JsRegexLiteralExpression(node) => {
+                node.to_format_element(formatter)
+            }
         }
     }
 }
diff --git a/crates/rome_formatter/src/ts/expressions/mod.rs b/crates/rome_formatter/src/ts/expressions/mod.rs
index 3a4b83f64f7..142b745529e 100644
--- a/crates/rome_formatter/src/ts/expressions/mod.rs
+++ b/crates/rome_formatter/src/ts/expressions/mod.rs
@@ -7,6 +7,7 @@ mod function_expression;
 mod identifier_expression;
 mod literal_expression;
 mod object_expression;
+mod regex_literal;
 mod sequence_expression;
 mod static_member_expression;
 mod super_expression;
diff --git a/crates/rome_formatter/src/ts/expressions/regex_literal.rs b/crates/rome_formatter/src/ts/expressions/regex_literal.rs
new file mode 100644
index 00000000000..6253e8672ee
--- /dev/null
+++ b/crates/rome_formatter/src/ts/expressions/regex_literal.rs
@@ -0,0 +1,8 @@
+use crate::{FormatElement, FormatResult, Formatter, ToFormatElement};
+use rslint_parser::ast::JsRegexLiteralExpression;
+
+impl ToFormatElement for JsRegexLiteralExpression {
+    fn to_format_element(&self, formatter: &Formatter) -> FormatResult<FormatElement> {
+        formatter.format_token(&self.value_token()?)
+    }
+}
diff --git a/xtask/bench/Cargo.toml b/xtask/bench/Cargo.toml
index 24ea161ceaf..d09f7f8cd79 100644
--- a/xtask/bench/Cargo.toml
+++ b/xtask/bench/Cargo.toml
@@ -8,6 +8,7 @@ publish = false
 xtask = { path = '../', version = "0.0" }
 rslint_parser = { path = "../../crates/rslint_parser", version = "0.3" }
 rslint_errors = { path = "../../crates/rslint_errors", version = "0.2.0" }
+rome_formatter = { path = "../../crates/rome_formatter", version = "0.0.0" }
 pico-args = "0.3.4"
 timing = "0.2.3"
diff --git a/xtask/bench/README.md b/xtask/bench/README.md
index cba5712aaeb..d282525152a 100644
--- a/xtask/bench/README.md
+++ b/xtask/bench/README.md
@@ -2,17 +2,18 @@
 
 This crate contains benchmark suites for the project.
 
+Criterion is used to generate benchmark results.
+
 ## Parser Benchmark
 
-Criterion is used to generate benchmark results,
-you can use the following instruction to get nice benchmark comparison.
+To get a benchmark comparison, you need to run the benchmark for the `main` branch and for your PR:
 
 ```bash
 # (commit your code on pr branch, run)
 git checkout main
-cargo benchmark --save-baseline main
+cargo bench_parser --save-baseline main
 git checkout -
-cargo benchmark --save-baseline pr
+cargo bench_parser --save-baseline pr
 critcmp main pr # (cargo install critcmp)
 ```
@@ -36,6 +37,19 @@ parser/vue.global.prod.js 1.09 28.7±6.39ms 4.2 MB/sec 1
 
 The 1.xx column is the percentage difference, larger means worse. For example jquery is 16% slower on main. And the pr branch performs better overall.
 
+## Formatter Benchmark
+
+To get a benchmark comparison, you need to run the benchmark for the `main` branch and for your PR:
+
+```bash
+# (commit your code on pr branch, run)
+git checkout main
+cargo bench_formatter --save-baseline main
+git checkout -
+cargo bench_formatter --save-baseline pr
+critcmp main pr # (cargo install critcmp)
+```
+
 ## Heap Profiling using `dhat`
 
 ```bash
diff --git a/xtask/bench/src/features/formatter.rs b/xtask/bench/src/features/formatter.rs
new file mode 100644
index 00000000000..829b1d38c43
--- /dev/null
+++ b/xtask/bench/src/features/formatter.rs
@@ -0,0 +1,45 @@
+use crate::BenchmarkSummary;
+use rome_formatter::{format, FormatOptions, Formatted};
+use rslint_parser::SyntaxNode;
+use std::fmt::{Display, Formatter};
+use std::time::Duration;
+
+#[derive(Debug, Clone)]
+pub struct FormatterMeasurement {
+    id: String,
+    formatting: Duration,
+}
+
+pub fn benchmark_format_lib(id: &str, root: &SyntaxNode) -> BenchmarkSummary {
+    let formatter_timer = timing::start();
+    run_format(root);
+    let formatter_duration = formatter_timer.stop();
+
+    BenchmarkSummary::Formatter(FormatterMeasurement {
+        id: id.to_string(),
+        formatting: formatter_duration,
+    })
+}
+
+pub fn run_format(root: &SyntaxNode) -> Formatted {
+    format(FormatOptions::default(), root).unwrap()
+}
+
+impl FormatterMeasurement {
+    fn total(&self) -> Duration {
+        self.formatting
+    }
+
+    pub(crate) fn summary(&self) -> String {
+        format!("{}, Formatting: {:?}", self.id, self.total())
+    }
+}
+
+impl Display for FormatterMeasurement {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        let _ = writeln!(f, "\tFormatting: {:>10?}", self.formatting);
+        let _ = writeln!(f, "\t ----------");
+        let _ = writeln!(f, "\tTotal: {:>10?}", self.total());
+
+        Ok(())
+    }
+}
diff --git a/xtask/bench/src/features/mod.rs b/xtask/bench/src/features/mod.rs
new file mode 100644
index 00000000000..f19d4aa918e
--- /dev/null
+++ b/xtask/bench/src/features/mod.rs
@@ -0,0 +1,2 @@
+pub mod formatter;
+pub mod parser;
diff --git a/xtask/bench/src/features/parser.rs b/xtask/bench/src/features/parser.rs
new file mode 100644
index 00000000000..949551497c5
--- /dev/null
+++ b/xtask/bench/src/features/parser.rs
@@ -0,0 +1,140 @@
+use crate::BenchmarkSummary;
+use itertools::Itertools;
+use rslint_errors::Diagnostic;
+use rslint_parser::ast::JsAnyRoot;
+use rslint_parser::{Parse, Syntax};
+use std::fmt::{Display, Formatter};
+use std::ops::Add;
+use std::time::Duration;
+
+#[derive(Debug, Clone)]
+pub struct ParseMeasurement {
+    id: String,
+    tokenization: Duration,
+    parsing: Duration,
+    tree_sink: Duration,
+    diagnostics: Vec<Diagnostic>,
+}
+
+#[cfg(feature = "dhat-on")]
+fn print_diff(before: dhat::Stats, current: dhat::Stats) -> dhat::Stats {
+    use humansize::{file_size_opts as options, FileSize};
+
+    println!("\tMemory");
+    if let Some(heap) = &current.heap {
+        println!("\t\tCurrent Blocks: {}", heap.curr_blocks);
+        println!(
+            "\t\tCurrent Bytes: {}",
+            heap.curr_bytes.file_size(options::CONVENTIONAL).unwrap()
+        );
+        println!("\t\tMax Blocks: {}", heap.max_blocks);
+        println!(
+            "\t\tMax Bytes: {}",
+            heap.max_bytes.file_size(options::CONVENTIONAL).unwrap()
+        );
+    }
+
+    println!(
+        "\t\tTotal Blocks: {}",
+        current.total_blocks - before.total_blocks
+    );
+    println!(
+        "\t\tTotal Bytes: {}",
+        (current.total_bytes - before.total_bytes)
+            .file_size(options::CONVENTIONAL)
+            .unwrap()
+    );
+
+    current
+}
+
+pub fn benchmark_parse_lib(id: &str, code: &str) -> BenchmarkSummary {
+    #[cfg(feature = "dhat-on")]
+    println!("Start");
+    #[cfg(feature = "dhat-on")]
+    let stats = dhat::get_stats().unwrap();
+
+    let tokenizer_timer = timing::start();
+    let (tokens, mut diagnostics) = rslint_parser::tokenize(code, 0);
+    let tok_source = rslint_parser::TokenSource::new(code, &tokens);
+    let tokenization_duration = tokenizer_timer.stop();
+
+    #[cfg(feature = "dhat-on")]
+    println!("Tokenizer");
+    #[cfg(feature = "dhat-on")]
+    let stats = print_diff(stats, dhat::get_stats().unwrap());
+
+    let parser_timer = timing::start();
+    let (events, parsing_diags, tokens) = {
+        let mut parser =
+            rslint_parser::Parser::new(tok_source, 0, rslint_parser::Syntax::default().script());
+        rslint_parser::syntax::program::parse(&mut parser);
+        let (events, parsing_diags) = parser.finish();
+        (events, parsing_diags, tokens)
+    };
+    let parse_duration = parser_timer.stop();
+
+    #[cfg(feature = "dhat-on")]
+    println!("Parsed");
+    #[cfg(feature = "dhat-on")]
+    let stats = print_diff(stats, dhat::get_stats().unwrap());
+
+    let tree_sink_timer = timing::start();
+    let mut tree_sink = rslint_parser::LosslessTreeSink::new(code, &tokens);
+    rslint_parser::process(&mut tree_sink, events, parsing_diags);
+    let (_green, sink_diags) = tree_sink.finish();
+    let tree_sink_duration = tree_sink_timer.stop();
+
+    #[cfg(feature = "dhat-on")]
+    println!("Tree-Sink");
+    #[cfg(feature = "dhat-on")]
+    print_diff(stats, dhat::get_stats().unwrap());
+
+    diagnostics.extend(sink_diags);
+    BenchmarkSummary::Parser(ParseMeasurement {
+        id: id.to_string(),
+        tokenization: tokenization_duration,
+        parsing: parse_duration,
+        tree_sink: tree_sink_duration,
+        diagnostics,
+    })
+}
+
+pub fn run_parse(code: &str) -> Parse<JsAnyRoot> {
+    let syntax = Syntax::default().module();
+    rslint_parser::parse(code, 0, syntax)
+}
+
+impl ParseMeasurement {
+    fn total(&self) -> Duration {
+        self.tokenization.add(self.parsing).add(self.tree_sink)
+    }
+
+    pub(crate) fn summary(&self) -> String {
+        format!(
+            "{}, Total Time: {:?}, tokenization: {:?}, parsing: {:?}, tree_sink: {:?}",
+            self.id,
+            self.total(),
+            self.tokenization,
+            self.parsing,
+            self.tree_sink,
+        )
+    }
+}
+
+impl Display for ParseMeasurement {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        let _ = writeln!(f, "\tTokenization: {:>10?}", self.tokenization);
+        let _ = writeln!(f, "\tParsing: {:>10?}", self.parsing);
+        let _ = writeln!(f, "\tTree_sink: {:>10?}", self.tree_sink);
+        let _ = writeln!(f, "\t ----------");
+        let _ = writeln!(f, "\tTotal: {:>10?}", self.total());
+
+        let _ = writeln!(f, "\tDiagnostics");
+        let diagnostics = &self.diagnostics.iter().group_by(|x| x.severity);
+        for (severity, items) in diagnostics {
+            let _ = writeln!(f, "\t\t{:?}: {}", severity, items.count());
+        }
+
+        Ok(())
+    }
+}
diff --git a/xtask/bench/src/lib.rs b/xtask/bench/src/lib.rs
index 094a0ecf4df..e458e670e8f 100644
--- a/xtask/bench/src/lib.rs
+++ b/xtask/bench/src/lib.rs
@@ -1,90 +1,76 @@
-use ansi_rgb::{red, Foreground};
-use itertools::Itertools;
-use rslint_errors::Diagnostic;
+mod features;
+mod utils;
+
+use rslint_parser::{parse, Syntax};
 use std::fmt::{Display, Formatter};
-use std::ops::Add;
+use std::str::FromStr;
 use std::time::Duration;
-use std::{path::PathBuf, str::FromStr};
 
-fn err_to_string<E: std::fmt::Debug>(e: E) -> String {
-    format!("{:?}", e)
+pub use crate::features::formatter::benchmark_format_lib;
+use crate::features::formatter::{run_format, FormatterMeasurement};
+pub use crate::features::parser::benchmark_parse_lib;
+use crate::features::parser::{run_parse, ParseMeasurement};
+pub use utils::get_code;
+
+/// What feature to benchmark
+pub enum FeatureToBenchmark {
+    /// benchmark of the parser
+    Parser,
+    /// benchmark of the formatter
+    Formatter,
+}
+
+impl FromStr for FeatureToBenchmark {
+    type Err = pico_args::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "parser" => Ok(Self::Parser),
+            "formatter" => Ok(Self::Formatter),
+            _ => Err(pico_args::Error::OptionWithoutAValue("feature")),
+        }
+    }
 }
 
-#[cfg(feature = "dhat-on")]
-fn print_diff(before: dhat::Stats, current: dhat::Stats) -> dhat::Stats {
-    use humansize::{file_size_opts as options, FileSize};
-
-    println!("\tMemory");
-    if let Some(heap) = &current.heap {
-        println!("\t\tCurrent Blocks: {}", heap.curr_blocks);
-        println!(
-            "\t\tCurrent Bytes: {}",
-            heap.curr_bytes.file_size(options::CONVENTIONAL).unwrap()
-        );
-        println!("\t\tMax Blocks: {}", heap.max_blocks);
-        println!(
-            "\t\tMax Bytes: {}",
-            heap.max_bytes.file_size(options::CONVENTIONAL).unwrap()
-        );
+impl Display for FeatureToBenchmark {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        match self {
+            FeatureToBenchmark::Parser => write!(f, "parser"),
+            FeatureToBenchmark::Formatter => write!(f, "formatter"),
+        }
     }
+}
 
-    println!(
-        "\t\tTotal Blocks: {}",
-        current.total_blocks - before.total_blocks
-    );
-    println!(
-        "\t\tTotal Bytes: {}",
-        (current.total_bytes - before.total_bytes)
-            .file_size(options::CONVENTIONAL)
-            .unwrap()
-    );
-
-    current
+/// Groups benchmark results by their category and provides a small interface
+/// through which each result can produce its summary
+pub enum BenchmarkSummary {
+    Parser(ParseMeasurement),
+    Formatter(FormatterMeasurement),
 }
 
-pub fn get_code(lib: &str) -> Result<(String, String), String> {
-    let url = url::Url::from_str(lib).map_err(err_to_string)?;
-    let segments = url
-        .path_segments()
-        .ok_or_else(|| "lib url has no segments".to_string())?;
-    let filename = segments
-        .last()
-        .ok_or_else(|| "lib url has no segments".to_string())?;
-
-    let mut file = PathBuf::from_str("target").map_err(err_to_string)?;
-    file.push(filename);
-
-    match std::fs::read_to_string(&file) {
-        Ok(code) => {
-            println!("[{}] - using [{}]", filename.fg(red()), file.display());
-            Ok((filename.to_string(), code))
+impl BenchmarkSummary {
+    pub fn summary(&self) -> String {
+        match self {
+            BenchmarkSummary::Parser(result) => result.summary(),
+            BenchmarkSummary::Formatter(result) => result.summary(),
         }
-        Err(_) => {
-            println!(
-                "[{}] - Downloading [{}] to [{}]",
-                filename,
-                lib,
-                file.display()
-            );
-            match ureq::get(lib).call() {
-                Ok(response) => {
-                    let mut reader = response.into_reader();
-
-                    let _ = std::fs::remove_file(&file);
-                    let mut writer = std::fs::File::create(&file).map_err(err_to_string)?;
-                    let _ = std::io::copy(&mut reader, &mut writer);
-
-                    std::fs::read_to_string(&file)
-                        .map_err(err_to_string)
-                        .map(|code| (filename.to_string(), code))
-                }
-                Err(e) => Err(format!("{:?}", e)),
-            }
+    }
+}
+
+impl Display for BenchmarkSummary {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        match self {
+            BenchmarkSummary::Parser(result) => std::fmt::Display::fmt(&result, f),
+            BenchmarkSummary::Formatter(result) => std::fmt::Display::fmt(&result, f),
+        }
     }
 }
 
-pub fn run(filter: String, criterion: bool, baseline: Option<String>) {
+fn err_to_string<E: std::fmt::Debug>(e: E) -> String {
+    format!("{:?}", e)
+}
+
+pub fn run(filter: String, criterion: bool, baseline: Option<String>, feature: FeatureToBenchmark) {
     let regex = regex::Regex::new(filter.as_str()).unwrap();
     let libs = include_str!("libs.txt").lines();
@@ -109,20 +95,44 @@ pub fn run(filter: String, criterion: bool, baseline: Option<String>) {
         if let Some(ref baseline) = baseline {
             criterion = criterion.save_baseline(baseline.to_string());
         }
-        let mut group = criterion.benchmark_group("parser");
+        let mut group = criterion.benchmark_group(feature.to_string());
         group.throughput(criterion::Throughput::Bytes(code.len() as u64));
 
-        group.bench_function(&id, |b| {
-            b.iter(|| {
-                let _ = criterion::black_box(rslint_parser::parse_module(code, 0));
-            })
+        group.bench_function(&id, |b| match feature {
+            FeatureToBenchmark::Parser => b.iter(|| {
+                criterion::black_box(run_parse(code));
+            }),
+            FeatureToBenchmark::Formatter => {
+                let syntax = Syntax::default().module();
+                let root = parse(code, 0, syntax).syntax();
+                b.iter(|| {
+                    criterion::black_box(run_format(&root));
+                })
+            }
         });
         group.finish();
     } else {
         //warmup
-        rslint_parser::parse_module(code, 0);
+        match feature {
+            FeatureToBenchmark::Parser => {
+                run_parse(code);
+            }
+            FeatureToBenchmark::Formatter => {
+                let syntax = Syntax::default().module();
+                let root = parse(code, 0, syntax).syntax();
+                run_format(&root);
+            }
+        }
     }
 
-    let result = benchmark_lib(&id, code);
+    let result = match feature {
+        FeatureToBenchmark::Parser => benchmark_parse_lib(&id, code),
+        FeatureToBenchmark::Formatter => {
+            let syntax = Syntax::default().module();
+            let root = parse(code, 0, syntax).syntax();
+            benchmark_format_lib(&id, &root)
+        }
+    };
+
     summary.push(result.summary());
 
     println!("Benchmark: {}", lib);
@@ -138,99 +148,3 @@
         println!("{}", l);
     }
 }
-
-fn benchmark_lib(id: &str, code: &str) -> BenchmarkResult {
-    #[cfg(feature = "dhat-on")]
-    println!("Start");
-    #[cfg(feature = "dhat-on")]
-    let stats = dhat::get_stats().unwrap();
-
-    let tokenizer_timer = timing::start();
-    let (tokens, mut diagnostics) = rslint_parser::tokenize(code, 0);
-    let tok_source = rslint_parser::TokenSource::new(code, &tokens);
-    let tokenization_duration = tokenizer_timer.stop();
-
-    #[cfg(feature = "dhat-on")]
-    println!("Tokenizer");
-    #[cfg(feature = "dhat-on")]
-    let stats = print_diff(stats, dhat::get_stats().unwrap());
-
-    let parser_timer = timing::start();
-    let (events, parsing_diags, tokens) = {
-        let mut parser =
-            rslint_parser::Parser::new(tok_source, 0, rslint_parser::Syntax::default().script());
-        rslint_parser::syntax::program::parse(&mut parser);
-        let (events, parsing_diags) = parser.finish();
-        (events, parsing_diags, tokens)
-    };
-    let parse_duration = parser_timer.stop();
-
-    #[cfg(feature = "dhat-on")]
-    println!("Parsed");
-    #[cfg(feature = "dhat-on")]
-    let stats = print_diff(stats, dhat::get_stats().unwrap());
-
-    let tree_sink_timer = timing::start();
-    let mut tree_sink = rslint_parser::LosslessTreeSink::new(code, &tokens);
-    rslint_parser::process(&mut tree_sink, events, parsing_diags);
-    let (_green, sink_diags) = tree_sink.finish();
-    let tree_sink_duration = tree_sink_timer.stop();
-
-    #[cfg(feature = "dhat-on")]
"dhat-on")] - println!("Tree-Sink"); - #[cfg(feature = "dhat-on")] - print_diff(stats, dhat::get_stats().unwrap()); - - diagnostics.extend(sink_diags); - BenchmarkResult { - id: id.to_string(), - tokenization: tokenization_duration, - parsing: parse_duration, - tree_sink: tree_sink_duration, - diagnostics, - } -} - -#[derive(Debug, Clone)] -struct BenchmarkResult { - id: String, - tokenization: Duration, - parsing: Duration, - tree_sink: Duration, - diagnostics: Vec, -} - -impl BenchmarkResult { - fn total(&self) -> Duration { - self.tokenization.add(self.parsing).add(self.tree_sink) - } - - fn summary(&self) -> String { - format!( - "{},Total Time,{:?},tokenization,{:?},parsing,{:?},tree_sink,{:?}", - self.id, - self.total(), - self.tokenization, - self.parsing, - self.tree_sink, - ) - } -} - -impl Display for BenchmarkResult { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - let _ = writeln!(f, "\tTokenization: {:>10?}", self.tokenization); - let _ = writeln!(f, "\tParsing: {:>10?}", self.parsing); - let _ = writeln!(f, "\tTree_sink: {:>10?}", self.tree_sink); - let _ = writeln!(f, "\t ----------"); - let _ = writeln!(f, "\tTotal: {:>10?}", self.total()); - - let _ = writeln!(f, "\tDiagnostics"); - let diagnostics = &self.diagnostics.iter().group_by(|x| x.severity); - for (severity, items) in diagnostics { - let _ = writeln!(f, "\t\t{:?}: {}", severity, items.count()); - } - - Ok(()) - } -} diff --git a/xtask/bench/src/main.rs b/xtask/bench/src/main.rs index c6e258f3eae..d01b6627b0a 100644 --- a/xtask/bench/src/main.rs +++ b/xtask/bench/src/main.rs @@ -1,7 +1,6 @@ use pico_args::Arguments; use xtask::{project_root, pushd, Result}; - -use xtask_bench::run; +use xtask_bench::{run, FeatureToBenchmark}; #[cfg(feature = "dhat-on")] use dhat::DhatAlloc; @@ -10,7 +9,7 @@ use dhat::DhatAlloc; #[global_allocator] static ALLOCATOR: DhatAlloc = DhatAlloc; -fn main() -> Result<()> { +fn main() -> Result<(), pico_args::Error> { #[cfg(feature = "dhat-on")] let dhat = dhat::Dhat::start_heap_profiling(); @@ -32,8 +31,9 @@ fn main() -> Result<()> { .unwrap() .unwrap_or(true); let baseline: Option = args.opt_value_from_str("--save-baseline").unwrap(); + // "feature" is a mandatory option and will throw an error if it's missing or incorrect + let feature: FeatureToBenchmark = args.value_from_str("--feature")?; - run(filter, criterion, baseline); - + run(filter, criterion, baseline, feature); Ok(()) } diff --git a/xtask/bench/src/utils.rs b/xtask/bench/src/utils.rs new file mode 100644 index 00000000000..ce4d4aff0a9 --- /dev/null +++ b/xtask/bench/src/utils.rs @@ -0,0 +1,48 @@ +use crate::err_to_string; +use ansi_rgb::{red, Foreground}; +use std::path::PathBuf; +use std::str::FromStr; + +pub fn get_code(lib: &str) -> Result<(String, String), String> { + let url = url::Url::from_str(lib).map_err(err_to_string)?; + let segments = url + .path_segments() + .ok_or_else(|| "lib url has no segments".to_string())?; + let filename = segments + .last() + .ok_or_else(|| "lib url has no segments".to_string())?; + + let mut file = PathBuf::from_str("target").map_err(err_to_string)?; + file.push(filename); + + match std::fs::read_to_string(&file) { + Ok(code) => { + println!("[{}] - using [{}]", filename.fg(red()), file.display()); + Ok((filename.to_string(), code)) + } + Err(_) => { + println!( + "[{}] - Downloading [{}] to [{}]", + filename, + lib, + file.display() + ); + match ureq::get(lib).call() { + Ok(response) => { + let mut reader = response.into_reader(); + + let mut writer = 
+                    if let Err(err) = std::io::copy(&mut reader, &mut writer) {
+                        drop(writer);
+                        std::fs::remove_file(&file).ok();
+                        return Err(err_to_string(err));
+                    }
+                    std::fs::read_to_string(&file)
+                        .map_err(err_to_string)
+                        .map(|code| (filename.to_string(), code))
+                }
+                Err(e) => Err(format!("{:?}", e)),
+            }
+        }
+    }
+}
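
For reference, the two new cargo aliases defined in `.cargo/config.toml` above expand to `cargo run -p xtask_bench --release -- --feature <parser|formatter>`, so a full local comparison looks like this (a usage sketch assembled from the README instructions in this diff; it assumes `critcmp` has been installed with `cargo install critcmp`):

```bash
# Benchmark the parser on main and on your PR branch, then compare.
git checkout main
cargo bench_parser --save-baseline main
git checkout -
cargo bench_parser --save-baseline pr
critcmp main pr

# The formatter benchmark follows the same pattern through the second alias:
# cargo bench_formatter --save-baseline <name>
```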