From 58240e23f7759d417a1be64db774eaf613f217d8 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Wed, 23 Feb 2022 04:47:44 +0100 Subject: [PATCH 01/12] benchmarks: Implement benchmarks for streamable --- benchmarks/block_store.py | 5 +- benchmarks/streamable.py | 213 ++++++++++++++++++++++++++++++++++++++ benchmarks/utils.py | 97 +++++++++++++++++ 3 files changed, 311 insertions(+), 4 deletions(-) create mode 100644 benchmarks/streamable.py diff --git a/benchmarks/block_store.py b/benchmarks/block_store.py index 2190412898a4..4169bf50ab06 100644 --- a/benchmarks/block_store.py +++ b/benchmarks/block_store.py @@ -6,6 +6,7 @@ import os import sys +from benchmarks.utils import clvm_generator from chia.util.db_wrapper import DBWrapper from chia.util.ints import uint128, uint64, uint32, uint8 from utils import ( @@ -36,10 +37,6 @@ random.seed(123456789) -with open("clvm_generator.bin", "rb") as f: - clvm_generator = f.read() - - async def run_add_block_benchmark(version: int): verbose: bool = "--verbose" in sys.argv diff --git a/benchmarks/streamable.py b/benchmarks/streamable.py new file mode 100644 index 000000000000..14bfc507b2e9 --- /dev/null +++ b/benchmarks/streamable.py @@ -0,0 +1,213 @@ +from dataclasses import dataclass +from enum import Enum +from time import monotonic +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union + +import click +from utils import rand_bytes, rand_full_block, rand_hash + +from chia.types.blockchain_format.sized_bytes import bytes32 +from chia.types.full_block import FullBlock +from chia.util.ints import uint8, uint64 +from chia.util.streamable import Streamable, streamable + + +@dataclass(frozen=True) +@streamable +class BenchmarkInner(Streamable): + a: str + + +@dataclass(frozen=True) +@streamable +class BenchmarkMiddle(Streamable): + a: uint64 + b: List[bytes32] + c: Tuple[str, bool, uint8, List[bytes]] + d: Tuple[BenchmarkInner, BenchmarkInner] + e: BenchmarkInner + + +@dataclass(frozen=True) +@streamable +class BenchmarkClass(Streamable): + a: Optional[BenchmarkMiddle] + b: Optional[BenchmarkMiddle] + c: BenchmarkMiddle + d: List[BenchmarkMiddle] + e: Tuple[BenchmarkMiddle, BenchmarkMiddle, BenchmarkMiddle] + + +def get_random_inner() -> BenchmarkInner: + return BenchmarkInner(rand_bytes(20).hex()) + + +def get_random_middle() -> BenchmarkMiddle: + a: uint64 = uint64(10) + b: List[bytes32] = [rand_hash() for _ in range(a)] + c: Tuple[str, bool, uint8, List[bytes]] = ("benchmark", False, uint8(1), [rand_bytes(a) for _ in range(a)]) + d: Tuple[BenchmarkInner, BenchmarkInner] = (get_random_inner(), get_random_inner()) + e: BenchmarkInner = get_random_inner() + return BenchmarkMiddle(a, b, c, d, e) + + +def get_random_benchmark_object() -> BenchmarkClass: + a: Optional[BenchmarkMiddle] = None + b: Optional[BenchmarkMiddle] = get_random_middle() + c: BenchmarkMiddle = get_random_middle() + d: List[BenchmarkMiddle] = [get_random_middle() for _ in range(5)] + e: Tuple[BenchmarkMiddle, BenchmarkMiddle, BenchmarkMiddle] = ( + get_random_middle(), + get_random_middle(), + get_random_middle(), + ) + return BenchmarkClass(a, b, c, d, e) + + +def print_row( + runs: Union[str, int], iterations: Union[str, int], mode: str, duration: Union[str, int], end: str = "\n" +) -> None: + runs = "{0:<10}".format(f"{runs}") + iterations = "{0:<14}".format(f"{iterations}") + mode = "{0:<10}".format(f"{mode}") + duration = "{0:>13}".format(f"{duration}") + print(f"{runs} | {iterations} | {mode} | {duration}", end=end) + + +def 
benchmark_object_creation(iterations: int, class_generator: Callable[[], Any]) -> float: + start = monotonic() + obj = class_generator() + cls = type(obj) + for i in range(iterations): + cls(**obj.__dict__) + return monotonic() - start + + +def benchmark_conversion( + iterations: int, + class_generator: Callable[[], Any], + conversion_cb: Callable[[Any], Any], + preparation_cb: Optional[Callable[[Any], Any]] = None, +) -> float: + obj = class_generator() + start = monotonic() + prepared_data = obj + if preparation_cb is not None: + prepared_data = preparation_cb(obj) + for i in range(iterations): + conversion_cb(prepared_data) + return monotonic() - start + + +class Data(Enum): + all = 0 + benchmark = 1 + full_block = 2 + + +class Mode(Enum): + all = 0 + creation = 1 + to_bytes = 2 + from_bytes = 3 + to_json = 4 + from_json = 5 + + +def to_bytes(obj: Any) -> bytes: + return bytes(obj) + + +@dataclass +class ModeParameter: + iterations: int + conversion_cb: Optional[Callable[[Any], Any]] = None + preparation_cb: Optional[Callable[[Any], Any]] = None + + +@dataclass +class BenchmarkParameter: + data_class: Type[Any] + object_creation_cb: Callable[[], Any] + mode_parameter: Dict[Mode, ModeParameter] + + +benchmark_parameter: Dict[Data, BenchmarkParameter] = { + Data.benchmark: BenchmarkParameter( + BenchmarkClass, + get_random_benchmark_object, + { + Mode.creation: ModeParameter(58000), + Mode.to_bytes: ModeParameter(2200, to_bytes), + Mode.from_bytes: ModeParameter(3600, BenchmarkClass.from_bytes, to_bytes), + Mode.to_json: ModeParameter(1100, BenchmarkClass.to_json_dict), + Mode.from_json: ModeParameter(930, BenchmarkClass.from_json_dict, BenchmarkClass.to_json_dict), + }, + ), + Data.full_block: BenchmarkParameter( + FullBlock, + rand_full_block, + { + Mode.creation: ModeParameter(43000), + Mode.to_bytes: ModeParameter(9650, to_bytes), + Mode.from_bytes: ModeParameter(365, FullBlock.from_bytes, to_bytes), + Mode.to_json: ModeParameter(2400, FullBlock.to_json_dict), + Mode.from_json: ModeParameter(335, FullBlock.from_json_dict, FullBlock.to_json_dict), + }, + ), +} + + +def run_benchmarks(data: Data, mode: Mode, runs: int, multiplier: int) -> None: + results: Dict[Data, Dict[Mode, List[int]]] = {} + for current_data, parameter in benchmark_parameter.items(): + results[current_data] = {} + if data == Data.all or current_data == data: + print(f"\nRun {mode.name} benchmarks with the class: {parameter.data_class.__name__}") + print_row("runs", "iterations/run", "mode", "result [ms]") + for current_mode, mode_parameter in parameter.mode_parameter.items(): + results[current_data][current_mode] = [] + if mode == Mode.all or current_mode == mode: + duration: float + iterations: int = mode_parameter.iterations * multiplier + for _ in range(max(1, runs)): + if current_mode == Mode.creation: + duration = benchmark_object_creation(iterations, parameter.object_creation_cb) + else: + assert mode_parameter.conversion_cb is not None + duration = benchmark_conversion( + iterations, + parameter.object_creation_cb, + mode_parameter.conversion_cb, + mode_parameter.preparation_cb, + ) + current_duration: int = int(duration * 1000) + results[current_data][current_mode].append(current_duration) + print_row("last", iterations, current_mode.name, current_duration, "\r") + average_duration: int = int(sum(results[current_data][current_mode]) / runs) + print_row(runs, iterations, current_mode.name, average_duration) + + +data_option_help: str = "|".join([d.name for d in Data]) +mode_option_help: str = "|".join([m.name 
for m in Mode]) + + +@click.command() +@click.option("-d", "--data", default=Data.all.name, help=data_option_help) +@click.option("-m", "--mode", default=Mode.all.name, help=mode_option_help) +@click.option("-r", "--runs", default=5, help="Number of benchmark runs to average results") +@click.option("-n", "--multiplier", default=1, help="Multiplier for iterations/run") +def run(data: str, mode: str, runs: int, multiplier: int) -> None: + try: + Data[data] + except Exception: + raise click.BadOptionUsage("data", f"{data} is not a valid data option. Select one from: " + data_option_help) + try: + Mode[mode] + except Exception: + raise click.BadOptionUsage("mode", f"{mode} is not a valid mode option. Select one from: " + mode_option_help) + run_benchmarks(Data[data], Mode[mode], runs, multiplier) + + +if __name__ == "__main__": + run() # pylint: disable = no-value-for-parameter diff --git a/benchmarks/utils.py b/benchmarks/utils.py index 4ecf9fb98939..73c97a939ee5 100644 --- a/benchmarks/utils.py +++ b/benchmarks/utils.py @@ -5,6 +5,13 @@ from chia.types.blockchain_format.coin import Coin from chia.types.blockchain_format.sized_bytes import bytes32 from chia.types.blockchain_format.vdf import VDFInfo, VDFProof +from chia.types.blockchain_format.foliage import Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo +from chia.types.blockchain_format.pool_target import PoolTarget +from chia.types.blockchain_format.program import SerializedProgram +from chia.types.blockchain_format.proof_of_space import ProofOfSpace +from chia.types.blockchain_format.reward_chain_block import RewardChainBlock +from chia.types.full_block import FullBlock +from chia.util.ints import uint128 from chia.util.db_wrapper import DBWrapper from typing import Tuple from pathlib import Path @@ -18,6 +25,9 @@ # farmer puzzle hash ph = bytes32(b"a" * 32) +with open(Path(os.path.realpath(__file__)).parent / "clvm_generator.bin", "rb") as f: + clvm_generator = f.read() + def rewards(height: uint32) -> Tuple[Coin, Coin]: farmer_coin = create_farmer_coin(height, ph, uint64(250000000), DEFAULT_CONSTANTS.GENESIS_CHALLENGE) @@ -66,6 +76,93 @@ def rand_vdf_proof() -> VDFProof: ) +def rand_full_block() -> FullBlock: + proof_of_space = ProofOfSpace( + rand_hash(), + rand_g1(), + None, + rand_g1(), + uint8(0), + rand_bytes(8 * 32), + ) + + reward_chain_block = RewardChainBlock( + uint128(1), + uint32(2), + uint128(3), + uint8(4), + rand_hash(), + proof_of_space, + None, + rand_g2(), + rand_vdf(), + None, + rand_g2(), + rand_vdf(), + rand_vdf(), + True, + ) + + pool_target = PoolTarget( + rand_hash(), + uint32(0), + ) + + foliage_block_data = FoliageBlockData( + rand_hash(), + pool_target, + rand_g2(), + rand_hash(), + rand_hash(), + ) + + foliage = Foliage( + rand_hash(), + rand_hash(), + foliage_block_data, + rand_g2(), + rand_hash(), + rand_g2(), + ) + + foliage_transaction_block = FoliageTransactionBlock( + rand_hash(), + uint64(0), + rand_hash(), + rand_hash(), + rand_hash(), + rand_hash(), + ) + + farmer_coin, pool_coin = rewards(uint32(0)) + + transactions_info = TransactionsInfo( + rand_hash(), + rand_hash(), + rand_g2(), + uint64(0), + uint64(1), + [farmer_coin, pool_coin], + ) + + full_block = FullBlock( + [], + reward_chain_block, + rand_vdf_proof(), + rand_vdf_proof(), + rand_vdf_proof(), + rand_vdf_proof(), + rand_vdf_proof(), + foliage, + foliage_transaction_block, + transactions_info, + SerializedProgram.from_bytes(clvm_generator), + [], + ) + + return full_block + + async def setup_db(name: str, db_version: 
int) -> DBWrapper: db_filename = Path(name) try: From 06356bcfcefaf6a59e8e0e491411630c87d897df Mon Sep 17 00:00:00 2001 From: xdustinface Date: Wed, 23 Feb 2022 16:51:50 +0100 Subject: [PATCH 02/12] benchmarks: Collect iterations per time instead of time per iterations --- benchmarks/streamable.py | 79 ++++++++++++++++++++++------------------ 1 file changed, 43 insertions(+), 36 deletions(-) diff --git a/benchmarks/streamable.py b/benchmarks/streamable.py index 14bfc507b2e9..704ce1e6842c 100644 --- a/benchmarks/streamable.py +++ b/benchmarks/streamable.py @@ -68,7 +68,7 @@ def print_row( runs: Union[str, int], iterations: Union[str, int], mode: str, duration: Union[str, int], end: str = "\n" ) -> None: runs = "{0:<10}".format(f"{runs}") - iterations = "{0:<14}".format(f"{iterations}") + iterations = "{0:<10}".format(f"{iterations}") mode = "{0:<10}".format(f"{mode}") duration = "{0:>13}".format(f"{duration}") print(f"{runs} | {iterations} | {mode} | {duration}", end=end) @@ -120,8 +120,7 @@ def to_bytes(obj: Any) -> bytes: @dataclass class ModeParameter: - iterations: int - conversion_cb: Optional[Callable[[Any], Any]] = None + conversion_cb: Callable[[Any], Any] preparation_cb: Optional[Callable[[Any], Any]] = None @@ -129,7 +128,7 @@ class ModeParameter: class BenchmarkParameter: data_class: Type[Any] object_creation_cb: Callable[[], Any] - mode_parameter: Dict[Mode, ModeParameter] + mode_parameter: Dict[Mode, Optional[ModeParameter]] benchmark_parameter: Dict[Data, BenchmarkParameter] = { @@ -137,55 +136,63 @@ class BenchmarkParameter: BenchmarkClass, get_random_benchmark_object, { - Mode.creation: ModeParameter(58000), - Mode.to_bytes: ModeParameter(2200, to_bytes), - Mode.from_bytes: ModeParameter(3600, BenchmarkClass.from_bytes, to_bytes), - Mode.to_json: ModeParameter(1100, BenchmarkClass.to_json_dict), - Mode.from_json: ModeParameter(930, BenchmarkClass.from_json_dict, BenchmarkClass.to_json_dict), + Mode.creation: None, + Mode.to_bytes: ModeParameter(to_bytes), + Mode.from_bytes: ModeParameter(BenchmarkClass.from_bytes, to_bytes), + Mode.to_json: ModeParameter(BenchmarkClass.to_json_dict), + Mode.from_json: ModeParameter(BenchmarkClass.from_json_dict, BenchmarkClass.to_json_dict), }, ), Data.full_block: BenchmarkParameter( FullBlock, rand_full_block, { - Mode.creation: ModeParameter(43000), - Mode.to_bytes: ModeParameter(9650, to_bytes), - Mode.from_bytes: ModeParameter(365, FullBlock.from_bytes, to_bytes), - Mode.to_json: ModeParameter(2400, FullBlock.to_json_dict), - Mode.from_json: ModeParameter(335, FullBlock.from_json_dict, FullBlock.to_json_dict), + Mode.creation: None, + Mode.to_bytes: ModeParameter(to_bytes), + Mode.from_bytes: ModeParameter(FullBlock.from_bytes, to_bytes), + Mode.to_json: ModeParameter(FullBlock.to_json_dict), + Mode.from_json: ModeParameter(FullBlock.from_json_dict, FullBlock.to_json_dict), }, ), } -def run_benchmarks(data: Data, mode: Mode, runs: int, multiplier: int) -> None: +def run_for_ms(cb: Callable[[], Any], ms_to_run: int = 100) -> int: + iterations: int = 0 + start = monotonic() + while int((monotonic() - start) * 1000) < ms_to_run: + cb() + iterations += 1 + return iterations + + +def run_benchmarks(data: Data, mode: Mode, runs: int, milliseconds: int) -> None: results: Dict[Data, Dict[Mode, List[int]]] = {} for current_data, parameter in benchmark_parameter.items(): results[current_data] = {} if data == Data.all or current_data == data: print(f"\nRun {mode.name} benchmarks with the class: {parameter.data_class.__name__}") - print_row("runs", 
"iterations/run", "mode", "result [ms]") - for current_mode, mode_parameter in parameter.mode_parameter.items(): + print_row("runs", "ms/run", "mode", "avg iterations") + for current_mode, current_mode_parameter in parameter.mode_parameter.items(): results[current_data][current_mode] = [] if mode == Mode.all or current_mode == mode: - duration: float - iterations: int = mode_parameter.iterations * multiplier + iterations: int for _ in range(max(1, runs)): + obj = parameter.object_creation_cb() if current_mode == Mode.creation: - duration = benchmark_object_creation(iterations, parameter.object_creation_cb) + cls = type(obj) + iterations = run_for_ms(lambda: cls(**obj.__dict__), milliseconds) else: - assert mode_parameter.conversion_cb is not None - duration = benchmark_conversion( - iterations, - parameter.object_creation_cb, - mode_parameter.conversion_cb, - mode_parameter.preparation_cb, - ) - current_duration: int = int(duration * 1000) - results[current_data][current_mode].append(current_duration) - print_row("last", iterations, current_mode.name, current_duration, "\r") - average_duration: int = int(sum(results[current_data][current_mode]) / runs) - print_row(runs, iterations, current_mode.name, average_duration) + assert current_mode_parameter is not None + conversion_cb = current_mode_parameter.conversion_cb + assert conversion_cb is not None + if current_mode_parameter.preparation_cb is not None: + obj = current_mode_parameter.preparation_cb(obj) + iterations = run_for_ms(lambda: conversion_cb(obj), milliseconds) + results[current_data][current_mode].append(iterations) + print_row("last", milliseconds, current_mode.name, iterations, "\r") + average_iterations: int = int(sum(results[current_data][current_mode]) / runs) + print_row(runs, milliseconds, current_mode.name, average_iterations) data_option_help: str = "|".join([d.name for d in Data]) @@ -195,9 +202,9 @@ def run_benchmarks(data: Data, mode: Mode, runs: int, multiplier: int) -> None: @click.command() @click.option("-d", "--data", default=Data.all.name, help=data_option_help) @click.option("-m", "--mode", default=Mode.all.name, help=mode_option_help) -@click.option("-r", "--runs", default=5, help="Number of benchmark runs to average results") -@click.option("-n", "--multiplier", default=1, help="Multiplier for iterations/run") -def run(data: str, mode: str, runs: int, multiplier: int) -> None: +@click.option("-r", "--runs", default=50, help="Number of benchmark runs to average results") +@click.option("-t", "--ms", default=50, help="Milliseconds per run") +def run(data: str, mode: str, runs: int, ms: int) -> None: try: Data[data] except Exception: @@ -206,7 +213,7 @@ def run(data: str, mode: str, runs: int, multiplier: int) -> None: Mode[mode] except Exception: raise click.BadOptionUsage("mode", f"{mode} is not a valid mode option. 
Select one from: " + mode_option_help) - run_benchmarks(Data[data], Mode[mode], runs, multiplier) + run_benchmarks(Data[data], Mode[mode], runs, ms) if __name__ == "__main__": From 54341b403ff31ed170b61ed0c8c9534e08cb5673 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Wed, 23 Feb 2022 19:06:26 +0100 Subject: [PATCH 03/12] benchmarks: Add standard deviation to streamable benchs --- benchmarks/streamable.py | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/benchmarks/streamable.py b/benchmarks/streamable.py index 704ce1e6842c..3d298906fb63 100644 --- a/benchmarks/streamable.py +++ b/benchmarks/streamable.py @@ -1,5 +1,6 @@ from dataclasses import dataclass from enum import Enum +from statistics import stdev from time import monotonic from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union @@ -65,13 +66,19 @@ def get_random_benchmark_object() -> BenchmarkClass: def print_row( - runs: Union[str, int], iterations: Union[str, int], mode: str, duration: Union[str, int], end: str = "\n" + runs: Union[str, int], + iterations: Union[str, int], + mode: str, + duration: Union[str, int], + std_deviation: Union[str, float], + end: str = "\n", ) -> None: runs = "{0:<10}".format(f"{runs}") iterations = "{0:<10}".format(f"{iterations}") mode = "{0:<10}".format(f"{mode}") - duration = "{0:>13}".format(f"{duration}") - print(f"{runs} | {iterations} | {mode} | {duration}", end=end) + duration = "{0:>14}".format(f"{duration}") + std_deviation = "{0:>13}".format(f"{std_deviation}") + print(f"{runs} | {iterations} | {mode} | {duration} | {std_deviation}", end=end) def benchmark_object_creation(iterations: int, class_generator: Callable[[], Any]) -> float: @@ -166,17 +173,22 @@ def run_for_ms(cb: Callable[[], Any], ms_to_run: int = 100) -> int: return iterations +def calc_stdev(iterations: List[int]) -> float: + return 0 if len(iterations) < 2 else int(stdev(iterations) * 100) / 100 + + def run_benchmarks(data: Data, mode: Mode, runs: int, milliseconds: int) -> None: results: Dict[Data, Dict[Mode, List[int]]] = {} for current_data, parameter in benchmark_parameter.items(): results[current_data] = {} if data == Data.all or current_data == data: print(f"\nRun {mode.name} benchmarks with the class: {parameter.data_class.__name__}") - print_row("runs", "ms/run", "mode", "avg iterations") + print_row("runs", "ms/run", "mode", "avg iterations", "std deviation") for current_mode, current_mode_parameter in parameter.mode_parameter.items(): results[current_data][current_mode] = [] if mode == Mode.all or current_mode == mode: iterations: int + all_iterations: List[int] = results[current_data][current_mode] for _ in range(max(1, runs)): obj = parameter.object_creation_cb() if current_mode == Mode.creation: @@ -189,10 +201,10 @@ def run_benchmarks(data: Data, mode: Mode, runs: int, milliseconds: int) -> None if current_mode_parameter.preparation_cb is not None: obj = current_mode_parameter.preparation_cb(obj) iterations = run_for_ms(lambda: conversion_cb(obj), milliseconds) - results[current_data][current_mode].append(iterations) - print_row("last", milliseconds, current_mode.name, iterations, "\r") - average_iterations: int = int(sum(results[current_data][current_mode]) / runs) - print_row(runs, milliseconds, current_mode.name, average_iterations) + all_iterations.append(iterations) + print_row("last", milliseconds, current_mode.name, iterations, calc_stdev(all_iterations), "\r") + average_iterations: int = int(sum(all_iterations) / runs) + print_row(runs, 
milliseconds, current_mode.name, average_iterations, calc_stdev(all_iterations)) data_option_help: str = "|".join([d.name for d in Data]) From e4fbf73e401e11913e7ff419e2808f58e20d5ec3 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Wed, 23 Feb 2022 21:28:38 +0100 Subject: [PATCH 04/12] benchmarks: Add ns/iteration to streamable benchs --- benchmarks/streamable.py | 79 +++++++++++++++++++++++++++------------- 1 file changed, 54 insertions(+), 25 deletions(-) diff --git a/benchmarks/streamable.py b/benchmarks/streamable.py index 3d298906fb63..71e22fff0e38 100644 --- a/benchmarks/streamable.py +++ b/benchmarks/streamable.py @@ -1,6 +1,6 @@ from dataclasses import dataclass from enum import Enum -from statistics import stdev +from statistics import mean, stdev from time import monotonic from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union @@ -66,19 +66,22 @@ def get_random_benchmark_object() -> BenchmarkClass: def print_row( + *, runs: Union[str, int], - iterations: Union[str, int], + ms_per_run: Union[str, int], + ns_per_iteration: Union[str, int], mode: str, - duration: Union[str, int], - std_deviation: Union[str, float], + avg_iterations: Union[str, int], + stdev_iterations: Union[str, float], end: str = "\n", ) -> None: runs = "{0:<10}".format(f"{runs}") - iterations = "{0:<10}".format(f"{iterations}") + ms_per_run = "{0:<10}".format(f"{ms_per_run}") + ns_per_iteration = "{0:<12}".format(f"{ns_per_iteration}") mode = "{0:<10}".format(f"{mode}") - duration = "{0:>14}".format(f"{duration}") - std_deviation = "{0:>13}".format(f"{std_deviation}") - print(f"{runs} | {iterations} | {mode} | {duration} | {std_deviation}", end=end) + avg_iterations = "{0:>14}".format(f"{avg_iterations}") + stdev_iterations = "{0:>13}".format(f"{stdev_iterations}") + print(f"{runs} | {ms_per_run} | {ns_per_iteration} | {mode} | {avg_iterations} | {stdev_iterations}", end=end) def benchmark_object_creation(iterations: int, class_generator: Callable[[], Any]) -> float: @@ -164,13 +167,15 @@ class BenchmarkParameter: } -def run_for_ms(cb: Callable[[], Any], ms_to_run: int = 100) -> int: - iterations: int = 0 +def run_for_ms(cb: Callable[[], Any], ms_to_run: int = 100) -> List[int]: + ns_iteration_results: List[int] = [] start = monotonic() while int((monotonic() - start) * 1000) < ms_to_run: + start_iteration = monotonic() cb() - iterations += 1 - return iterations + stop_iteration = monotonic() + ns_iteration_results.append(int((stop_iteration - start_iteration) * 1000 * 1000)) + return ns_iteration_results def calc_stdev(iterations: List[int]) -> float: @@ -178,33 +183,57 @@ def calc_stdev(iterations: List[int]) -> float: def run_benchmarks(data: Data, mode: Mode, runs: int, milliseconds: int) -> None: - results: Dict[Data, Dict[Mode, List[int]]] = {} + results: Dict[Data, Dict[Mode, List[List[int]]]] = {} for current_data, parameter in benchmark_parameter.items(): results[current_data] = {} if data == Data.all or current_data == data: print(f"\nRun {mode.name} benchmarks with the class: {parameter.data_class.__name__}") - print_row("runs", "ms/run", "mode", "avg iterations", "std deviation") + print_row( + runs="runs", + ms_per_run="ms/run", + ns_per_iteration="ns/iteration", + mode="mode", + avg_iterations="avg iterations", + stdev_iterations="stdev iterations", + ) for current_mode, current_mode_parameter in parameter.mode_parameter.items(): results[current_data][current_mode] = [] if mode == Mode.all or current_mode == mode: - iterations: int - all_iterations: List[int] = 
results[current_data][current_mode] - for _ in range(max(1, runs)): + ns_iteration_results: List[int] + all_results: List[List[int]] = results[current_data][current_mode] + + def print_results(print_run: int, final: bool) -> None: + total_iterations: int = sum(len(x) for x in all_results) + total_elapsed_ns: int = sum(sum(x) for x in all_results) + print_row( + runs=print_run if final else "current", + ms_per_run=int(mean(sum(x) for x in all_results) / 1000), + ns_per_iteration=int(total_elapsed_ns / total_iterations), + mode=current_mode.name, + avg_iterations=int(total_iterations / print_run), + stdev_iterations=calc_stdev([len(x) for x in all_results]), + end="\n" if final else "\r", + ) + + current_run: int = 0 + while current_run < runs: + current_run += 1 obj = parameter.object_creation_cb() if current_mode == Mode.creation: cls = type(obj) - iterations = run_for_ms(lambda: cls(**obj.__dict__), milliseconds) + ns_iteration_results = run_for_ms(lambda: cls(**obj.__dict__), milliseconds) else: assert current_mode_parameter is not None conversion_cb = current_mode_parameter.conversion_cb assert conversion_cb is not None + obj = parameter.object_creation_cb() if current_mode_parameter.preparation_cb is not None: obj = current_mode_parameter.preparation_cb(obj) - iterations = run_for_ms(lambda: conversion_cb(obj), milliseconds) - all_iterations.append(iterations) - print_row("last", milliseconds, current_mode.name, iterations, calc_stdev(all_iterations), "\r") - average_iterations: int = int(sum(all_iterations) / runs) - print_row(runs, milliseconds, current_mode.name, average_iterations, calc_stdev(all_iterations)) + ns_iteration_results = run_for_ms(lambda: conversion_cb(obj), milliseconds) + all_results.append(ns_iteration_results) + print_results(current_run, False) + assert current_run == runs + print_results(runs, True) data_option_help: str = "|".join([d.name for d in Data]) @@ -214,7 +243,7 @@ def run_benchmarks(data: Data, mode: Mode, runs: int, milliseconds: int) -> None @click.command() @click.option("-d", "--data", default=Data.all.name, help=data_option_help) @click.option("-m", "--mode", default=Mode.all.name, help=mode_option_help) -@click.option("-r", "--runs", default=50, help="Number of benchmark runs to average results") +@click.option("-r", "--runs", default=100, help="Number of benchmark runs to average results") @click.option("-t", "--ms", default=50, help="Milliseconds per run") def run(data: str, mode: str, runs: int, ms: int) -> None: try: @@ -225,7 +254,7 @@ def run(data: str, mode: str, runs: int, ms: int) -> None: Mode[mode] except Exception: raise click.BadOptionUsage("mode", f"{mode} is not a valid mode option. 
Select one from: " + mode_option_help) - run_benchmarks(Data[data], Mode[mode], runs, ms) + run_benchmarks(Data[data], Mode[mode], max(1, runs), ms) if __name__ == "__main__": From 2c931a98d01498eea67e570624c536ccde4759c9 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Wed, 23 Feb 2022 23:03:11 +0100 Subject: [PATCH 05/12] benchmarks: Move object creation out or the runs loop --- benchmarks/streamable.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/benchmarks/streamable.py b/benchmarks/streamable.py index 71e22fff0e38..aa49d3493db9 100644 --- a/benchmarks/streamable.py +++ b/benchmarks/streamable.py @@ -201,6 +201,7 @@ def run_benchmarks(data: Data, mode: Mode, runs: int, milliseconds: int) -> None if mode == Mode.all or current_mode == mode: ns_iteration_results: List[int] all_results: List[List[int]] = results[current_data][current_mode] + obj = parameter.object_creation_cb() def print_results(print_run: int, final: bool) -> None: total_iterations: int = sum(len(x) for x in all_results) @@ -218,7 +219,7 @@ def print_results(print_run: int, final: bool) -> None: current_run: int = 0 while current_run < runs: current_run += 1 - obj = parameter.object_creation_cb() + if current_mode == Mode.creation: cls = type(obj) ns_iteration_results = run_for_ms(lambda: cls(**obj.__dict__), milliseconds) @@ -226,10 +227,10 @@ def print_results(print_run: int, final: bool) -> None: assert current_mode_parameter is not None conversion_cb = current_mode_parameter.conversion_cb assert conversion_cb is not None - obj = parameter.object_creation_cb() + prepared_obj = parameter.object_creation_cb() if current_mode_parameter.preparation_cb is not None: - obj = current_mode_parameter.preparation_cb(obj) - ns_iteration_results = run_for_ms(lambda: conversion_cb(obj), milliseconds) + prepared_obj = current_mode_parameter.preparation_cb(obj) + ns_iteration_results = run_for_ms(lambda: conversion_cb(prepared_obj), milliseconds) all_results.append(ns_iteration_results) print_results(current_run, False) assert current_run == runs From ecb1bf56a81253e1038099ee41f56620e0926374 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Thu, 24 Feb 2022 00:13:51 +0100 Subject: [PATCH 06/12] benchmarks: Use `click.Choice` for `--data` and `--mode` --- benchmarks/streamable.py | 58 +++++++++++++++------------------------- benchmarks/utils.py | 12 +++++++++ 2 files changed, 34 insertions(+), 36 deletions(-) diff --git a/benchmarks/streamable.py b/benchmarks/streamable.py index aa49d3493db9..69bc67e492da 100644 --- a/benchmarks/streamable.py +++ b/benchmarks/streamable.py @@ -5,7 +5,7 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union import click -from utils import rand_bytes, rand_full_block, rand_hash +from utils import EnumType, rand_bytes, rand_full_block, rand_hash from chia.types.blockchain_format.sized_bytes import bytes32 from chia.types.full_block import FullBlock @@ -109,19 +109,21 @@ def benchmark_conversion( return monotonic() - start -class Data(Enum): - all = 0 - benchmark = 1 - full_block = 2 +# The strings in this Enum are by purpose. See benchmark.utils.EnumType. +class Data(str, Enum): + all = "all" + benchmark = "benchmark" + full_block = "full_block" -class Mode(Enum): - all = 0 - creation = 1 - to_bytes = 2 - from_bytes = 3 - to_json = 4 - from_json = 5 +# The strings in this Enum are by purpose. See benchmark.utils.EnumType. 
+class Mode(str, Enum): + all = "all" + creation = "creation" + to_bytes = "to_bytes" + from_bytes = "from_bytes" + to_json = "to_json" + from_json = "from_json" def to_bytes(obj: Any) -> bytes: @@ -182,7 +184,12 @@ def calc_stdev(iterations: List[int]) -> float: return 0 if len(iterations) < 2 else int(stdev(iterations) * 100) / 100 -def run_benchmarks(data: Data, mode: Mode, runs: int, milliseconds: int) -> None: +@click.command() +@click.option("-d", "--data", default=Data.all, type=EnumType(Data)) +@click.option("-m", "--mode", default=Mode.all, type=EnumType(Mode)) +@click.option("-r", "--runs", default=100, help="Number of benchmark runs to average results") +@click.option("-t", "--ms", default=50, help="Milliseconds per run") +def run(data: Data, mode: Mode, runs: int, ms: int) -> None: results: Dict[Data, Dict[Mode, List[List[int]]]] = {} for current_data, parameter in benchmark_parameter.items(): results[current_data] = {} @@ -222,7 +229,7 @@ def print_results(print_run: int, final: bool) -> None: if current_mode == Mode.creation: cls = type(obj) - ns_iteration_results = run_for_ms(lambda: cls(**obj.__dict__), milliseconds) + ns_iteration_results = run_for_ms(lambda: cls(**obj.__dict__), ms) else: assert current_mode_parameter is not None conversion_cb = current_mode_parameter.conversion_cb @@ -230,33 +237,12 @@ def print_results(print_run: int, final: bool) -> None: prepared_obj = parameter.object_creation_cb() if current_mode_parameter.preparation_cb is not None: prepared_obj = current_mode_parameter.preparation_cb(obj) - ns_iteration_results = run_for_ms(lambda: conversion_cb(prepared_obj), milliseconds) + ns_iteration_results = run_for_ms(lambda: conversion_cb(prepared_obj), ms) all_results.append(ns_iteration_results) print_results(current_run, False) assert current_run == runs print_results(runs, True) -data_option_help: str = "|".join([d.name for d in Data]) -mode_option_help: str = "|".join([m.name for m in Mode]) - - -@click.command() -@click.option("-d", "--data", default=Data.all.name, help=data_option_help) -@click.option("-m", "--mode", default=Mode.all.name, help=mode_option_help) -@click.option("-r", "--runs", default=100, help="Number of benchmark runs to average results") -@click.option("-t", "--ms", default=50, help="Milliseconds per run") -def run(data: str, mode: str, runs: int, ms: int) -> None: - try: - Data[data] - except Exception: - raise click.BadOptionUsage("data", f"{data} is not a valid data option. Select one from: " + data_option_help) - try: - Mode[mode] - except Exception: - raise click.BadOptionUsage("mode", f"{mode} is not a valid mode option. 
Select one from: " + mode_option_help) - run_benchmarks(Data[data], Mode[mode], max(1, runs), ms) - - if __name__ == "__main__": run() # pylint: disable = no-value-for-parameter diff --git a/benchmarks/utils.py b/benchmarks/utils.py index 73c97a939ee5..3f3604bd750a 100644 --- a/benchmarks/utils.py +++ b/benchmarks/utils.py @@ -17,6 +17,7 @@ from pathlib import Path from datetime import datetime import aiosqlite +import click import os import sys import random @@ -29,6 +30,17 @@ clvm_generator = f.read() +# Workaround to allow `Enum` with click.Choice: https://github.com/pallets/click/issues/605#issuecomment-901099036 +class EnumType(click.Choice): + def __init__(self, enum, case_sensitive=False): + self.__enum = enum + super().__init__(choices=[item.value for item in enum], case_sensitive=case_sensitive) + + def convert(self, value, param, ctx): + converted_str = super().convert(value, param, ctx) + return self.__enum(converted_str) + + def rewards(height: uint32) -> Tuple[Coin, Coin]: farmer_coin = create_farmer_coin(height, ph, uint64(250000000), DEFAULT_CONSTANTS.GENESIS_CHALLENGE) pool_coin = create_pool_coin(height, ph, uint64(1750000000), DEFAULT_CONSTANTS.GENESIS_CHALLENGE) From 8cabece10a88c0b6ff2d0d5ec42895d9aa3d2255 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Thu, 24 Feb 2022 01:26:02 +0100 Subject: [PATCH 07/12] =?UTF-8?q?benchmarks:=20Its=20=C2=B5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- benchmarks/streamable.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/benchmarks/streamable.py b/benchmarks/streamable.py index 69bc67e492da..51b42a56f68a 100644 --- a/benchmarks/streamable.py +++ b/benchmarks/streamable.py @@ -69,7 +69,7 @@ def print_row( *, runs: Union[str, int], ms_per_run: Union[str, int], - ns_per_iteration: Union[str, int], + us_per_iteration: Union[str, int], mode: str, avg_iterations: Union[str, int], stdev_iterations: Union[str, float], @@ -77,11 +77,11 @@ def print_row( ) -> None: runs = "{0:<10}".format(f"{runs}") ms_per_run = "{0:<10}".format(f"{ms_per_run}") - ns_per_iteration = "{0:<12}".format(f"{ns_per_iteration}") + us_per_iteration = "{0:<12}".format(f"{us_per_iteration}") mode = "{0:<10}".format(f"{mode}") avg_iterations = "{0:>14}".format(f"{avg_iterations}") stdev_iterations = "{0:>13}".format(f"{stdev_iterations}") - print(f"{runs} | {ms_per_run} | {ns_per_iteration} | {mode} | {avg_iterations} | {stdev_iterations}", end=end) + print(f"{runs} | {ms_per_run} | {us_per_iteration} | {mode} | {avg_iterations} | {stdev_iterations}", end=end) def benchmark_object_creation(iterations: int, class_generator: Callable[[], Any]) -> float: @@ -170,14 +170,14 @@ class BenchmarkParameter: def run_for_ms(cb: Callable[[], Any], ms_to_run: int = 100) -> List[int]: - ns_iteration_results: List[int] = [] + us_iteration_results: List[int] = [] start = monotonic() while int((monotonic() - start) * 1000) < ms_to_run: start_iteration = monotonic() cb() stop_iteration = monotonic() - ns_iteration_results.append(int((stop_iteration - start_iteration) * 1000 * 1000)) - return ns_iteration_results + us_iteration_results.append(int((stop_iteration - start_iteration) * 1000 * 1000)) + return us_iteration_results def calc_stdev(iterations: List[int]) -> float: @@ -198,7 +198,7 @@ def run(data: Data, mode: Mode, runs: int, ms: int) -> None: print_row( runs="runs", ms_per_run="ms/run", - ns_per_iteration="ns/iteration", + us_per_iteration="µs/iteration", mode="mode", 
avg_iterations="avg iterations", stdev_iterations="stdev iterations", @@ -206,17 +206,17 @@ def run(data: Data, mode: Mode, runs: int, ms: int) -> None: for current_mode, current_mode_parameter in parameter.mode_parameter.items(): results[current_data][current_mode] = [] if mode == Mode.all or current_mode == mode: - ns_iteration_results: List[int] + us_iteration_results: List[int] all_results: List[List[int]] = results[current_data][current_mode] obj = parameter.object_creation_cb() def print_results(print_run: int, final: bool) -> None: total_iterations: int = sum(len(x) for x in all_results) - total_elapsed_ns: int = sum(sum(x) for x in all_results) + total_elapsed_us: int = sum(sum(x) for x in all_results) print_row( runs=print_run if final else "current", ms_per_run=int(mean(sum(x) for x in all_results) / 1000), - ns_per_iteration=int(total_elapsed_ns / total_iterations), + us_per_iteration=int(total_elapsed_us / total_iterations), mode=current_mode.name, avg_iterations=int(total_iterations / print_run), stdev_iterations=calc_stdev([len(x) for x in all_results]), @@ -229,7 +229,7 @@ def print_results(print_run: int, final: bool) -> None: if current_mode == Mode.creation: cls = type(obj) - ns_iteration_results = run_for_ms(lambda: cls(**obj.__dict__), ms) + us_iteration_results = run_for_ms(lambda: cls(**obj.__dict__), ms) else: assert current_mode_parameter is not None conversion_cb = current_mode_parameter.conversion_cb @@ -237,8 +237,8 @@ def print_results(print_run: int, final: bool) -> None: prepared_obj = parameter.object_creation_cb() if current_mode_parameter.preparation_cb is not None: prepared_obj = current_mode_parameter.preparation_cb(obj) - ns_iteration_results = run_for_ms(lambda: conversion_cb(prepared_obj), ms) - all_results.append(ns_iteration_results) + us_iteration_results = run_for_ms(lambda: conversion_cb(prepared_obj), ms) + all_results.append(us_iteration_results) print_results(current_run, False) assert current_run == runs print_results(runs, True) From ae28873762c502968d57430076bdfd3ba1accc9f Mon Sep 17 00:00:00 2001 From: xdustinface Date: Thu, 24 Feb 2022 01:48:52 +0100 Subject: [PATCH 08/12] benchmarks: Improve logging --- benchmarks/streamable.py | 35 +++++++++++++++-------------------- 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/benchmarks/streamable.py b/benchmarks/streamable.py index 51b42a56f68a..ea074e6eb759 100644 --- a/benchmarks/streamable.py +++ b/benchmarks/streamable.py @@ -67,21 +67,17 @@ def get_random_benchmark_object() -> BenchmarkClass: def print_row( *, - runs: Union[str, int], - ms_per_run: Union[str, int], - us_per_iteration: Union[str, int], mode: str, + us_per_iteration: Union[str, int], avg_iterations: Union[str, int], stdev_iterations: Union[str, float], end: str = "\n", ) -> None: - runs = "{0:<10}".format(f"{runs}") - ms_per_run = "{0:<10}".format(f"{ms_per_run}") - us_per_iteration = "{0:<12}".format(f"{us_per_iteration}") mode = "{0:<10}".format(f"{mode}") + us_per_iteration = "{0:<12}".format(f"{us_per_iteration}") avg_iterations = "{0:>14}".format(f"{avg_iterations}") - stdev_iterations = "{0:>13}".format(f"{stdev_iterations}") - print(f"{runs} | {ms_per_run} | {us_per_iteration} | {mode} | {avg_iterations} | {stdev_iterations}", end=end) + stdev_iterations = "{0:>18}".format(f"{stdev_iterations}") + print(f"{mode} | {us_per_iteration} | {avg_iterations} | {stdev_iterations}", end=end) def benchmark_object_creation(iterations: int, class_generator: Callable[[], Any]) -> float: @@ -180,8 +176,9 @@ def 
run_for_ms(cb: Callable[[], Any], ms_to_run: int = 100) -> List[int]: return us_iteration_results -def calc_stdev(iterations: List[int]) -> float: - return 0 if len(iterations) < 2 else int(stdev(iterations) * 100) / 100 +def calc_stdev_percent(iterations: List[int], avg: int) -> float: + deviation = 0 if len(iterations) < 2 else int(stdev(iterations) * 100) / 100 + return int((deviation / avg * 100) * 100) / 100 @click.command() @@ -194,14 +191,12 @@ def run(data: Data, mode: Mode, runs: int, ms: int) -> None: for current_data, parameter in benchmark_parameter.items(): results[current_data] = {} if data == Data.all or current_data == data: - print(f"\nRun {mode.name} benchmarks with the class: {parameter.data_class.__name__}") + print(f"\nruns: {runs}, ms/run: {ms}, benchmarks: {mode.name}, data: {parameter.data_class.__name__}") print_row( - runs="runs", - ms_per_run="ms/run", - us_per_iteration="µs/iteration", mode="mode", + us_per_iteration="µs/iteration", avg_iterations="avg iterations", - stdev_iterations="stdev iterations", + stdev_iterations="stdev iterations %", ) for current_mode, current_mode_parameter in parameter.mode_parameter.items(): results[current_data][current_mode] = [] @@ -213,13 +208,13 @@ def run(data: Data, mode: Mode, runs: int, ms: int) -> None: def print_results(print_run: int, final: bool) -> None: total_iterations: int = sum(len(x) for x in all_results) total_elapsed_us: int = sum(sum(x) for x in all_results) + avg_iterations: int = int(total_iterations / print_run) + stdev_iterations: float = calc_stdev_percent([len(x) for x in all_results], avg_iterations) print_row( - runs=print_run if final else "current", - ms_per_run=int(mean(sum(x) for x in all_results) / 1000), - us_per_iteration=int(total_elapsed_us / total_iterations), mode=current_mode.name, - avg_iterations=int(total_iterations / print_run), - stdev_iterations=calc_stdev([len(x) for x in all_results]), + us_per_iteration=int(total_elapsed_us / total_iterations), + avg_iterations=avg_iterations, + stdev_iterations=stdev_iterations, end="\n" if final else "\r", ) From 9b958d54f1dbcf9337d527c974d7418e8295d53f Mon Sep 17 00:00:00 2001 From: xdustinface Date: Thu, 24 Feb 2022 02:58:37 +0100 Subject: [PATCH 09/12] benchmarks: Drop unused code --- benchmarks/streamable.py | 27 +-------------------------- 1 file changed, 1 insertion(+), 26 deletions(-) diff --git a/benchmarks/streamable.py b/benchmarks/streamable.py index ea074e6eb759..e4437518cc50 100644 --- a/benchmarks/streamable.py +++ b/benchmarks/streamable.py @@ -1,6 +1,6 @@ from dataclasses import dataclass from enum import Enum -from statistics import mean, stdev +from statistics import stdev from time import monotonic from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union @@ -80,31 +80,6 @@ def print_row( print(f"{mode} | {us_per_iteration} | {avg_iterations} | {stdev_iterations}", end=end) -def benchmark_object_creation(iterations: int, class_generator: Callable[[], Any]) -> float: - start = monotonic() - obj = class_generator() - cls = type(obj) - for i in range(iterations): - cls(**obj.__dict__) - return monotonic() - start - - -def benchmark_conversion( - iterations: int, - class_generator: Callable[[], Any], - conversion_cb: Callable[[Any], Any], - preparation_cb: Optional[Callable[[Any], Any]] = None, -) -> float: - obj = class_generator() - start = monotonic() - prepared_data = obj - if preparation_cb is not None: - prepared_data = preparation_cb(obj) - for i in range(iterations): - conversion_cb(prepared_data) - return 
monotonic() - start - - # The strings in this Enum are by purpose. See benchmark.utils.EnumType. class Data(str, Enum): all = "all" From a0520e88ae63590233d6bd75dce167a26392dda3 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Thu, 24 Feb 2022 03:02:06 +0100 Subject: [PATCH 10/12] benchmarks: Use `process_time` as clock --- benchmarks/streamable.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/benchmarks/streamable.py b/benchmarks/streamable.py index e4437518cc50..4db4b3b91e00 100644 --- a/benchmarks/streamable.py +++ b/benchmarks/streamable.py @@ -1,7 +1,7 @@ from dataclasses import dataclass from enum import Enum from statistics import stdev -from time import monotonic +from time import process_time as clock from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union import click @@ -142,11 +142,11 @@ class BenchmarkParameter: def run_for_ms(cb: Callable[[], Any], ms_to_run: int = 100) -> List[int]: us_iteration_results: List[int] = [] - start = monotonic() - while int((monotonic() - start) * 1000) < ms_to_run: - start_iteration = monotonic() + start = clock() + while int((clock() - start) * 1000) < ms_to_run: + start_iteration = clock() cb() - stop_iteration = monotonic() + stop_iteration = clock() us_iteration_results.append(int((stop_iteration - start_iteration) * 1000 * 1000)) return us_iteration_results From 70669cb55b04726e5c55e961e26e17042469c26c Mon Sep 17 00:00:00 2001 From: xdustinface Date: Thu, 24 Feb 2022 03:20:24 +0100 Subject: [PATCH 11/12] benchmarks: Add stdev `us/iterations %` + more precission --- benchmarks/streamable.py | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/benchmarks/streamable.py b/benchmarks/streamable.py index 4db4b3b91e00..8cb711ac6b96 100644 --- a/benchmarks/streamable.py +++ b/benchmarks/streamable.py @@ -68,16 +68,18 @@ def get_random_benchmark_object() -> BenchmarkClass: def print_row( *, mode: str, - us_per_iteration: Union[str, int], + us_per_iteration: Union[str, float], + stdev_us_per_iteration: Union[str, float], avg_iterations: Union[str, int], stdev_iterations: Union[str, float], end: str = "\n", ) -> None: mode = "{0:<10}".format(f"{mode}") us_per_iteration = "{0:<12}".format(f"{us_per_iteration}") - avg_iterations = "{0:>14}".format(f"{avg_iterations}") - stdev_iterations = "{0:>18}".format(f"{stdev_iterations}") - print(f"{mode} | {us_per_iteration} | {avg_iterations} | {stdev_iterations}", end=end) + stdev_us_per_iteration = "{0:>20}".format(f"{stdev_us_per_iteration}") + avg_iterations = "{0:>18}".format(f"{avg_iterations}") + stdev_iterations = "{0:>22}".format(f"{stdev_iterations}") + print(f"{mode} | {us_per_iteration} | {stdev_us_per_iteration} | {avg_iterations} | {stdev_iterations}", end=end) # The strings in this Enum are by purpose. See benchmark.utils.EnumType. 
@@ -151,7 +153,7 @@ def run_for_ms(cb: Callable[[], Any], ms_to_run: int = 100) -> List[int]: return us_iteration_results -def calc_stdev_percent(iterations: List[int], avg: int) -> float: +def calc_stdev_percent(iterations: List[int], avg: float) -> float: deviation = 0 if len(iterations) < 2 else int(stdev(iterations) * 100) / 100 return int((deviation / avg * 100) * 100) / 100 @@ -170,8 +172,9 @@ def run(data: Data, mode: Mode, runs: int, ms: int) -> None: print_row( mode="mode", us_per_iteration="µs/iteration", - avg_iterations="avg iterations", - stdev_iterations="stdev iterations %", + stdev_us_per_iteration="stdev µs/iteration %", + avg_iterations="avg iterations/run", + stdev_iterations="stdev iterations/run %", ) for current_mode, current_mode_parameter in parameter.mode_parameter.items(): results[current_data][current_mode] = [] @@ -181,14 +184,19 @@ def run(data: Data, mode: Mode, runs: int, ms: int) -> None: obj = parameter.object_creation_cb() def print_results(print_run: int, final: bool) -> None: - total_iterations: int = sum(len(x) for x in all_results) - total_elapsed_us: int = sum(sum(x) for x in all_results) - avg_iterations: int = int(total_iterations / print_run) + all_runtimes: List[int] = [x for inner in all_results for x in inner] + total_iterations: int = len(all_runtimes) + total_elapsed_us: int = sum(all_runtimes) + avg_iterations: float = total_iterations / print_run stdev_iterations: float = calc_stdev_percent([len(x) for x in all_results], avg_iterations) + stdev_us_per_iteration: float = calc_stdev_percent( + all_runtimes, total_elapsed_us / total_iterations + ) print_row( mode=current_mode.name, - us_per_iteration=int(total_elapsed_us / total_iterations), - avg_iterations=avg_iterations, + us_per_iteration=int(total_elapsed_us / total_iterations * 100) / 100, + stdev_us_per_iteration=stdev_us_per_iteration, + avg_iterations=int(avg_iterations), stdev_iterations=stdev_iterations, end="\n" if final else "\r", ) From 0ad521e072aaf15d1cffd5e2913cea83ed632f1b Mon Sep 17 00:00:00 2001 From: xdustinface Date: Thu, 24 Feb 2022 04:09:03 +0100 Subject: [PATCH 12/12] benchmarks: Add `--live/--no-live` option to enable live results --- benchmarks/streamable.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/benchmarks/streamable.py b/benchmarks/streamable.py index 8cb711ac6b96..135420202972 100644 --- a/benchmarks/streamable.py +++ b/benchmarks/streamable.py @@ -163,7 +163,8 @@ def calc_stdev_percent(iterations: List[int], avg: float) -> float: @click.option("-m", "--mode", default=Mode.all, type=EnumType(Mode)) @click.option("-r", "--runs", default=100, help="Number of benchmark runs to average results") @click.option("-t", "--ms", default=50, help="Milliseconds per run") -def run(data: Data, mode: Mode, runs: int, ms: int) -> None: +@click.option("--live/--no-live", default=False, help="Print live results (slower)") +def run(data: Data, mode: Mode, runs: int, ms: int, live: bool) -> None: results: Dict[Data, Dict[Mode, List[List[int]]]] = {} for current_data, parameter in benchmark_parameter.items(): results[current_data] = {} @@ -217,7 +218,8 @@ def print_results(print_run: int, final: bool) -> None: prepared_obj = current_mode_parameter.preparation_cb(obj) us_iteration_results = run_for_ms(lambda: conversion_cb(prepared_obj), ms) all_results.append(us_iteration_results) - print_results(current_run, False) + if live: + print_results(current_run, False) assert current_run == runs print_results(runs, True)
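
Two pieces of this series are worth seeing in isolation. The first is the measurement loop the benchmarks settle on after patches 10 and 11: run a callback for a fixed budget of CPU time (`time.process_time`), record each iteration's duration in microseconds, and report the per-iteration average together with a relative standard deviation. The sketch below lifts `run_for_ms` and `calc_stdev_percent` from the final state of `benchmarks/streamable.py`; the workload under `__main__` is a made-up example for illustration, not part of the patches.

    from statistics import stdev
    from time import process_time as clock
    from typing import Any, Callable, List


    def run_for_ms(cb: Callable[[], Any], ms_to_run: int = 100) -> List[int]:
        # Call `cb` repeatedly until roughly `ms_to_run` milliseconds of CPU time
        # have elapsed; collect each iteration's duration in microseconds.
        us_iteration_results: List[int] = []
        start = clock()
        while int((clock() - start) * 1000) < ms_to_run:
            start_iteration = clock()
            cb()
            stop_iteration = clock()
            us_iteration_results.append(int((stop_iteration - start_iteration) * 1000 * 1000))
        return us_iteration_results


    def calc_stdev_percent(iterations: List[int], avg: float) -> float:
        # Standard deviation as a percentage of the average, truncated to two
        # decimal places, matching the rounding used in the benchmark output.
        deviation = 0 if len(iterations) < 2 else int(stdev(iterations) * 100) / 100
        return int((deviation / avg * 100) * 100) / 100


    if __name__ == "__main__":
        # Hypothetical workload, only to exercise the helpers.
        samples = run_for_ms(lambda: sum(range(1000)), ms_to_run=50)
        us_per_iteration = sum(samples) / len(samples)
        print(f"iterations: {len(samples)}")
        print(f"µs/iteration: {us_per_iteration:.2f}")
        print(f"stdev µs/iteration %: {calc_stdev_percent(samples, us_per_iteration)}")

Measuring CPU time rather than `monotonic` keeps scheduler noise out of the numbers at the cost of not counting time spent blocked, which is presumably why patch 10 switches clocks for these pure-Python serialization loops.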
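
The second is the CLI handling from patch 06: `Data` and `Mode` become `str`-valued enums so that a small `click.Choice` subclass can both validate the option and hand the command callback the enum member instead of the raw string. `EnumType` below is copied from `benchmarks/utils.py`; the `Fruit` enum and the `pick` command are hypothetical, added only to show the pattern outside the benchmark:

    from enum import Enum

    import click


    # Workaround to allow `Enum` with click.Choice, as referenced in the patch:
    # validate against the enum values, then return the enum member itself.
    class EnumType(click.Choice):
        def __init__(self, enum, case_sensitive=False):
            self.__enum = enum
            super().__init__(choices=[item.value for item in enum], case_sensitive=case_sensitive)

        def convert(self, value, param, ctx):
            converted_str = super().convert(value, param, ctx)
            return self.__enum(converted_str)


    # Hypothetical example enum; the str mixin lets a member double as its own
    # CLI choice string and as the option default.
    class Fruit(str, Enum):
        apple = "apple"
        banana = "banana"


    @click.command()
    @click.option("-f", "--fruit", default=Fruit.apple, type=EnumType(Fruit))
    def pick(fruit: Fruit) -> None:
        click.echo(f"picked {fruit.name}")


    if __name__ == "__main__":
        pick()  # e.g. `python pick.py -f banana` prints "picked banana"

With the full series applied, the benchmark is driven the same way; since `streamable.py` imports `utils` without the `benchmarks.` package prefix, it appears to be meant to run from inside the `benchmarks` directory, e.g. `python streamable.py -d full_block -m to_bytes -r 100 -t 50 --live`.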