
benchmarks: Implement benchmarks for streamable #10388

Merged on Feb 24, 2022 (12 commits)
5 changes: 1 addition & 4 deletions benchmarks/block_store.py
@@ -6,6 +6,7 @@
import os
import sys

from benchmarks.utils import clvm_generator
from chia.util.db_wrapper import DBWrapper
from chia.util.ints import uint128, uint64, uint32, uint8
from utils import (
@@ -36,10 +37,6 @@
random.seed(123456789)


with open("clvm_generator.bin", "rb") as f:
clvm_generator = f.read()


async def run_add_block_benchmark(version: int):

    verbose: bool = "--verbose" in sys.argv
228 changes: 228 additions & 0 deletions benchmarks/streamable.py
@@ -0,0 +1,228 @@
from dataclasses import dataclass
from enum import Enum
from statistics import stdev
from time import process_time as clock
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union

import click
from utils import EnumType, rand_bytes, rand_full_block, rand_hash

from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.full_block import FullBlock
from chia.util.ints import uint8, uint64
from chia.util.streamable import Streamable, streamable


@dataclass(frozen=True)
@streamable
class BenchmarkInner(Streamable):
    a: str


@dataclass(frozen=True)
@streamable
class BenchmarkMiddle(Streamable):
    a: uint64
    b: List[bytes32]
    c: Tuple[str, bool, uint8, List[bytes]]
    d: Tuple[BenchmarkInner, BenchmarkInner]
    e: BenchmarkInner


@dataclass(frozen=True)
@streamable
class BenchmarkClass(Streamable):
    a: Optional[BenchmarkMiddle]
    b: Optional[BenchmarkMiddle]
    c: BenchmarkMiddle
    d: List[BenchmarkMiddle]
    e: Tuple[BenchmarkMiddle, BenchmarkMiddle, BenchmarkMiddle]


def get_random_inner() -> BenchmarkInner:
    return BenchmarkInner(rand_bytes(20).hex())


def get_random_middle() -> BenchmarkMiddle:
    a: uint64 = uint64(10)
    b: List[bytes32] = [rand_hash() for _ in range(a)]
    c: Tuple[str, bool, uint8, List[bytes]] = ("benchmark", False, uint8(1), [rand_bytes(a) for _ in range(a)])
    d: Tuple[BenchmarkInner, BenchmarkInner] = (get_random_inner(), get_random_inner())
    e: BenchmarkInner = get_random_inner()
    return BenchmarkMiddle(a, b, c, d, e)


def get_random_benchmark_object() -> BenchmarkClass:
    a: Optional[BenchmarkMiddle] = None
    b: Optional[BenchmarkMiddle] = get_random_middle()
    c: BenchmarkMiddle = get_random_middle()
    d: List[BenchmarkMiddle] = [get_random_middle() for _ in range(5)]
    e: Tuple[BenchmarkMiddle, BenchmarkMiddle, BenchmarkMiddle] = (
        get_random_middle(),
        get_random_middle(),
        get_random_middle(),
    )
    return BenchmarkClass(a, b, c, d, e)


def print_row(
    *,
    mode: str,
    us_per_iteration: Union[str, float],
    stdev_us_per_iteration: Union[str, float],
    avg_iterations: Union[str, int],
    stdev_iterations: Union[str, float],
    end: str = "\n",
) -> None:
    mode = "{0:<10}".format(f"{mode}")
    us_per_iteration = "{0:<12}".format(f"{us_per_iteration}")
    stdev_us_per_iteration = "{0:>20}".format(f"{stdev_us_per_iteration}")
    avg_iterations = "{0:>18}".format(f"{avg_iterations}")
    stdev_iterations = "{0:>22}".format(f"{stdev_iterations}")
    print(f"{mode} | {us_per_iteration} | {stdev_us_per_iteration} | {avg_iterations} | {stdev_iterations}", end=end)


# The string values of this Enum are intentional. See benchmarks.utils.EnumType.
class Data(str, Enum):
    all = "all"
    benchmark = "benchmark"
    full_block = "full_block"


# The string values of this Enum are intentional. See benchmarks.utils.EnumType.
class Mode(str, Enum):
    all = "all"
    creation = "creation"
    to_bytes = "to_bytes"
    from_bytes = "from_bytes"
    to_json = "to_json"
    from_json = "from_json"


def to_bytes(obj: Any) -> bytes:
    return bytes(obj)


@dataclass
class ModeParameter:
    conversion_cb: Callable[[Any], Any]
    preparation_cb: Optional[Callable[[Any], Any]] = None


@dataclass
class BenchmarkParameter:
    data_class: Type[Any]
    object_creation_cb: Callable[[], Any]
    mode_parameter: Dict[Mode, Optional[ModeParameter]]


benchmark_parameter: Dict[Data, BenchmarkParameter] = {
    Data.benchmark: BenchmarkParameter(
        BenchmarkClass,
        get_random_benchmark_object,
        {
            Mode.creation: None,
            Mode.to_bytes: ModeParameter(to_bytes),
            Mode.from_bytes: ModeParameter(BenchmarkClass.from_bytes, to_bytes),
            Mode.to_json: ModeParameter(BenchmarkClass.to_json_dict),
            Mode.from_json: ModeParameter(BenchmarkClass.from_json_dict, BenchmarkClass.to_json_dict),
        },
    ),
    Data.full_block: BenchmarkParameter(
        FullBlock,
        rand_full_block,
        {
            Mode.creation: None,
            Mode.to_bytes: ModeParameter(to_bytes),
            Mode.from_bytes: ModeParameter(FullBlock.from_bytes, to_bytes),
            Mode.to_json: ModeParameter(FullBlock.to_json_dict),
            Mode.from_json: ModeParameter(FullBlock.from_json_dict, FullBlock.to_json_dict),
        },
    ),
}


def run_for_ms(cb: Callable[[], Any], ms_to_run: int = 100) -> List[int]:
    us_iteration_results: List[int] = []
    start = clock()
    while int((clock() - start) * 1000) < ms_to_run:
        start_iteration = clock()
        cb()
        stop_iteration = clock()
        us_iteration_results.append(int((stop_iteration - start_iteration) * 1000 * 1000))
    return us_iteration_results


def calc_stdev_percent(iterations: List[int], avg: float) -> float:
    deviation = 0 if len(iterations) < 2 else int(stdev(iterations) * 100) / 100
    return int((deviation / avg * 100) * 100) / 100


@click.command()
@click.option("-d", "--data", default=Data.all, type=EnumType(Data))
@click.option("-m", "--mode", default=Mode.all, type=EnumType(Mode))
@click.option("-r", "--runs", default=100, help="Number of benchmark runs to average results")
@click.option("-t", "--ms", default=50, help="Milliseconds per run")
@click.option("--live/--no-live", default=False, help="Print live results (slower)")
def run(data: Data, mode: Mode, runs: int, ms: int, live: bool) -> None:
    results: Dict[Data, Dict[Mode, List[List[int]]]] = {}
    for current_data, parameter in benchmark_parameter.items():
        results[current_data] = {}
        if data == Data.all or current_data == data:
            print(f"\nruns: {runs}, ms/run: {ms}, benchmarks: {mode.name}, data: {parameter.data_class.__name__}")
            print_row(
                mode="mode",
                us_per_iteration="µs/iteration",
                stdev_us_per_iteration="stdev µs/iteration %",
                avg_iterations="avg iterations/run",
                stdev_iterations="stdev iterations/run %",
            )
            for current_mode, current_mode_parameter in parameter.mode_parameter.items():
                results[current_data][current_mode] = []
                if mode == Mode.all or current_mode == mode:
                    us_iteration_results: List[int]
                    all_results: List[List[int]] = results[current_data][current_mode]
                    obj = parameter.object_creation_cb()

                    def print_results(print_run: int, final: bool) -> None:
                        all_runtimes: List[int] = [x for inner in all_results for x in inner]
                        total_iterations: int = len(all_runtimes)
                        total_elapsed_us: int = sum(all_runtimes)
                        avg_iterations: float = total_iterations / print_run
                        stdev_iterations: float = calc_stdev_percent([len(x) for x in all_results], avg_iterations)
                        stdev_us_per_iteration: float = calc_stdev_percent(
                            all_runtimes, total_elapsed_us / total_iterations
                        )
                        print_row(
                            mode=current_mode.name,
                            us_per_iteration=int(total_elapsed_us / total_iterations * 100) / 100,
                            stdev_us_per_iteration=stdev_us_per_iteration,
                            avg_iterations=int(avg_iterations),
                            stdev_iterations=stdev_iterations,
                            end="\n" if final else "\r",
                        )

                    current_run: int = 0
                    while current_run < runs:
                        current_run += 1

                        if current_mode == Mode.creation:
                            cls = type(obj)
                            us_iteration_results = run_for_ms(lambda: cls(**obj.__dict__), ms)
                        else:
                            assert current_mode_parameter is not None
                            conversion_cb = current_mode_parameter.conversion_cb
                            assert conversion_cb is not None
                            prepared_obj = parameter.object_creation_cb()
                            if current_mode_parameter.preparation_cb is not None:
                                prepared_obj = current_mode_parameter.preparation_cb(obj)
                            us_iteration_results = run_for_ms(lambda: conversion_cb(prepared_obj), ms)
                        all_results.append(us_iteration_results)
                        if live:
                            print_results(current_run, False)
                    assert current_run == runs
                    print_results(runs, True)


if __name__ == "__main__":
    run()  # pylint: disable = no-value-for-parameter
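The measurement loop above can also be driven directly, outside the click CLI. Below is a minimal sketch, not part of this PR, that assumes it is run from the `benchmarks/` directory of a working chia-blockchain checkout (so that `streamable` and its `utils` import resolve); it times the `to_bytes`/`from_bytes` conversions of one random `BenchmarkClass` object, mirroring what `run()` does for `-d benchmark -m to_bytes` and `-m from_bytes`.

```python
# Minimal sketch, not part of this PR: time one random BenchmarkClass object
# through the to_bytes/from_bytes conversions using the helpers added above.
# Assumes the benchmarks/ directory of the repo is the working directory.
from statistics import mean

from streamable import BenchmarkClass, calc_stdev_percent, get_random_benchmark_object, run_for_ms

obj = get_random_benchmark_object()
serialized = bytes(obj)  # Streamable objects serialize via bytes()

for name, cb in [
    ("to_bytes", lambda: bytes(obj)),
    ("from_bytes", lambda: BenchmarkClass.from_bytes(serialized)),
]:
    # run_for_ms calls the callback repeatedly for ~50 ms and returns
    # the per-iteration runtimes in microseconds
    runtimes = run_for_ms(cb, ms_to_run=50)
    avg = mean(runtimes)
    print(f"{name}: {avg:.2f} µs/iteration, stdev {calc_stdev_percent(runtimes, avg)}%")
```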
109 changes: 109 additions & 0 deletions benchmarks/utils.py
@@ -5,11 +5,19 @@
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.vdf import VDFInfo, VDFProof
from chia.types.blockchain_format.foliage import Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.program import SerializedProgram
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.reward_chain_block import RewardChainBlock
from chia.types.full_block import FullBlock
from chia.util.ints import uint128
from chia.util.db_wrapper import DBWrapper
from typing import Tuple
from pathlib import Path
from datetime import datetime
import aiosqlite
import click
import os
import sys
import random
@@ -18,6 +26,20 @@
# farmer puzzle hash
ph = bytes32(b"a" * 32)

with open(Path(os.path.realpath(__file__)).parent / "clvm_generator.bin", "rb") as f:
    clvm_generator = f.read()


# Workaround to allow `Enum` with click.Choice: https://github.com/pallets/click/issues/605#issuecomment-901099036
class EnumType(click.Choice):
    def __init__(self, enum, case_sensitive=False):
        self.__enum = enum
        super().__init__(choices=[item.value for item in enum], case_sensitive=case_sensitive)

    def convert(self, value, param, ctx):
        converted_str = super().convert(value, param, ctx)
        return self.__enum(converted_str)


def rewards(height: uint32) -> Tuple[Coin, Coin]:
    farmer_coin = create_farmer_coin(height, ph, uint64(250000000), DEFAULT_CONSTANTS.GENESIS_CHALLENGE)
@@ -66,6 +88,93 @@ def rand_vdf_proof() -> VDFProof:
    )


def rand_full_block() -> FullBlock:
    proof_of_space = ProofOfSpace(
        rand_hash(),
        rand_g1(),
        None,
        rand_g1(),
        uint8(0),
        rand_bytes(8 * 32),
    )

    reward_chain_block = RewardChainBlock(
        uint128(1),
        uint32(2),
        uint128(3),
        uint8(4),
        rand_hash(),
        proof_of_space,
        None,
        rand_g2(),
        rand_vdf(),
        None,
        rand_g2(),
        rand_vdf(),
        rand_vdf(),
        True,
    )

    pool_target = PoolTarget(
        rand_hash(),
        uint32(0),
    )

    foliage_block_data = FoliageBlockData(
        rand_hash(),
        pool_target,
        rand_g2(),
        rand_hash(),
        rand_hash(),
    )

    foliage = Foliage(
        rand_hash(),
        rand_hash(),
        foliage_block_data,
        rand_g2(),
        rand_hash(),
        rand_g2(),
    )

    foliage_transaction_block = FoliageTransactionBlock(
        rand_hash(),
        uint64(0),
        rand_hash(),
        rand_hash(),
        rand_hash(),
        rand_hash(),
    )

    farmer_coin, pool_coin = rewards(uint32(0))

    transactions_info = TransactionsInfo(
        rand_hash(),
        rand_hash(),
        rand_g2(),
        uint64(0),
        uint64(1),
        [farmer_coin, pool_coin],
    )

    full_block = FullBlock(
        [],
        reward_chain_block,
        rand_vdf_proof(),
        rand_vdf_proof(),
        rand_vdf_proof(),
        rand_vdf_proof(),
        rand_vdf_proof(),
        foliage,
        foliage_transaction_block,
        transactions_info,
        SerializedProgram.from_bytes(clvm_generator),
        [],
    )

    return full_block


async def setup_db(name: str, db_version: int) -> DBWrapper:
    db_filename = Path(name)
    try:
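For completeness, a rough sketch (again not part of the diff, with the same working-directory assumption as the earlier example) showing how `rand_full_block()` combines with `run_for_ms()` to compare JSON serialization and parsing of a realistic `FullBlock`:

```python
# Rough sketch, not part of this PR: reuse rand_full_block() and run_for_ms()
# to compare JSON serialization and parsing of a randomly generated FullBlock.
# Assumes the benchmarks/ directory is the working directory.
from streamable import run_for_ms
from utils import rand_full_block

from chia.types.full_block import FullBlock

block = rand_full_block()
json_dict = block.to_json_dict()

# Each call collects per-iteration runtimes (in µs) for roughly 100 ms.
to_json_times = run_for_ms(block.to_json_dict, ms_to_run=100)
from_json_times = run_for_ms(lambda: FullBlock.from_json_dict(json_dict), ms_to_run=100)

print(f"to_json_dict:   {sum(to_json_times) / len(to_json_times):.2f} µs/iteration")
print(f"from_json_dict: {sum(from_json_times) / len(from_json_times):.2f} µs/iteration")
```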